/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

static struct mbuf *get_ddp_mbuf(int len);

#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
#define PPOD_SIZE	(PPOD_SZ(1))

/* XXX: must match A_ULP_RX_TDDP_PSZ */
static int t4_ddp_pgsz[] = {4096, 4096 << 2, 4096 << 4, 4096 << 6};
t4_dump_tcb(struct adapter *sc, int tid)
{
        uint32_t tcb_base, off, i, j;

        /* Dump TCB for the tid */
        tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
        t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2),
            tcb_base + tid * TCB_SIZE);
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
        for (i = 0; i < 4; i++) {
                for (j = 0; j < 8; j++, off += 4)
                        buf[j] = htonl(t4_read_reg(sc, MEMWIN2_BASE + off));

                printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
                    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
                    buf[7]);
        }
}
#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

alloc_ppods(struct tom_data *td, int n, u_int *ppod_addr)
{
        rc = vmem_alloc(td->ppod_arena, PPOD_SZ(n), M_NOWAIT | M_FIRSTFIT, &v);
        *ppod_addr = (u_int)v;
}

free_ppods(struct tom_data *td, u_int ppod_addr, int n)
{
        vmem_free(td->ppod_arena, (vmem_addr_t)ppod_addr, PPOD_SZ(n));
}

pages_to_nppods(int npages, int ddp_pgsz)
{
        int nsegs = npages * PAGE_SIZE / ddp_pgsz;

        return (howmany(nsegs, PPOD_PAGES));
}
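/*
 * For example, assuming a 4KB VM page size: a 64KB buffer backed by sixteen
 * 4KB pages and programmed with a 4KB DDP page size yields 16 DDP segments,
 * and those segments are packed PPOD_PAGES at a time into page pods (rounded
 * up).  PPOD_PAGES itself comes from the shared hardware headers, so the
 * exact pod count follows from that definition.
 */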
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{
        free(db->pages, M_CXGBE);
        free_ppods(td, db->ppod_addr, db->nppods);
}

release_ddp_resources(struct toepcb *toep)
{
        for (i = 0; i < nitems(toep->db); i++) {
                if (toep->db[i] != NULL) {
                        free_ddp_buffer(toep->td, toep->db[i]);
                }
        }
}

/* XXX: handle_ddp_data code duplication */
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp = intotcpcb(inp);
        struct sockbuf *sb = &inp->inp_socket->so_rcv;

        INP_WLOCK_ASSERT(inp);
        SOCKBUF_LOCK_ASSERT(sb);

#ifndef USE_DDP_RX_FLOW_CONTROL
        KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
#endif

        KASSERT(toep->sb_cc >= sbused(sb),
            ("%s: sb %p has more data (%d) than last time (%d).",
            __func__, sb, sbused(sb), toep->sb_cc));
        toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
        toep->rx_credits -= n;	/* adjust for F_RX_FC_DDP */
#endif
        sbappendstream_locked(sb, m, 0);
        toep->sb_cc = sbused(sb);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
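/*
 * Both lengths above are raw structure sizes and need not be multiples of 16.
 * Every ULP_TX command in a work request must end on a 16-byte boundary, so
 * the builders below pad with a ULP_TX_SC_NOOP sub-command whenever the raw
 * length leaves a remainder (the "% 16" checks), and callers size the work
 * request with roundup2(..., 16).
 */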
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
        struct ulptx_idata *ulpsc;
        struct cpl_set_tcb_field_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
        req->reply_ctrl = htobe16(V_NO_REPLY(1) |
            V_QUEUENO(toep->ofld_rxq->iq.abs_id));
        req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
        req->mask = htobe64(mask);
        req->val = htobe64(val);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__SET_TCB_FIELD_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
        }
}
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
        struct ulptx_idata *ulpsc;
        struct cpl_rx_data_ack_core *req;

        ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
        ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = htobe32(sizeof(*req));

        req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
        OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
        req->credit_dack = htobe32(F_RX_MODULATE_RX);

        ulpsc = (struct ulptx_idata *)(req + 1);
        if (LEN__RX_DATA_ACK_ULP % 16) {
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                ulpsc->len = htobe32(0);
        }
}
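/*
 * Roughly how the DDP flags are chosen for the selected buffer below: the
 * buffer is marked valid and active, PSH-triggered completion is disabled for
 * MSG_WAITALL reads so the buffer keeps filling, and a flush (early
 * completion) is requested for non-blocking reads so the caller is not left
 * waiting on a partially filled buffer.
 */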
static inline uint64_t
select_ddp_flags(struct socket *so, int flags, int db_idx)
{
        uint64_t ddp_flags = V_TF_DDP_INDICATE_OUT(0);
        int waitall = flags & MSG_WAITALL;
        int nb = so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO);

        KASSERT(db_idx == 0 || db_idx == 1,
            ("%s: bad DDP buffer index %d", __func__, db_idx));

        if (db_idx == 0) {
                ddp_flags |= V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_ACTIVE_BUF(0);
                if (waitall)
                        ddp_flags |= V_TF_DDP_PUSH_DISABLE_0(1);
                else if (nb)
                        ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
                else
                        ddp_flags |= V_TF_DDP_BUF0_FLUSH(0);
        } else {
                ddp_flags |= V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1);
                if (waitall)
                        ddp_flags |= V_TF_DDP_PUSH_DISABLE_1(1);
                else if (nb)
                        ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
                else
                        ddp_flags |= V_TF_DDP_BUF1_FLUSH(0);
        }

        return (ddp_flags);
}
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    int offset, uint64_t ddp_flags)
{
        struct ddp_buffer *db = toep->db[db_idx];
        struct work_request_hdr *wrh;
        struct ulp_txpkt *ulpmc;

        KASSERT(db_idx == 0 || db_idx == 1,
            ("%s: bad DDP buffer index %d", __func__, db_idx));

        /*
         * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
         * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
         *
         * The work request header is 16B and always ends at a 16B boundary.
         * The ULPTX master commands that follow must all end at 16B boundaries
         * too so we round up the size to 16.
         */
        len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
            roundup2(LEN__RX_DATA_ACK_ULP, 16);
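        /*
         * The three TCB fields updated below are the buffer's tag, its
         * offset/length pair (BUF0 or BUF1 depending on db_idx), and the DDP
         * flags; the gratuitous RX_DATA_ACK rides along in the same work
         * request.  With the 16-byte header and each command rounded up to a
         * 16-byte multiple, "len" is the exact size requested from
         * alloc_wrqe() below.
         */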
        wr = alloc_wrqe(len, toep->ctrlq);

        INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
        ulpmc = (struct ulp_txpkt *)(wrh + 1);

        /* Write the buffer's tag */
        ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
            W_TCB_RX_DDP_BUF0_TAG + db_idx,
            V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
            V_TCB_RX_DDP_BUF0_TAG(db->tag));

        /* Update the current offset in the DDP buffer and its total length */
        if (db_idx == 0)
                ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
                    W_TCB_RX_DDP_BUF0_OFFSET,
                    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
                    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
                    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
                    V_TCB_RX_DDP_BUF0_LEN(db->len));
        else
                ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
                    W_TCB_RX_DDP_BUF1_OFFSET,
                    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
                    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
                    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
                    V_TCB_RX_DDP_BUF1_LEN((u64)db->len << 32));

        /* Update DDP flags */
        ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
            V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF1_FLUSH(1) |
            V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PUSH_DISABLE_1(1) |
            V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1) |
            V_TF_DDP_ACTIVE_BUF(1) | V_TF_DDP_INDICATE_OUT(1), ddp_flags);

        /* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
        ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

        return (wr);
}
discourage_ddp(struct toepcb *toep)
{

        if (toep->ddp_score && --toep->ddp_score == 0) {
                toep->ddp_flags &= ~DDP_OK;
                toep->ddp_disabled = time_uptime;
                CTR3(KTR_CXGBE, "%s: tid %u !DDP_OK @ %u",
                    __func__, toep->tid, time_uptime);
        }
}
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
        uint32_t report = be32toh(ddp_report);
        unsigned int db_flag;
        struct inpcb *inp = toep->inp;

        db_flag = report & F_DDP_BUF_IDX ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;

        if (__predict_false(!(report & F_DDP_INV)))
                CXGBE_UNIMPLEMENTED("DDP buffer still valid");

        so = inp_inpcbtosocket(inp);
        if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
                /*
                 * XXX: think a bit more.
                 * tcpcb probably gone, but socket should still be around
                 * because we always wait for DDP completion in soreceive no
                 * matter what.  Just wake it up and let it clean up.
                 */
                CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
                    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
        }
        /*
         * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
         * sequence number of the next byte to receive.  The length of
         * the data received for this message must be computed by
         * comparing the new and old values of rcv_nxt.
         *
         * For RX_DATA_DDP, len might be non-zero, but it is only the
         * length of the most recent DMA.  It does not include the
         * total length of the data received since the previous update
         * for this DDP buffer.  rcv_nxt is the sequence number of the
         * first received byte from the most recent DMA.
         */
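        /*
         * Either way the newly placed data is the advance in sequence space.
         * For example, on an RX_DDP_COMPLETE where tp->rcv_nxt was 0x1000 and
         * the report's rcv_nxt is 0x1400, len starts at 0 and the line below
         * accounts for the 0x400 bytes that landed in the DDP buffer.
         */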
        len += be32toh(rcv_nxt) - tp->rcv_nxt;

        tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
        KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
#endif
        m = get_ddp_mbuf(len);

        if (report & F_DDP_BUF_COMPLETE)
                toep->ddp_score = DDP_HIGH_SCORE;
        else
                discourage_ddp(toep);

        /* receive buffer autosize */
        if (sb->sb_flags & SB_AUTOSIZE &&
            V_tcp_do_autorcvbuf &&
            sb->sb_hiwat < V_tcp_autorcvbuf_max &&
            len > (sbspace(sb) / 8 * 7)) {
                unsigned int hiwat = sb->sb_hiwat;
                unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
                    V_tcp_autorcvbuf_max);

                if (!sbreserve_locked(sb, newsize, so, NULL))
                        sb->sb_flags &= ~SB_AUTOSIZE;
                else
                        toep->rx_credits += newsize - hiwat;
        }

        KASSERT(toep->sb_cc >= sbused(sb),
            ("%s: sb %p has more data (%d) than last time (%d).",
            __func__, sb, sbused(sb), toep->sb_cc));
        toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
        toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
#endif
        sbappendstream_locked(sb, m, 0);
        toep->sb_cc = sbused(sb);

        KASSERT(toep->ddp_flags & db_flag,
            ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x, report 0x%x",
            __func__, toep, toep->ddp_flags, report));
        toep->ddp_flags &= ~db_flag;
        sorwakeup_locked(so);
        SOCKBUF_UNLOCK_ASSERT(sb);
}
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, struct sockbuf *sb,
    __be32 rcv_nxt)
{

        SOCKBUF_LOCK_ASSERT(sb);
        INP_WLOCK_ASSERT(toep->inp);
        len = be32toh(rcv_nxt) - tp->rcv_nxt;

        /* Signal handle_ddp() to break out of its sleep loop. */
        toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);

        KASSERT(toep->sb_cc >= sbused(sb),
            ("%s: sb %p has more data (%d) than last time (%d).",
            __func__, sb, sbused(sb), toep->sb_cc));
        toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
        toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
#endif
        m = get_ddp_mbuf(len);

        sbappendstream_locked(sb, m, 0);
        toep->sb_cc = sbused(sb);
}

#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
    F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
    F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
    F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(cpl);
        struct toepcb *toep = lookup_tid(sc, tid);
        struct tom_data *td = toep->td;

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
        KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
        KASSERT(!(toep->flags & TPF_SYNQE),
            ("%s: toep %p claims to be a synq entry", __func__, toep));

        vld = be32toh(cpl->ddpvld);
        if (__predict_false(vld & DDP_ERR)) {
                panic("%s: DDP error 0x%x (tid %d, toep %p)",
                    __func__, vld, tid, toep);
        }

        if (toep->ulp_mode == ULP_MODE_ISCSI) {
                m = m_get(M_NOWAIT, MT_DATA);
                if (m == NULL)
                        CXGBE_UNIMPLEMENTED("mbuf alloc failure");
                memcpy(mtod(m, unsigned char *), cpl,
                    sizeof(struct cpl_rx_data_ddp));
                if (!t4_cpl_iscsi_callback(td, toep, m, CPL_RX_DATA_DDP))
                        return (0);
        }

        handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

        return (0);
}
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(cpl);
        struct toepcb *toep = lookup_tid(sc, tid);

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
        KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
        KASSERT(!(toep->flags & TPF_SYNQE),
            ("%s: toep %p claims to be a synq entry", __func__, toep));

        handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

        return (0);
}
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

        KASSERT((toep->ddp_flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
            ("%s: toep %p has bad ddp_flags 0x%x",
            __func__, toep, toep->ddp_flags));

        CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
            __func__, toep->tid, time_uptime);

        toep->ddp_flags |= DDP_SC_REQ;
        t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS,
            V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
            V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
            V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
            V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1));
        t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
            V_TF_RCV_COALESCE_ENABLE(1), 0);
}

disable_ddp(struct adapter *sc, struct toepcb *toep)
{

        KASSERT((toep->ddp_flags & (DDP_ON | DDP_SC_REQ)) == DDP_ON,
            ("%s: toep %p has bad ddp_flags 0x%x",
            __func__, toep, toep->ddp_flags));

        CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
            __func__, toep->tid, time_uptime);

        toep->ddp_flags |= DDP_SC_REQ;
        t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
            V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1));
        t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1),
            V_TF_DDP_OFF(1));
}
hold_uio(struct uio *uio, vm_page_t **ppages, int *pnpages)
{
        vm_offset_t start, end;

        KASSERT(uio->uio_iovcnt == 1,
            ("%s: uio_iovcnt %d", __func__, uio->uio_iovcnt));
        KASSERT(uio->uio_td->td_proc == curproc,
            ("%s: uio proc (%p) is not curproc (%p)",
            __func__, uio->uio_td->td_proc, curproc));

        map = &curproc->p_vmspace->vm_map;
        iov = &uio->uio_iov[0];
        start = trunc_page((uintptr_t)iov->iov_base);
        end = round_page((vm_offset_t)iov->iov_base + iov->iov_len);
        n = howmany(end - start, PAGE_SIZE);

        if (end - start > MAX_DDP_BUFFER_SIZE)
                return (E2BIG);

        pp = malloc(n * sizeof(vm_page_t), M_CXGBE, M_NOWAIT);

        if (vm_fault_quick_hold_pages(map, (vm_offset_t)iov->iov_base,
            iov->iov_len, VM_PROT_WRITE, pp, n) < 0) {
                free(pp, M_CXGBE);
                return (EFAULT);
        }

        *ppages = pp;
        *pnpages = n;

        return (0);
}
bufcmp(struct ddp_buffer *db, vm_page_t *pages, int npages, int offset, int len)
{

        if (db == NULL || db->npages != npages || db->offset != offset ||
            db->len != len)
                return (1);

        for (i = 0; i < npages; i++) {
                if (pages[i]->phys_addr != db->pages[i]->phys_addr)
                        return (1);
        }

        return (0);
}

calculate_hcf(int n1, int n2)
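/*
 * calculate_hcf() returns the highest common factor (greatest common divisor)
 * of its two arguments; alloc_ddp_buffer() below folds the physical segment
 * sizes through it to find the largest DDP page size that divides every
 * segment.  The reduction itself is presumably a standard Euclidean-style
 * loop.
 */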
static struct ddp_buffer *
alloc_ddp_buffer(struct tom_data *td, vm_page_t *pages, int npages, int offset,
    int len)
{
        int i, hcf, seglen, idx, ppod, nppods;
        struct ddp_buffer *db;

        /*
         * The DDP page size is unrelated to the VM page size.  We combine
         * contiguous physical pages into larger segments to get the best DDP
         * page size possible.  This is the largest of the four sizes in
         * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes
         * in the page array.
         */
        hcf = 0;
        for (i = 0; i < npages; i++) {
                seglen = PAGE_SIZE;
                while (i < npages - 1 &&
                    pages[i]->phys_addr + PAGE_SIZE == pages[i + 1]->phys_addr) {
                        seglen += PAGE_SIZE;
                        i++;
                }

                hcf = calculate_hcf(hcf, seglen);
                if (hcf < t4_ddp_pgsz[1]) {
                        idx = 0;
                        goto have_pgsz;	/* give up, short circuit */
                }
        }

        if (hcf % t4_ddp_pgsz[0] != 0) {
                /* hmmm.  This could only happen when PAGE_SIZE < 4K */
                KASSERT(PAGE_SIZE < 4096,
                    ("%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf));
                CTR3(KTR_CXGBE, "%s: PAGE_SIZE %d, hcf %d",
                    __func__, PAGE_SIZE, hcf);
                return (NULL);
        }

        for (idx = nitems(t4_ddp_pgsz) - 1; idx > 0; idx--) {
                if (hcf % t4_ddp_pgsz[idx] == 0)
                        break;
        }
have_pgsz:
        MPASS(idx <= M_PPOD_PGSZ);

        db = malloc(sizeof(*db), M_CXGBE, M_NOWAIT);
        if (db == NULL) {
                CTR1(KTR_CXGBE, "%s: malloc failed.", __func__);
                return (NULL);
        }

        nppods = pages_to_nppods(npages, t4_ddp_pgsz[idx]);
        if (alloc_ppods(td, nppods, &db->ppod_addr) != 0) {
                free(db, M_CXGBE);
                CTR4(KTR_CXGBE, "%s: no pods, nppods %d, resid %d, pgsz %d",
                    __func__, nppods, len, t4_ddp_pgsz[idx]);
                return (NULL);
        }
        ppod = (db->ppod_addr - td->ppod_start) / PPOD_SIZE;

        db->tag = V_PPOD_PGSZ(idx) | V_PPOD_TAG(ppod);

        CTR6(KTR_CXGBE, "New DDP buffer. "
            "ddp_pgsz %d, ppod 0x%x, npages %d, nppods %d, offset %d, len %d",
            t4_ddp_pgsz[idx], ppod, db->npages, db->nppods, db->offset,
            db->len);

        return (db);
}

#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
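/*
 * Page pods are written to adapter memory with ULP_TX_MEM_WRITE work requests
 * that carry the pod contents as immediate data; 256 bytes is the per-request
 * immediate-data budget assumed here, so NUM_ULP_TX_SC_IMM_PPODS is how many
 * pods fit in one request and write_page_pods() loops in chunks of that size.
 */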
write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db)
{
        struct ulp_mem_io *ulpmc;
        struct ulptx_idata *ulpsc;
        struct pagepod *ppod;
        int i, j, k, n, chunk, len, ddp_pgsz, idx;

        cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
        if (is_t4(sc))
                cmd |= htobe32(F_ULP_MEMIO_ORDER);
        else
                cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
        ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)];
        ppod_addr = db->ppod_addr;
        for (i = 0; i < db->nppods; ppod_addr += chunk) {

                /* How many page pods are we writing in this cycle */
                n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
                chunk = PPOD_SZ(n);
                len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

                wr = alloc_wrqe(len, toep->ctrlq);
                if (wr == NULL)
                        return (ENOMEM);	/* ok to just bail out */

                INIT_ULPTX_WR(ulpmc, len, 0, 0);
                ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
                ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
                ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

                ulpsc = (struct ulptx_idata *)(ulpmc + 1);
                ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
                ulpsc->len = htobe32(chunk);

                ppod = (struct pagepod *)(ulpsc + 1);
                for (j = 0; j < n; i++, j++, ppod++) {
                        ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
                            V_PPOD_TID(toep->tid) | db->tag);
                        ppod->len_offset = htobe64(V_PPOD_LEN(db->len) |
                            V_PPOD_OFST(db->offset));

                        idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
                        for (k = 0; k < nitems(ppod->addr); k++) {
                                if (idx < db->npages) {
                                        ppod->addr[k] =
                                            htobe64(db->pages[idx]->phys_addr);
                                        idx += ddp_pgsz / PAGE_SIZE;
                                }
                                CTR5(KTR_CXGBE,
                                    "%s: tid %d ppod[%d]->addr[%d] = %p",
                                    __func__, toep->tid, i, k,
                                    htobe64(ppod->addr[k]));
                        }
                }
        }

        return (0);
}
/*
 * Reuse, or allocate (and program the page pods for) a new DDP buffer.  The
 * "pages" array is handed over to this function and should not be used in any
 * way by the caller after that.
 */
select_ddp_buffer(struct adapter *sc, struct toepcb *toep, vm_page_t *pages,
    int npages, int db_off, int db_len)
{
        struct ddp_buffer *db;
        struct tom_data *td = sc->tom_softc;
        int i, empty_slot = -1;

        for (i = 0; i < nitems(toep->db); i++) {
                if (bufcmp(toep->db[i], pages, npages, db_off, db_len) == 0) {
                        free(pages, M_CXGBE);
                        return (i);	/* pages still held */
                } else if (toep->db[i] == NULL && empty_slot < 0)
                        empty_slot = i;
        }

        /* Allocate new buffer, write its page pods. */
        db = alloc_ddp_buffer(td, pages, npages, db_off, db_len);
        if (db == NULL) {
                vm_page_unhold_pages(pages, npages);
                free(pages, M_CXGBE);
                return (-1);
        }
        if (write_page_pods(sc, toep, db) != 0) {
                vm_page_unhold_pages(pages, npages);
                free_ddp_buffer(td, db);
                return (-1);
        }

        i = empty_slot;
        if (i < 0) {
                i = arc4random() % nitems(toep->db);
                free_ddp_buffer(td, toep->db[i]);
        }
        toep->db[i] = db;

        CTR5(KTR_CXGBE, "%s: tid %d, DDP buffer[%d] = %p (tag 0x%x)",
            __func__, toep->tid, i, db, db->tag);

        return (i);
}
wire_ddp_buffer(struct ddp_buffer *db)
{
        for (i = 0; i < db->npages; i++) {
        }
}

unwire_ddp_buffer(struct ddp_buffer *db)
{
        for (i = 0; i < db->npages; i++) {
                p = db->pages[i];
                vm_page_unwire(p, PQ_INACTIVE);
        }
}
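/*
 * A rough map of the DDP receive path below: handle_ddp() holds the pages of
 * the caller's uio, selects (or programs) one of the two per-connection DDP
 * buffers, sends the compound work request built by mk_update_tcb_for_ddp(),
 * wires the pages while the chip DMAs into them, and then sleeps until
 * handle_ddp_data()/handle_ddp_close() clears the buffer's active flag before
 * unwiring the pages.
 */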
handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
{
        struct sockbuf *sb = &so->so_rcv;
        struct tcpcb *tp = so_sototcpcb(so);
        struct toepcb *toep = tp->t_toe;
        struct adapter *sc = td_adapter(toep->td);
        int npages, db_idx, rc, buf_flag;
        struct ddp_buffer *db;

        SOCKBUF_LOCK_ASSERT(sb);

        if (sbused(sb) + sc->tt.ddp_thres > uio->uio_resid) {
                CTR4(KTR_CXGBE, "%s: sb_cc %d, threshold %d, resid %d",
                    __func__, sbused(sb), sc->tt.ddp_thres, uio->uio_resid);
        }

        /* XXX: too eager to disable DDP, could handle NBIO better than this. */
        if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
            uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
            so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
            error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
                goto no_ddp;

        /*
         * Fault in and then hold the pages of the uio buffers.  We'll wire them
         * a bit later if everything else works out.
         */
        if (hold_uio(uio, &pages, &npages) != 0) {
                goto no_ddp;
        }

        if (__predict_false(so->so_error || sb->sb_state & SBS_CANTRCVMORE)) {
                vm_page_unhold_pages(pages, npages);
                free(pages, M_CXGBE);
                goto no_ddp;
        }

        /*
         * Figure out which one of the two DDP buffers to use this time.
         */
        db_idx = select_ddp_buffer(sc, toep, pages, npages,
            (uintptr_t)uio->uio_iov->iov_base & PAGE_MASK, uio->uio_resid);
        pages = NULL;	/* handed off to select_ddp_buffer */
        if (db_idx < 0)
                goto no_ddp;

        db = toep->db[db_idx];
        buf_flag = db_idx == 0 ? DDP_BUF0_ACTIVE : DDP_BUF1_ACTIVE;

        /*
         * Build the compound work request that tells the chip where to DMA the
         * data.
         */
        ddp_flags = select_ddp_flags(so, flags, db_idx);
        wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
        if (wr == NULL) {
                /*
                 * Just unhold the pages.  The DDP buffer's software state is
                 * left as-is in the toep.  The page pods were written
                 * successfully and we may have an opportunity to use it in the
                 * future.
                 */
                vm_page_unhold_pages(db->pages, db->npages);
                goto no_ddp;
        }

        /* Wire (and then unhold) the pages, and give the chip the go-ahead. */
        wire_ddp_buffer(db);
        t4_wrq_tx(sc, wr);
        sb->sb_flags &= ~SB_DDP_INDICATE;
        toep->ddp_flags |= buf_flag;

        /*
         * Wait for the DDP operation to complete and then unwire the pages.
         * The return code from the sbwait will be the final return code of this
         * function.  But we do need to wait for DDP no matter what.
         */
        while (toep->ddp_flags & buf_flag) {
                /* XXXGL: shouldn't here be sbwait() call? */
                sb->sb_flags |= SB_WAIT;
                msleep(&sb->sb_acc, &sb->sb_mtx, PSOCK, "sbwait", 0);
        }
        unwire_ddp_buffer(db);

        return (0);

no_ddp:
        disable_ddp(sc, toep);
        discourage_ddp(toep);
        sb->sb_flags &= ~SB_DDP_INDICATE;
}
t4_init_ddp(struct adapter *sc, struct tom_data *td)
{

        td->ppod_start = sc->vres.ddp.start;
        td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start,
            sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT);

        t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp);
        t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
}

t4_uninit_ddp(struct adapter *sc __unused, struct tom_data *td)
{

        if (td->ppod_arena != NULL) {
                vmem_destroy(td->ppod_arena);
                td->ppod_arena = NULL;
        }
}

#define VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
#define SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{

        CXGBE_UNIMPLEMENTED(__func__);
}

static char ddp_magic_str[] = "nothing to see here";
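/*
 * DDP places payload directly into the user's wired pages, so nothing has to
 * be copied out of the socket buffer.  To keep the sockbuf byte accounting
 * and the soreceive loop unchanged, a placeholder mbuf whose m_data points at
 * ddp_magic_str is queued for each completion; m_mbuftouio_ddp() recognizes
 * such mbufs and runs uiomove() with UIO_NOCOPY so only the uio offsets are
 * advanced.
 */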
static struct mbuf *
get_ddp_mbuf(int len)
{
        m = m_get(M_NOWAIT, MT_DATA);
        if (m == NULL)
                CXGBE_UNIMPLEMENTED("mbuf alloc failure");
        m->m_len = len;
        m->m_data = &ddp_magic_str[0];

        return (m);
}

is_ddp_mbuf(struct mbuf *m)
{

        return (m->m_data == &ddp_magic_str[0]);
}

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
m_mbuftouio_ddp(struct uio *uio, struct mbuf *m, int len)
{
        int error, length, total;
        int progress = 0;

        if (len > 0)
                total = min(uio->uio_resid, len);
        else
                total = uio->uio_resid;

        /* Fill the uio with data from the mbufs. */
        for (; m != NULL; m = m->m_next) {
                length = min(m->m_len, total - progress);

                if (is_ddp_mbuf(m)) {
                        enum uio_seg segflag = uio->uio_segflg;

                        uio->uio_segflg = UIO_NOCOPY;
                        error = uiomove(mtod(m, void *), length, uio);
                        uio->uio_segflg = segflag;
                } else
                        error = uiomove(mtod(m, void *), length, uio);
                if (error)
                        break;

                progress += length;
        }

        return (error);
}
/*
 * Based on soreceive_stream() in uipc_socket.c
 */
t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
        int len = 0, error = 0, flags, oresid, ddp_handled = 0;
        struct mbuf *m, *n = NULL;

        /* We only do stream sockets. */
        if (so->so_type != SOCK_STREAM)

        if (controlp != NULL)

        flags = *flagsp &~ MSG_EOR;

        if (flags & MSG_OOB)
                return (soreceive_rcvoob(so, uio, flags));

        /* Prevent other readers from entering the socket. */
        error = sblock(sb, SBLOCKWAIT(flags));

        /* Easy one, no space to copyout anything. */
        if (uio->uio_resid == 0) {

        oresid = uio->uio_resid;

        /* We will never ever get anything unless we are or were connected. */
        if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);

        if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled) {

                /* uio should be just as it was at entry */
                KASSERT(oresid == uio->uio_resid,
                    ("%s: oresid = %d, uio_resid = %zd, sbavail = %d",
                    __func__, oresid, uio->uio_resid, sbavail(sb)));

                error = handle_ddp(so, uio, flags, 0);

        /* Abort if socket has reported problems. */

                if (oresid > uio->uio_resid)

                error = so->so_error;
                if (!(flags & MSG_PEEK))

        /* Door is closed.  Deliver what is left, if any. */
        if (sb->sb_state & SBS_CANTRCVMORE) {

        /* Socket buffer is empty and we shall not block. */
        if (sbavail(sb) == 0 &&
            ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {

        /* Socket buffer got some data that we shall deliver now. */
        if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
            ((so->so_state & SS_NBIO) ||
            (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
            sbavail(sb) >= sb->sb_lowat ||
            sbavail(sb) >= uio->uio_resid ||
            sbavail(sb) >= sb->sb_hiwat)) {

        /* On MSG_WAITALL we must wait until all data or error arrives. */
        if ((flags & MSG_WAITALL) &&
            (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat))

        /*
         * Wait and block until (more) data comes in.
         * NB: Drops the sockbuf lock during wait.
         */

        if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled) {
                (void) handle_ddp(so, uio, flags, 1);

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);
        KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));

        if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled)

                uio->uio_td->td_ru.ru_msgrcv++;

        /* Fill uio until full or current end of socket buffer is reached. */
        len = min(uio->uio_resid, sbavail(sb));

        /* Dequeue as many mbufs as possible. */
        if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
                for (*mp0 = m = sb->sb_mb;
                    m != NULL && m->m_len <= len;

                        uio->uio_resid -= m->m_len;

                if (sb->sb_mb == NULL)

        /* Copy the remainder. */

                KASSERT(sb->sb_mb != NULL,
                    ("%s: len > 0 && sb->sb_mb empty", __func__));

                m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);

                        len = 0;	/* Don't flush data from sockbuf. */

                        uio->uio_resid -= m->m_len;

        /* NB: Must unlock socket buffer as uiomove may sleep. */

        error = m_mbuftouio_ddp(uio, sb->sb_mb, len);

        SBLASTRECORDCHK(sb);

        /*
         * Remove the delivered data from the socket buffer unless we
         * were only peeking.
         */
        if (!(flags & MSG_PEEK)) {

                sbdrop_locked(sb, len);

                /* Notify protocol that we drained some data. */
                if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
                    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
                    !(flags & MSG_SOCALLBCK))) {

                        (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);

        /*
         * For MSG_WAITALL we may have to loop again and wait for
         * more data to come in.
         */
        if ((flags & MSG_WAITALL) && uio->uio_resid > 0)

        SOCKBUF_LOCK_ASSERT(sb);
        SBLASTRECORDCHK(sb);