/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received backend3

static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

/*
 * A page set holds information about a buffer used for DDP.  The page
 * set holds resources such as the VM pages backing the buffer (either
 * held or wired) and the page pods associated with the buffer.
 * Recently used page sets are cached to allow for efficient reuse of
 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
 * Note that cached page sets keep the backing pages wired.  The
 * number of wired pages is capped by only allowing for two wired
 * pagesets per connection.  This is not a perfect cap, but is a
 * trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
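
/*
 * Release a pageset: free its page pods, unwire the backing pages, and
 * place the pageset on the orphan list so the taskqueue callback can
 * drop the vmspace reference and free the memory from a sleepable
 * context.
 */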
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_unwire(p, PQ_INACTIVE);
	}
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD)) {
		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
		    nitems(toep->ddp.db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count++;
	} else
		free_pageset(toep->td, ps);
}

static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}

static void
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
	}

	if (db->ps)
		free_pageset(td, db->ps);
}

void
ddp_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->ddp.aiojobq);
	TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
	toep->ddp.flags = DDP_OK;
	toep->ddp.active_id = -1;
	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp.lock);
}

void
release_ddp_resources(struct toepcb *toep)
{
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	toep->ddp.flags |= DDP_DEAD;
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		free_ddp_buffer(toep->td, &toep->ddp.db[i]);
	}
	while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		free_pageset(toep->td, ps);
	}
	ddp_complete_all(toep, 0);
	DDP_UNLOCK(toep);
}

void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		MPASS(toep->ddp.db[i].job == NULL);
		MPASS(toep->ddp.db[i].ps == NULL);
	}
	MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
	MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
}

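/*
 * Retire one hardware DDP buffer slot after its job has completed or
 * been cancelled: fix up the active-buffer accounting, recycle the
 * pageset, and clear the slot's ACTIVE flag.
 */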
static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	unsigned int db_flag;

	toep->ddp.active_count--;
	if (toep->ddp.active_id == db_idx) {
		if (toep->ddp.active_count == 0) {
			KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
			    ("%s: active_count mismatch", __func__));
			toep->ddp.active_id = -1;
		} else
			toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
		    toep->tid, toep->ddp.active_id);
#endif
	} else {
		KASSERT(toep->ddp.active_count != 0 &&
		    toep->ddp.active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	db->cancel_pending = 0;
	db->job = NULL;
	recycle_pageset(toep, db->ps);
	db->ps = NULL;

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp.flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));
	toep->ddp.flags &= ~db_flag;
}

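/*
 * Account for data the chip had already placed into active DDP buffers
 * when the connection is falling back to non-DDP receive.  'n' bytes
 * are credited to the jobs backing the active buffers, which are then
 * completed or requeued as appropriate.
 */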
/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_idx;
	unsigned int db_flag;

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
			toep->ddp.waiting_count++;
		} else
			aio_ddp_cancel_one(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
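
/*
 * Build a CPL_SET_TCB_FIELD as a ULP_TX sub-command.  Each sub-command
 * must end on a 16B boundary, so a NOOP is appended when the natural
 * length is not a multiple of 16.
 */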
static struct ulp_txpkt *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static struct ulp_txpkt *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct pageset *ps, int offset, uint64_t ddp_flags, uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(ps->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}

static void
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
	    toep->tid, db_idx, len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();

	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE,
		    "%s: tid %u, completing %p (copied %ld, placed %d)",
		    __func__, toep->tid, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp.waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);
}

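/*
 * Called when the chip signals a DDP indicate: data has been placed in
 * the socket buffer while DDP is enabled but no user buffer is posted.
 * Kick the requeue task so a queued AIO request can be set up.
 */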
void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	MPASS(toep->ddp.active_count == 0);
	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp.waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for an
		 * indicate were cancelled.  Those cancels should have
		 * already disabled DDP.  Just ignore this as the data is
		 * going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp.waiting_count);
	ddp_queue_toep(toep);
}

CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1);
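
/*
 * Replies to the SET_TCB_FIELD invalidate issued by t4_aio_cancel_active()
 * come back as CPL_SET_TCB_RPL carrying a DDP0/DDP1 cookie; finish the
 * cancelled or partially completed job here.
 */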
static int
do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int db_idx;
	struct toepcb *toep;
	struct inpcb *inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0;
		MPASS(db_idx < nitems(toep->ddp.db));
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->ddp.db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp.waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

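/*
 * A FIN was received while DDP buffers were outstanding.  Credit any
 * data placed ahead of the FIN to the active jobs and complete them,
 * then complete the remaining queued requests as EOF reads.
 */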
void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_idx;
	unsigned int db_flag;
	int len, placed;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);

	/* - 1 is to ignore the byte for FIN */
	len = be32toh(rcv_nxt) - tp->rcv_nxt - 1;
	tp->rcv_nxt += len;

	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = len;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else {
			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
			    __func__, toep->tid, db_idx, placed);
			aio_complete(job, copied + placed, 0);
		}
		len -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	ddp_complete_all(toep, 0);
}

#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
    F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
    F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
    F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)

extern cpl_handler_t t4_cpl_handler[];

static int
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	uint32_t vld;
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	vld = be32toh(cpl->ddpvld);
	if (__predict_false(vld & DDP_ERR)) {
		panic("%s: DDP error 0x%x (tid %d, toep %p)",
		    __func__, vld, tid, toep);
	}

	if (ulp_mode(toep) == ULP_MODE_ISCSI) {
		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
		return (0);
	}

	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

	return (0);
}

static int
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

	return (0);
}

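/*
 * Ask the hardware to enable DDP on this connection: clear TF_DDP_OFF,
 * enable buffer indicates, and turn off receive coalescing.  DDP_SC_REQ
 * marks the state change as outstanding until the card acts on it.
 */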
static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	DDP_ASSERT_LOCKED(toep);
	toep->ddp.flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
}

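/*
 * Euclid's algorithm: return the highest common factor of n1 and n2.
 * The page-pod setup below uses it to find the largest DDP page size
 * that evenly divides every physically contiguous segment of a buffer.
 */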
static int
calculate_hcf(int n1, int n2)
{
	int a, b, t;

	if (n1 <= n2) {
		a = n1;
		b = n2;
	} else {
		a = n2;
		b = n1;
	}

	while (a != 0) {
		t = a;
		a = b % a;
		b = t;
	}

	return (b);
}

static int
pages_to_nppods(int npages, int ddp_page_shift)
{

	MPASS(ddp_page_shift >= PAGE_SHIFT);

	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}

static int
alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
    struct ppod_reservation *prsv)
{
	vmem_addr_t addr;	/* relative to start of region */

	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (ENOMEM);

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);
#endif

	/*
	 * The hardware tagmask includes an extra invalid bit but the arena was
	 * seeded with valid values only.  An allocation out of this arena will
	 * fit inside the tagmask but won't have the invalid bit set.
	 */
	MPASS((addr & pr->pr_tag_mask) == addr);
	MPASS((addr & pr->pr_invalid_bit) == 0);

	prsv->prsv_pr = pr;
	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
	prsv->prsv_nppods = nppods;

	return (0);
}

int
t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
{
	int i, hcf, seglen, idx, nppods;
	struct ppod_reservation *prsv = &ps->prsv;

	KASSERT(prsv->prsv_nppods == 0,
	    ("%s: page pods already allocated", __func__));

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
	 * the page list.
	 */
	hcf = 0;
	for (i = 0; i < ps->npages; i++) {
		seglen = PAGE_SIZE;
		while (i < ps->npages - 1 &&
		    VM_PAGE_TO_PHYS(ps->pages[i]) + PAGE_SIZE ==
		    VM_PAGE_TO_PHYS(ps->pages[i + 1])) {
			seglen += PAGE_SIZE;
			i++;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	nppods = pages_to_nppods(ps->npages, pr->pr_page_shift[idx]);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (0);
	MPASS(prsv->prsv_nppods > 0);

	return (1);
}

int
t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
    struct ppod_reservation *prsv)
{
	int hcf, seglen, idx, npages, nppods;
	uintptr_t start_pva, end_pva, pva, p1;

	MPASS(buf > 0);
	MPASS(len > 0);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	start_pva = trunc_page(buf);
	end_pva = trunc_page(buf + len - 1);
	pva = start_pva;
	while (pva <= end_pva) {
		seglen = PAGE_SIZE;
		p1 = pmap_kextract(pva);
		pva += PAGE_SIZE;
		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
			seglen += PAGE_SIZE;
			pva += PAGE_SIZE;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 1;
	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

int
t4_alloc_page_pods_for_sgl(struct ppod_region *pr, struct ctl_sg_entry *sgl,
    int entries, struct ppod_reservation *prsv)
{
	int hcf, seglen, idx = 0, npages, nppods, i, len;
	uintptr_t start_pva, end_pva, pva, p1;
	vm_offset_t buf;
	struct ctl_sg_entry *sge;

	MPASS(entries > 0);
	MPASS(sgl);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	for (i = entries - 1; i >= 0; i--) {
		sge = sgl + i;
		buf = (vm_offset_t)sge->addr;
		len = sge->len;
		start_pva = trunc_page(buf);
		end_pva = trunc_page(buf + len - 1);
		pva = start_pva;
		while (pva <= end_pva) {
			seglen = PAGE_SIZE;
			p1 = pmap_kextract(pva);
			pva += PAGE_SIZE;
			while (pva <= end_pva && p1 + seglen ==
			    pmap_kextract(pva)) {
				seglen += PAGE_SIZE;
				pva += PAGE_SIZE;
			}

			hcf = calculate_hcf(hcf, seglen);
			if (hcf < (1 << pr->pr_page_shift[1])) {
				idx = 0;
				goto have_pgsz; /* give up, short circuit */
			}
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 0;
	while (entries--) {
		npages++;
		start_pva = trunc_page((vm_offset_t)sgl->addr);
		end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1);
		npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
		sgl = sgl + 1;
	}
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

void
t4_free_page_pods(struct ppod_reservation *prsv)
{
	struct ppod_region *pr = prsv->prsv_pr;
	vmem_addr_t addr;

	MPASS(prsv != NULL);
	MPASS(prsv->prsv_nppods != 0);

	addr = prsv->prsv_tag & pr->pr_tag_mask;
	MPASS((addr & pr->pr_invalid_bit) == 0);

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
	    pr->pr_arena, addr, prsv->prsv_nppods);
#endif

	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
	prsv->prsv_nppods = 0;
}

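/*
 * Page pods are written to card memory with ULP_TX_MEM_WRITE work
 * requests carrying the pods as immediate data, at most 256 bytes
 * (NUM_ULP_TX_SC_IMM_PPODS pods) per request.
 */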
#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)

int
t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct pageset *ps)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;
	struct ppod_reservation *prsv = &ps->prsv;
	struct ppod_region *pr = prsv->prsv_pr;
	vm_paddr_t pa;

	KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
	    ("%s: page pods already written", __func__));
	MPASS(prsv->prsv_nppods > 0);

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) | prsv->prsv_tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
			    V_PPOD_OFST(ps->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < ps->npages) {
					pa = VM_PAGE_TO_PHYS(ps->pages[idx]);
					ppod->addr[k] = htobe64(pa);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    be64toh(ppod->addr[k]));
#endif
			}
		}

		t4_wrq_tx(sc, wr);
	}
	ps->flags |= PS_PPODS_WRITTEN;

	return (0);
}

static struct mbuf *
alloc_raw_wr_mbuf(int len)
{
	struct mbuf *m;

	if (len <= MHLEN)
		m = m_gethdr(M_NOWAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = NULL;
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = len;
	m->m_len = len;
	set_mbuf_raw_wr(m, true);
	return (m);
}

int
t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
    struct ppod_reservation *prsv, vm_offset_t buf, int buflen,
    struct mbufq *wrq)
{
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz;
	u_int ppod_addr, offset;
	uint32_t cmd;
	struct ppod_region *pr = prsv->prsv_pr;
	uintptr_t end_pva, pva;
	vm_paddr_t pa;
	struct mbuf *m;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	offset = buf & PAGE_MASK;
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	pva = trunc_page(buf);
	end_pva = trunc_page(buf + buflen - 1);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		MPASS(n > 0);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		m = alloc_raw_wr_mbuf(len);
		if (m == NULL)
			return (ENOMEM);
		ulpmc = mtod(m, struct ulp_mem_io *);

		INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(toep->tid) |
			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
			    V_PPOD_OFST(offset));
			ppod->rsvd = 0;

			for (k = 0; k < nitems(ppod->addr); k++) {
				if (pva > end_pva)
					ppod->addr[k] = 0;
				else {
					pa = pmap_kextract(pva);
					ppod->addr[k] = htobe64(pa);
					pva += ddp_pgsz;
				}
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    be64toh(ppod->addr[k]));
#endif
			}

			/*
			 * Walk back 1 segment so that the first address in the
			 * next pod is the same as the last one in the current
			 * pod.
			 */
			pva -= ddp_pgsz;
		}

		mbufq_enqueue(wrq, m);
	}

	MPASS(pva <= end_pva);

	return (0);
}

int
t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
    struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries,
    int xferlen, struct mbufq *wrq)
{
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz;
	u_int ppod_addr, offset, sg_offset = 0;
	uint32_t cmd;
	struct ppod_region *pr = prsv->prsv_pr;
	uintptr_t pva;
	vm_paddr_t pa;
	struct mbuf *m;

	MPASS(sgl != NULL);
	MPASS(entries > 0);
	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	offset = (vm_offset_t)sgl->addr & PAGE_MASK;
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	pva = trunc_page((vm_offset_t)sgl->addr);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		MPASS(n > 0);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		m = alloc_raw_wr_mbuf(len);
		if (m == NULL)
			return (ENOMEM);
		ulpmc = mtod(m, struct ulp_mem_io *);

		INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(toep->tid) |
			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
			ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) |
			    V_PPOD_OFST(offset));
			ppod->rsvd = 0;

			for (k = 0; k < nitems(ppod->addr); k++) {
				pa = pmap_kextract(pva + sg_offset);
				ppod->addr[k] = htobe64(pa);
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    be64toh(ppod->addr[k]));
#endif

				/*
				 * If this is the last entry in a pod,
				 * reuse the same entry for first address
				 * in the next pod.
				 */
				if (k + 1 == nitems(ppod->addr))
					break;

				/*
				 * Don't move to the next DDP page if the
				 * sgl is already finished.
				 */
				if (entries == 0)
					continue;

				sg_offset += ddp_pgsz;
				if (sg_offset == sgl->len) {
					/*
					 * This sgl entry is done.  Go
					 * to the next.
					 */
					entries--;
					sgl++;
					sg_offset = 0;
					if (entries != 0)
						pva = trunc_page(
						    (vm_offset_t)sgl->addr);
				}
			}
		}

		mbufq_enqueue(wrq, m);
	}

	return (0);
}

/*
 * Prepare a pageset for DDP.  This sets up page pods.
 */
static int
prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
{
	struct tom_data *td = sc->tom_softc;

	if (ps->prsv.prsv_nppods == 0 &&
	    !t4_alloc_page_pods_for_ps(&td->pr, ps)) {
		return (0);
	}
	if (!(ps->flags & PS_PPODS_WRITTEN) &&
	    t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
		return (0);
	}

	return (1);
}

int
t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
    const char *name)
{
	int i;

	MPASS(pr != NULL);
	MPASS(r->size > 0);

	pr->pr_start = r->start;
	pr->pr_len = r->size;
	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);

	/* The SGL -> page pod algorithm requires the sizes to be in order. */
	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
			return (ENXIO);
	}

	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
		return (ENXIO);
	pr->pr_alias_shift = fls(pr->pr_tag_mask);
	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);

	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
	    M_FIRSTFIT | M_NOWAIT);
	if (pr->pr_arena == NULL)
		return (ENOMEM);

	return (0);
}

void
t4_free_ppod_region(struct ppod_region *pr)
{

	MPASS(pr != NULL);

	if (pr->pr_arena)
		vmem_destroy(pr->pr_arena);
	bzero(pr, sizeof(*pr));
}

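/*
 * Returns 0 if the cached pageset describes the same user buffer (same
 * range, length, and vmspace generation) and can be reused as-is.
 */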
static int
pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
    int pgoff, int len)
{

	if (ps->start != start || ps->npages != npages ||
	    ps->offset != pgoff || ps->len != len)
		return (1);

	return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
}

static int
hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	struct pageset *ps;
	int n;

	DDP_ASSERT_LOCKED(toep);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);

	if (end - start > MAX_DDP_BUFFER_SIZE) {
		/*
		 * Truncate the request to a short read.
		 * Alternatively, we could DDP in chunks to the larger
		 * buffer, but that would be quite a bit more work.
		 *
		 * When truncating, round the request down to avoid
		 * crossing a cache line on the final transaction.
		 */
		end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
		    __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
		    (unsigned long)(end - (start + pgoff)));
#endif
		job->uaiocb.aio_nbytes = end - (start + pgoff);
		end = round_page(end);
	}

	n = atop(end - start);

	/*
	 * Try to reuse a cached pageset.
	 */
	TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
		if (pscmp(ps, vm, start, n, pgoff,
		    job->uaiocb.aio_nbytes) == 0) {
			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
			toep->ddp.cached_count--;
			*pps = ps;
			return (0);
		}
	}

	/*
	 * If there are too many cached pagesets to create a new one,
	 * free a pageset before creating a new one.
	 */
	KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
	    nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
	if (toep->ddp.active_count + toep->ddp.cached_count ==
	    nitems(toep->ddp.db)) {
		KASSERT(toep->ddp.cached_count > 0,
		    ("no cached pageset to free"));
		ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count--;
		free_pageset(toep->td, ps);
	}
	DDP_UNLOCK(toep);

	/* Create a new pageset. */
	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	ps->pages = (vm_page_t *)(ps + 1);
	ps->vm_timestamp = map->timestamp;
	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ps->pages, n);

	DDP_LOCK(toep);
	if (ps->npages < 0) {
		free(ps, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
	    ps->npages, n));

	ps->offset = pgoff;
	ps->len = job->uaiocb.aio_nbytes;
	refcount_acquire(&vm->vm_refcnt);
	ps->vm = vm;
	ps->start = start;

	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, toep->tid, ps, job, ps->npages);

	*pps = ps;
	return (0);
}

static void
ddp_complete_all(struct toepcb *toep, int error)
{
	struct kaiocb *job;

	DDP_ASSERT_LOCKED(toep);
	while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
		job = TAILQ_FIRST(&toep->ddp.aiojobq);
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, error);
	}
}

static void
aio_ddp_cancel_one(struct kaiocb *job)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0)
		aio_complete(job, copied, 0);
	else
		aio_cancel(job);
}

/*
 * Called when the main loop wants to requeue a job to retry it later.
 * Deals with the race of the job being cancelled while it was being
 * gathered.
 */
static void
aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD) &&
	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
		TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count++;
	} else
		aio_ddp_cancel_one(job);
}

static void
aio_ddp_requeue(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct kaiocb *job;
	struct ddp_buffer *db;
	size_t copied, offset, resid;
	struct pageset *ps;
	struct mbuf *m;
	uint64_t ddp_flags, ddp_flags_mask;
	struct wrqe *wr;
	int buf_flag, db_idx, error;

	DDP_ASSERT_LOCKED(toep);

restart:
	if (toep->ddp.flags & DDP_DEAD) {
		MPASS(toep->ddp.waiting_count == 0);
		MPASS(toep->ddp.active_count == 0);
		return;
	}

	if (toep->ddp.waiting_count == 0 ||
	    toep->ddp.active_count == nitems(toep->ddp.db)) {
		return;
	}

	job = TAILQ_FIRST(&toep->ddp.aiojobq);
	so = job->fd_file->f_data;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	/* We will never get anything unless we are or were connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		SOCKBUF_UNLOCK(sb);
		ddp_complete_all(toep, ENOTCONN);
		return;
	}

	KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
	    ("%s: pending sockbuf data and DDP is active", __func__));

	/* Abort if socket has reported problems. */
	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
	if (so->so_error && sbavail(sb) == 0) {
		toep->ddp.waiting_count--;
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		if (!aio_clear_cancel_function(job)) {
			SOCKBUF_UNLOCK(sb);
			goto restart;
		}

		/*
		 * If this job has previously copied some data, report
		 * a short read and leave the error to be reported by
		 * a future request.
		 */
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			aio_complete(job, copied, 0);
			goto restart;
		}
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		aio_complete(job, -1, error);
		goto restart;
	}

	/*
	 * Door is closed.  If there is pending data in the socket buffer,
	 * deliver it.  If there are pending DDP requests, wait for those
	 * to complete.  Once they have completed, return EOF reads.
	 */
	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		if (toep->ddp.active_count != 0)
			return;
		ddp_complete_all(toep, 0);
		return;
	}

	/*
	 * If DDP is not enabled and there is no pending socket buffer
	 * data, try to enable DDP.
	 */
	if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
		SOCKBUF_UNLOCK(sb);

		/*
		 * Wait for the card to ACK that DDP is enabled before
		 * queueing any buffers.  Currently this waits for an
		 * indicate to arrive.  This could use a TCB_SET_FIELD_RPL
		 * message to know that DDP was enabled instead of waiting
		 * for the indicate which would avoid copying the indicate
		 * if no data is pending.
		 *
		 * XXX: Might want to limit the indicate size to the size
		 * of the first queued request.
		 */
		if ((toep->ddp.flags & DDP_SC_REQ) == 0)
			enable_ddp(sc, toep);
		return;
	}
	SOCKBUF_UNLOCK(sb);

	/*
	 * If another thread is queueing a buffer for DDP, let it
	 * drain any work and return.
	 */
	if (toep->ddp.queueing != NULL)
		return;

	/* Take the next job to prep it for DDP. */
	toep->ddp.waiting_count--;
	TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
	if (!aio_clear_cancel_function(job))
		goto restart;
	toep->ddp.queueing = job;

	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
	error = hold_aio(toep, job, &ps);
	if (error != 0) {
		ddp_complete_one(job, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	SOCKBUF_LOCK(sb);
	if (so->so_error && sbavail(sb) == 0) {
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		aio_complete(job, -1, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		if (toep->ddp.active_count != 0) {
			/*
			 * The door is closed, but there are still pending
			 * DDP buffers.  Requeue.  These jobs will all be
			 * completed once those buffers drain.
			 */
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			return;
		}
		ddp_complete_one(job, 0);
		ddp_complete_all(toep, 0);
		toep->ddp.queueing = NULL;
		return;
	}

sbcopy:
	/*
	 * If the toep is dead, there shouldn't be any data in the socket
	 * buffer, so the above case should have handled this.
	 */
	MPASS(!(toep->ddp.flags & DDP_DEAD));

	/*
	 * If there is pending data in the socket buffer (either
	 * from before the requests were queued or a DDP indicate),
	 * copy those mbufs out directly.
	 */
	copied = 0;
	offset = ps->offset + job->aio_received;
	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
	resid = job->uaiocb.aio_nbytes - job->aio_received;
	m = sb->sb_mb;
	KASSERT(m == NULL || toep->ddp.active_count == 0,
	    ("%s: sockbuf data with active DDP", __func__));
	while (m != NULL && resid > 0) {
		struct iovec iov[1];
		struct uio uio;

		iov[0].iov_base = mtod(m, void *);
		iov[0].iov_len = m->m_len;
		if (iov[0].iov_len > resid)
			iov[0].iov_len = resid;
		uio.uio_iov = iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = iov[0].iov_len;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		error = uiomove_fromphys(ps->pages, offset + copied,
		    uio.uio_resid, &uio);
		MPASS(error == 0 && uio.uio_resid == 0);
		copied += uio.uio_offset;
		resid -= uio.uio_offset;
		m = m->m_next;
	}
	if (copied != 0) {
		sbdrop_locked(sb, copied);
		job->aio_received += copied;

		copied = job->aio_received;
		inp = sotoinpcb(so);
		if (!INP_TRY_WLOCK(inp)) {
			/*
			 * The reference on the socket file descriptor in
			 * the AIO job should keep 'sb' and 'inp' stable.
			 * Our caller has a reference on the 'toep' that
			 * keeps it stable.
			 */
			SOCKBUF_UNLOCK(sb);
			DDP_UNLOCK(toep);
			INP_WLOCK(inp);
			DDP_LOCK(toep);
			SOCKBUF_LOCK(sb);

			/*
			 * If the socket has been closed, we should detect
			 * that and complete this request if needed on
			 * the next trip around the loop.
			 */
		}
		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
		INP_WUNLOCK(inp);
		if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
			/*
			 * We filled the entire buffer with socket
			 * data, DDP is not being used, or the socket
			 * is being shut down, so complete the
			 * request.
			 */
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * If DDP is not enabled, requeue this request and restart.
		 * This will either enable DDP or wait for more data to
		 * arrive on the socket buffer.
		 */
		if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * An indicate might have arrived and been added to
		 * the socket buffer while it was unlocked after the
		 * copy to lock the INP.  If so, restart the copy.
		 */
		if (sbavail(sb) != 0)
			goto sbcopy;
	}
	SOCKBUF_UNLOCK(sb);

	if (prep_pageset(sc, toep, ps) == 0) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need to retry this later.  Mostly need a trigger
		 * when page pods are freed up.
		 */
		printf("%s: prep_pageset failed\n", __func__);
		return;
	}

	/* Determine which DDP buffer to use. */
	if (toep->ddp.db[0].job == NULL) {
		db_idx = 0;
	} else {
		MPASS(toep->ddp.db[1].job == NULL);
		db_idx = 1;
	}

	ddp_flags = 0;
	ddp_flags_mask = 0;
	if (db_idx == 0) {
		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
		buf_flag = DDP_BUF0_ACTIVE;
	} else {
		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
		buf_flag = DDP_BUF1_ACTIVE;
	}
	MPASS((toep->ddp.flags & buf_flag) == 0);
	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
		MPASS(db_idx == 0);
		MPASS(toep->ddp.active_id == -1);
		MPASS(toep->ddp.active_count == 0);
		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
	}

	/*
	 * The TID for this connection should still be valid.  If DDP_DEAD
	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
	 * this far anyway.  Even if the socket is closing on the other
	 * end, the AIO job holds a reference on this end of the socket
	 * which will keep it open and keep the TCP PCB attached until
	 * after the job is completed.
	 */
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
	    ddp_flags, ddp_flags_mask);
	if (wr == NULL) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need a way to kick a retry here.
		 *
		 * XXX: We know the fixed size needed and could
		 * preallocate this using a blocking request at the
		 * start of the task to avoid having to handle this
		 * edge case.
		 */
		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
		return;
	}

	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
		recycle_pageset(toep, ps);
		aio_ddp_cancel_one(job);
		toep->ddp.queueing = NULL;
		goto restart;
	}

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE,
	    "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__,
	    toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
#endif
	/* Give the chip the go-ahead. */
	t4_wrq_tx(sc, wr);
	db = &toep->ddp.db[db_idx];
	db->cancel_pending = 0;
	db->job = job;
	db->ps = ps;
	toep->ddp.queueing = NULL;
	toep->ddp.flags |= buf_flag;
	toep->ddp.active_count++;
	if (toep->ddp.active_count == 1) {
		MPASS(toep->ddp.active_id == -1);
		toep->ddp.active_id = db_idx;
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp.active_id);
	}
}

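/*
 * Schedule the requeue task to run the DDP state machine for this
 * connection from a taskqueue context.  DDP_TASK_ACTIVE prevents the
 * task from being enqueued more than once.
 */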
void
ddp_queue_toep(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	if (toep->ddp.flags & DDP_TASK_ACTIVE)
		return;
	toep->ddp.flags |= DDP_TASK_ACTIVE;
	hold_toepcb(toep);
	soaio_enqueue(&toep->ddp.requeue_task);
}

static void
aio_ddp_requeue_task(void *context, int pending)
{
	struct toepcb *toep = context;

	DDP_LOCK(toep);
	aio_ddp_requeue(toep);
	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
	DDP_UNLOCK(toep);

	free_toepcb(toep);
}

static void
t4_aio_cancel_active(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	uint64_t valid_flag;
	int i;

	DDP_LOCK(toep);
	if (aio_cancel_cleared(job)) {
		DDP_UNLOCK(toep);
		aio_ddp_cancel_one(job);
		return;
	}

	for (i = 0; i < nitems(toep->ddp.db); i++) {
		if (toep->ddp.db[i].job == job) {
			/* Should only ever get one cancel request for a job. */
			MPASS(toep->ddp.db[i].cancel_pending == 0);

			/*
			 * Invalidate this buffer.  It will be
			 * cancelled or partially completed once the
			 * card ACKs the invalidate.
			 */
			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
			    V_TF_DDP_BUF1_VALID(1);
			t4_set_tcb_field(sc, toep->ctrlq, toep,
			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
			    CPL_COOKIE_DDP0 + i);
			toep->ddp.db[i].cancel_pending = 1;
			CTR2(KTR_CXGBE, "%s: request %p marked pending",
			    __func__, job);
			break;
		}
	}
	DDP_UNLOCK(toep);
}

static void
t4_aio_cancel_queued(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	DDP_LOCK(toep);
	if (!aio_cancel_cleared(job)) {
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (toep->ddp.waiting_count == 0)
			ddp_queue_toep(toep);
	}
	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
	DDP_UNLOCK(toep);

	aio_ddp_cancel_one(job);
}

int
t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	/* Ignore writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_READ)
		return (EOPNOTSUPP);

	DDP_LOCK(toep);

	/*
	 * XXX: Think about possibly returning errors for ENOTCONN,
	 * etc.  Perhaps the caller would only queue the request
	 * if it failed with EOPNOTSUPP?
	 */

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
#endif
	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
	toep->ddp.waiting_count++;
	toep->ddp.flags |= DDP_OK;

	/*
	 * Try to handle this request synchronously.  If this has
	 * to block because the task is running, it will just bail
	 * and let the task handle it instead.
	 */
	aio_ddp_requeue(toep);
	DDP_UNLOCK(toep);
	return (0);
}

void
t4_ddp_mod_load(void)
{

	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
	TAILQ_INIT(&ddp_orphan_pagesets);
	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
}

void
t4_ddp_mod_unload(void)
{

	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
	mtx_destroy(&ddp_orphan_pagesets_lock);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
}