/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received backend3
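/*
 * For example, the placement paths below advance the running total with
 * 'job->aio_received += placed' and the completion paths report it via
 * 'aio_complete(job, job->aio_received, 0)'.
 */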
static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)
/*
 * A page set holds information about a buffer used for DDP.  The page
 * set holds resources such as the VM pages backing the buffer (either
 * held or wired) and the page pods associated with the buffer.
 * Recently used page sets are cached to allow for efficient reuse of
 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
 * Note that cached page sets keep the backing pages wired.  The
 * number of wired pages is capped by only allowing for two wired
 * pagesets per connection.  This is not a perfect cap, but is a
 * trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
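/*
 * Illustrative userland sketch (hypothetical, not part of this file):
 * ping-ponging a pair of buffers keeps both cached pagesets wired.
 *
 *	static char buf[2][65536];
 *	static struct aiocb cb[2];
 *	int i = 0;
 *	for (;;) {
 *		cb[i].aio_fildes = sock;
 *		cb[i].aio_buf = buf[i];
 *		cb[i].aio_nbytes = sizeof(buf[i]);
 *		aio_read(&cb[i]);	// hits the cached, wired pageset
 *		i ^= 1;			// consume the other buffer
 *	}
 *
 * A third distinct buffer would evict a cached pageset and pay the VM
 * fault cost on every use.
 */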
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	if (ps->flags & PS_WIRED) {
		for (i = 0; i < ps->npages; i++) {
			p = ps->pages[i];
			vm_page_lock(p);
			vm_page_unwire(p, PQ_INACTIVE);
			vm_page_unlock(p);
		}
	} else
		vm_page_unhold_pages(ps->pages, ps->npages);
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}
static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}
static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp_flags & DDP_DEAD) && ps->flags & PS_WIRED) {
		KASSERT(toep->ddp_cached_count + toep->ddp_active_count <
		    nitems(toep->db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp_cached_pagesets, ps, link);
		toep->ddp_cached_count++;
	} else
		free_pageset(toep->td, ps);
}
static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}
static void
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
	}

	if (db->ps)
		free_pageset(td, db->ps);
}
void
ddp_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->ddp_aiojobq);
	TASK_INIT(&toep->ddp_requeue_task, 0, aio_ddp_requeue_task, toep);
	toep->ddp_active_id = -1;
	mtx_init(&toep->ddp_lock, "t4 ddp", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp_lock);
}
void
release_ddp_resources(struct toepcb *toep)
{
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	toep->ddp_flags |= DDP_DEAD;
	for (i = 0; i < nitems(toep->db); i++) {
		free_ddp_buffer(toep->td, &toep->db[i]);
	}
	while ((ps = TAILQ_FIRST(&toep->ddp_cached_pagesets)) != NULL) {
		TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
		free_pageset(toep->td, ps);
	}
	ddp_complete_all(toep, 0);
	DDP_UNLOCK(toep);
}
void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS(!(toep->ddp_flags & DDP_TASK_ACTIVE));
	for (i = 0; i < nitems(toep->db); i++) {
		MPASS(toep->db[i].job == NULL);
		MPASS(toep->db[i].ps == NULL);
	}
	MPASS(TAILQ_EMPTY(&toep->ddp_cached_pagesets));
	MPASS(TAILQ_EMPTY(&toep->ddp_aiojobq));
}
static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	unsigned int db_flag;

	toep->ddp_active_count--;
	if (toep->ddp_active_id == db_idx) {
		if (toep->ddp_active_count == 0) {
			KASSERT(toep->db[db_idx ^ 1].job == NULL,
			    ("%s: active_count mismatch", __func__));
			toep->ddp_active_id = -1;
		} else
			toep->ddp_active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp_active_id);
#endif
	} else {
		KASSERT(toep->ddp_active_count != 0 &&
		    toep->ddp_active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	db->cancel_pending = 0;
	db->job = NULL;
	recycle_pageset(toep, db->ps);
	db->ps = NULL;

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp_flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp_flags));
	toep->ddp_flags &= ~db_flag;
}
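/*
 * Example of the accounting above: with both buffers outstanding and
 * ddp_active_id == 0, completing buffer 0 leaves one active buffer, so
 * ddp_active_id flips to 1 via the XOR; completing the last outstanding
 * buffer resets ddp_active_id to -1.
 */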
/* XXX: handle_ddp_data code duplication */
static void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_flag, db_idx;

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
#ifndef USE_DDP_RX_FLOW_CONTROL
	toep->rx_credits += n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp_active_count > 0) {
		MPASS(toep->ddp_active_id != -1);
		db_idx = toep->ddp_active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp_flags & db_flag) != 0);
		db = &toep->db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp_aiojobq, job, list);
			toep->ddp_waiting_count++;
		} else
			aio_cancel(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(n == 0);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
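/*
 * Either command is laid out as a ulp_txpkt header, a ulptx_idata
 * sub-command marking the payload as immediate data, and the CPL core
 * itself.  When the total is not a multiple of 16 bytes, a zero-length
 * ULP_TX_SC_NOOP sub-command is appended as padding so that the next
 * ULPTX command starts on a 16-byte boundary (see the helpers below).
 */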
static struct ulp_txpkt *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return ((struct ulp_txpkt *)(ulpsc + 1));
	}
	return ((struct ulp_txpkt *)ulpsc);
}
static struct ulp_txpkt *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return ((struct ulp_txpkt *)(ulpsc + 1));
	}
	return ((struct ulp_txpkt *)ulpsc);
}
static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct pageset *ps, int offset, uint64_t ddp_flags, uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);
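	/*
	 * Worked sizing example, assuming the usual sizes of the shared
	 * firmware structures (8B ulp_txpkt, 8B ulptx_idata, 24B
	 * cpl_set_tcb_field_core, 8B cpl_rx_data_ack_core):
	 * LEN__SET_TCB_FIELD_ULP = 40 rounds up to 48 and
	 * LEN__RX_DATA_ACK_ULP = 24 rounds up to 32, so
	 * len = 16 + 3 * 48 + 32 = 192 bytes.
	 */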
	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(ps->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}
static int
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp_active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp_active_id, toep->tid));
	db = &toep->db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
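	/*
	 * Worked example (hypothetical sequence numbers): if tp->rcv_nxt
	 * is 1000 and an RX_DDP_COMPLETE arrives with rcv_nxt 5096, then
	 * len = 0 + (5096 - 1000) = 4096 bytes were placed since the last
	 * update.  For RX_DATA_DDP the same adjustment adds any bytes
	 * placed before the most recent DMA to the DMA's own length.
	 */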
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: DDP[%d] placed %d bytes (%#x)", __func__, db_idx,
	    len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();

#ifndef USE_DDP_RX_FLOW_CONTROL
	toep->rx_credits += len;
#endif

	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: completing %p (copied %ld, placed %d)",
		    __func__, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp_waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}
void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	MPASS(toep->ddp_active_count == 0);
	MPASS((toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp_waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for an
		 * indicate were cancelled.  Those cancels should have
		 * already disabled DDP.  Just ignore this as the data is
		 * going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp_waiting_count);
	ddp_queue_toep(toep);
}
enum {
	DDP_BUF0_INVALIDATED = 0x2,
	DDP_BUF1_INVALIDATED
};
void
handle_ddp_tcb_rpl(struct toepcb *toep, const struct cpl_set_tcb_rpl *cpl)
{
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF0_INVALIDATED):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF1_INVALIDATED):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		db_idx = G_COOKIE(cpl->cookie) - DDP_BUF0_INVALIDATED;
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp_waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}
}
void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_flag, db_idx;
	int len, placed;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);
	len = be32toh(rcv_nxt) - tp->rcv_nxt;

	tp->rcv_nxt += len;
#ifndef USE_DDP_RX_FLOW_CONTROL
	toep->rx_credits += len;
#endif

	while (toep->ddp_active_count > 0) {
		MPASS(toep->ddp_active_id != -1);
		db_idx = toep->ddp_active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp_flags & db_flag) != 0);
		db = &toep->db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = len;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else {
			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
			    __func__, toep->tid, db_idx, placed);
			aio_complete(job, copied + placed, 0);
		}
		len -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(len == 0);
	ddp_complete_all(toep, 0);
}
#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
    F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
    F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
    F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)

extern cpl_handler_t t4_cpl_handler[];
static int
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	uint32_t vld;
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	vld = be32toh(cpl->ddpvld);
	if (__predict_false(vld & DDP_ERR)) {
		panic("%s: DDP error 0x%x (tid %d, toep %p)",
		    __func__, vld, tid, toep);
	}

	if (toep->ulp_mode == ULP_MODE_ISCSI) {
		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
		return (0);
	}

	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

	return (0);
}
static int
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

	return (0);
}
static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp_flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp_flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	DDP_ASSERT_LOCKED(toep);
	toep->ddp_flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0,
	    toep->ofld_rxq->iq.abs_id);
	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0, toep->ofld_rxq->iq.abs_id);
}
/* Euclid's algorithm: highest common factor of the two segment sizes. */
static int
calculate_hcf(int n1, int n2)
{
	int a, b, tmp;

	if (n1 <= n2) {
		a = n1;
		b = n2;
	} else {
		a = n2;
		b = n1;
	}

	while (a != 0) {
		tmp = a;
		a = b % a;
		b = tmp;
	}

	return (b);
}
static int
pages_to_nppods(int npages, int ddp_page_shift)
{

	MPASS(ddp_page_shift >= PAGE_SHIFT);

	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}
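/*
 * Worked example (hypothetical buffer, assuming 4KB VM pages and
 * PPOD_PAGES == 4): 32 VM pages using a 16KB DDP page size form
 * 32 >> (14 - 12) = 8 DDP pages, which need howmany(8, 4) = 2 page
 * pods.
 */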
static int
alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
    struct ppod_reservation *prsv)
{
	vmem_addr_t addr;       /* relative to start of region */

	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (ENOMEM);

	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);

	/*
	 * The hardware tagmask includes an extra invalid bit but the arena was
	 * seeded with valid values only.  An allocation out of this arena will
	 * fit inside the tagmask but won't have the invalid bit set.
	 */
	MPASS((addr & pr->pr_tag_mask) == addr);
	MPASS((addr & pr->pr_invalid_bit) == 0);

	prsv->prsv_pr = pr;
	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
	prsv->prsv_nppods = nppods;

	return (0);
}
int
t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
{
	int i, hcf, seglen, idx, nppods;
	struct ppod_reservation *prsv = &ps->prsv;

	KASSERT(prsv->prsv_nppods == 0,
	    ("%s: page pods already allocated", __func__));

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
	 * the page list.
	 */
	hcf = 0;
	for (i = 0; i < ps->npages; i++) {
		seglen = PAGE_SIZE;
		while (i < ps->npages - 1 &&
		    ps->pages[i]->phys_addr + PAGE_SIZE ==
		    ps->pages[i + 1]->phys_addr) {
			seglen += PAGE_SIZE;
			i++;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	nppods = pages_to_nppods(ps->npages, pr->pr_page_shift[idx]);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (0);
	MPASS(prsv->prsv_nppods > 0);

	return (1);
}
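/*
 * Worked example with hypothetical page shifts {12, 14, 16, 18}: if the
 * pageset's physical pages form contiguous 32KB runs, the HCF of the
 * segment lengths is 32KB.  256KB and 64KB do not divide it evenly but
 * 16KB (idx 1) does, so the 16KB DDP page size is selected.
 */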
int
t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
    struct ppod_reservation *prsv)
{
	int hcf, seglen, idx, npages, nppods;
	uintptr_t start_pva, end_pva, pva, p1;

	MPASS(buf > 0);
	MPASS(len > 0);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	start_pva = trunc_page(buf);
	end_pva = trunc_page(buf + len - 1);
	pva = start_pva;
	while (pva <= end_pva) {
		seglen = PAGE_SIZE;
		p1 = pmap_kextract(pva);
		pva += PAGE_SIZE;
		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
			seglen += PAGE_SIZE;
			pva += PAGE_SIZE;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 1;
	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}
void
t4_free_page_pods(struct ppod_reservation *prsv)
{
	struct ppod_region *pr = prsv->prsv_pr;
	vmem_addr_t addr;

	MPASS(prsv != NULL);
	MPASS(prsv->prsv_nppods != 0);

	addr = prsv->prsv_tag & pr->pr_tag_mask;
	MPASS((addr & pr->pr_invalid_bit) == 0);

	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
	    pr->pr_arena, addr, prsv->prsv_nppods);

	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
	prsv->prsv_nppods = 0;
}
#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
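/*
 * Illustrative arithmetic: assuming the usual 64-byte page pod
 * (PPOD_SIZE == 64), 256 bytes of immediate data per ULP_TX_SC_IMM
 * sub-command works out to 4 page pods per write.
 */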
int
t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct pageset *ps)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;
	struct ppod_reservation *prsv = &ps->prsv;
	struct ppod_region *pr = prsv->prsv_pr;

	KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
	    ("%s: page pods already written", __func__));
	MPASS(prsv->prsv_nppods > 0);

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) | prsv->prsv_tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
			    V_PPOD_OFST(ps->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < ps->npages) {
					ppod->addr[k] =
					    htobe64(ps->pages[idx]->phys_addr);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#ifdef VERBOSE_TRACES
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}
		}

		t4_wrq_tx(sc, wr);
	}
	ps->flags |= PS_PPODS_WRITTEN;

	return (0);
}
int
t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct ppod_reservation *prsv, vm_offset_t buf, int buflen)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz;
	u_int ppod_addr, offset;
	uint32_t cmd;
	struct ppod_region *pr = prsv->prsv_pr;
	uintptr_t end_pva, pva, pa;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	offset = buf & PAGE_MASK;
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	pva = trunc_page(buf);
	end_pva = trunc_page(buf + buflen - 1);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		MPASS(n > 0);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) |
			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
			    V_PPOD_OFST(offset));
			ppod->rsvd = 0;

			for (k = 0; k < nitems(ppod->addr); k++) {
				if (pva > end_pva)
					ppod->addr[k] = 0;
				else {
					pa = pmap_kextract(pva);
					ppod->addr[k] = htobe64(pa);
					pva += ddp_pgsz;
				}
#ifdef VERBOSE_TRACES
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

			/*
			 * Walk back 1 segment so that the first address in the
			 * next pod is the same as the last one in the current
			 * pod.
			 */
			pva -= ddp_pgsz;
		}

		t4_wrq_tx(sc, wr);
	}

	MPASS(pva <= end_pva);

	return (0);
}
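/*
 * Note on the walk-back above: a page pod carries five addresses but
 * advances the buffer by only PPOD_PAGES (four) DDP pages, so the first
 * address of each pod duplicates the last address of the previous one.
 * This lets the hardware handle placements that straddle a pod boundary.
 */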
static void
wire_pageset(struct pageset *ps)
{
	vm_page_t p;
	int i;

	KASSERT(!(ps->flags & PS_WIRED), ("pageset already wired"));

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_lock(p);
		vm_page_wire(p);
		vm_page_unhold(p);
		vm_page_unlock(p);
	}
	ps->flags |= PS_WIRED;
}
/*
 * Prepare a pageset for DDP.  This wires the pageset and sets up page
 * pods for it if necessary.
 */
static int
prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
{
	struct tom_data *td = sc->tom_softc;

	if (!(ps->flags & PS_WIRED))
		wire_pageset(ps);
	if (ps->prsv.prsv_nppods == 0 &&
	    !t4_alloc_page_pods_for_ps(&td->pr, ps)) {
		return (0);
	}
	if (!(ps->flags & PS_PPODS_WRITTEN) &&
	    t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
		return (0);
	}

	return (1);
}
int
t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
    const char *name)
{
	int i;

	MPASS(pr != NULL);
	MPASS(r->size > 0);

	pr->pr_start = r->start;
	pr->pr_len = r->size;
	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);

	/* The SGL -> page pod algorithm requires the sizes to be in order. */
	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
			return (ENXIO);
	}

	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
		return (ENXIO);
	pr->pr_alias_shift = fls(pr->pr_tag_mask);
	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);

	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
	    M_FIRSTFIT | M_NOWAIT);
	if (pr->pr_arena == NULL)
		return (ENOMEM);

	return (0);
}
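/*
 * Worked example (hypothetical region, assuming V_PPOD_TAG passes the
 * low tag bits through unchanged): with r->size = 64MB, fls(r->size) =
 * 27, so pr_tag_mask covers the low 27 bits of the tag and
 * pr_alias_mask the tag bits above them.  pr_invalid_bit is the top bit
 * of the tag mask; arena addresses stay below 64MB, so valid
 * allocations always have that bit clear.
 */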
void
t4_free_ppod_region(struct ppod_region *pr)
{

	MPASS(pr != NULL);

	if (pr->pr_arena)
		vmem_destroy(pr->pr_arena);
	bzero(pr, sizeof(*pr));
}
static int
pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
    int pgoff, int len)
{

	if (ps->start != start || ps->npages != npages ||
	    ps->offset != pgoff || ps->len != len)
		return (1);

	return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
}
static int
hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	struct pageset *ps;
	int n;

	DDP_ASSERT_LOCKED(toep);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);

	if (end - start > MAX_DDP_BUFFER_SIZE) {
		/*
		 * Truncate the request to a short read.
		 * Alternatively, we could DDP in chunks to the larger
		 * buffer, but that would be quite a bit more work.
		 *
		 * When truncating, round the request down to avoid
		 * crossing a cache line on the final transaction.
		 */
		end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
		    __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
		    (unsigned long)(end - (start + pgoff)));
#endif
		job->uaiocb.aio_nbytes = end - (start + pgoff);
	}
	end = round_page(end);

	n = atop(end - start);

	/*
	 * Try to reuse a cached pageset.
	 */
	TAILQ_FOREACH(ps, &toep->ddp_cached_pagesets, link) {
		if (pscmp(ps, vm, start, n, pgoff,
		    job->uaiocb.aio_nbytes) == 0) {
			TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
			toep->ddp_cached_count--;
			*pps = ps;
			return (0);
		}
	}

	/*
	 * If there are too many cached pagesets to create a new one,
	 * free a pageset before creating a new one.
	 */
	KASSERT(toep->ddp_active_count + toep->ddp_cached_count <=
	    nitems(toep->db), ("%s: too many wired pagesets", __func__));
	if (toep->ddp_active_count + toep->ddp_cached_count ==
	    nitems(toep->db)) {
		KASSERT(toep->ddp_cached_count > 0,
		    ("no cached pageset to free"));
		ps = TAILQ_LAST(&toep->ddp_cached_pagesets, pagesetq);
		TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
		toep->ddp_cached_count--;
		free_pageset(toep->td, ps);
	}
	DDP_UNLOCK(toep);

	/* Create a new pageset. */
	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	ps->pages = (vm_page_t *)(ps + 1);
	ps->vm_timestamp = map->timestamp;
	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ps->pages, n);

	DDP_LOCK(toep);
	if (ps->npages < 0) {
		free(ps, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
	    ps->npages, n));

	ps->offset = pgoff;
	ps->len = job->uaiocb.aio_nbytes;
	atomic_add_int(&vm->vm_refcnt, 1);
	ps->vm = vm;
	ps->start = start;

	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, toep->tid, ps, job, ps->npages);

	*pps = ps;
	return (0);
}
static void
ddp_complete_all(struct toepcb *toep, int error)
{
	struct kaiocb *job;

	DDP_ASSERT_LOCKED(toep);
	while (!TAILQ_EMPTY(&toep->ddp_aiojobq)) {
		job = TAILQ_FIRST(&toep->ddp_aiojobq);
		TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
		toep->ddp_waiting_count--;
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, error);
	}
}
static void
aio_ddp_cancel_one(struct kaiocb *job)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0)
		aio_complete(job, copied, 0);
	else
		aio_cancel(job);
}
/*
 * Called when the main loop wants to requeue a job to retry it later.
 * Deals with the race of the job being cancelled while it was being
 * examined.
 */
static void
aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp_flags & DDP_DEAD) &&
	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
		TAILQ_INSERT_HEAD(&toep->ddp_aiojobq, job, list);
		toep->ddp_waiting_count++;
	} else
		aio_ddp_cancel_one(job);
}
static void
aio_ddp_requeue(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct kaiocb *job;
	struct ddp_buffer *db;
	size_t copied, offset, resid;
	struct pageset *ps;
	struct mbuf *m;
	uint64_t ddp_flags, ddp_flags_mask;
	struct wrqe *wr;
	int buf_flag, db_idx, error;

	DDP_ASSERT_LOCKED(toep);

restart:
	if (toep->ddp_flags & DDP_DEAD) {
		MPASS(toep->ddp_waiting_count == 0);
		MPASS(toep->ddp_active_count == 0);
		return;
	}

	if (toep->ddp_waiting_count == 0 ||
	    toep->ddp_active_count == nitems(toep->db)) {
		return;
	}

	job = TAILQ_FIRST(&toep->ddp_aiojobq);
	so = job->fd_file->f_data;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	/* We will never get anything unless we are or were connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		SOCKBUF_UNLOCK(sb);
		ddp_complete_all(toep, ENOTCONN);
		return;
	}

	KASSERT(toep->ddp_active_count == 0 || sbavail(sb) == 0,
	    ("%s: pending sockbuf data and DDP is active", __func__));

	/* Abort if socket has reported problems. */
	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
	if (so->so_error && sbavail(sb) == 0) {
		toep->ddp_waiting_count--;
		TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
		if (!aio_clear_cancel_function(job)) {
			SOCKBUF_UNLOCK(sb);
			goto restart;
		}

		/*
		 * If this job has previously copied some data, report
		 * a short read and leave the error to be reported by
		 * a future request.
		 */
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			aio_complete(job, copied, 0);
			goto restart;
		}
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		aio_complete(job, -1, error);
		goto restart;
	}

	/*
	 * Door is closed.  If there is pending data in the socket buffer,
	 * deliver it.  If there are pending DDP requests, wait for those
	 * to complete.  Once they have completed, return EOF reads.
	 */
	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		if (toep->ddp_active_count != 0)
			return;
		ddp_complete_all(toep, 0);
		return;
	}

	/*
	 * If DDP is not enabled and there is no pending socket buffer
	 * data, try to enable DDP.
	 */
	if (sbavail(sb) == 0 && (toep->ddp_flags & DDP_ON) == 0) {
		SOCKBUF_UNLOCK(sb);

		/*
		 * Wait for the card to ACK that DDP is enabled before
		 * queueing any buffers.  Currently this waits for an
		 * indicate to arrive.  This could use a TCB_SET_FIELD_RPL
		 * message to know that DDP was enabled instead of waiting
		 * for the indicate which would avoid copying the indicate
		 * if no data is pending.
		 *
		 * XXX: Might want to limit the indicate size to the size
		 * of the first queued request.
		 */
		if ((toep->ddp_flags & DDP_SC_REQ) == 0)
			enable_ddp(sc, toep);
		return;
	}
	SOCKBUF_UNLOCK(sb);

	/*
	 * If another thread is queueing a buffer for DDP, let it
	 * drain any work and return.
	 */
	if (toep->ddp_queueing != NULL)
		return;

	/* Take the next job to prep it for DDP. */
	toep->ddp_waiting_count--;
	TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
	if (!aio_clear_cancel_function(job))
		goto restart;
	toep->ddp_queueing = job;

	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
	error = hold_aio(toep, job, &ps);
	if (error != 0) {
		ddp_complete_one(job, error);
		toep->ddp_queueing = NULL;
		goto restart;
	}

	SOCKBUF_LOCK(sb);
	if (so->so_error && sbavail(sb) == 0) {
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp_queueing = NULL;
			goto restart;
		}

		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		aio_complete(job, -1, error);
		toep->ddp_queueing = NULL;
		goto restart;
	}

	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		if (toep->ddp_active_count != 0) {
			/*
			 * The door is closed, but there are still pending
			 * DDP buffers.  Requeue.  These jobs will all be
			 * completed once those buffers drain.
			 */
			aio_ddp_requeue_one(toep, job);
			toep->ddp_queueing = NULL;
			return;
		}
		ddp_complete_one(job, 0);
		ddp_complete_all(toep, 0);
		toep->ddp_queueing = NULL;
		return;
	}

sbcopy:
	/*
	 * If the toep is dead, there shouldn't be any data in the socket
	 * buffer, so the above case should have handled this.
	 */
	MPASS(!(toep->ddp_flags & DDP_DEAD));

	/*
	 * If there is pending data in the socket buffer (either
	 * from before the requests were queued or a DDP indicate),
	 * copy those mbufs out directly.
	 */
	copied = 0;
	offset = ps->offset + job->aio_received;
	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
	resid = job->uaiocb.aio_nbytes - job->aio_received;
	m = sb->sb_mb;
	KASSERT(m == NULL || toep->ddp_active_count == 0,
	    ("%s: sockbuf data with active DDP", __func__));
	while (m != NULL && resid > 0) {
		struct iovec iov[1];
		struct uio uio;
		int error;

		iov[0].iov_base = mtod(m, void *);
		iov[0].iov_len = m->m_len;
		if (iov[0].iov_len > resid)
			iov[0].iov_len = resid;
		uio.uio_iov = iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = iov[0].iov_len;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		error = uiomove_fromphys(ps->pages, offset + copied,
		    uio.uio_resid, &uio);
		MPASS(error == 0 && uio.uio_resid == 0);
		copied += uio.uio_offset;
		resid -= uio.uio_offset;
		m = m->m_next;
	}
	if (copied != 0) {
		sbdrop_locked(sb, copied);
		job->aio_received += copied;
		copied = job->aio_received;
		inp = sotoinpcb(so);
		if (!INP_TRY_WLOCK(inp)) {
			/*
			 * The reference on the socket file descriptor in
			 * the AIO job should keep 'sb' and 'inp' stable.
			 * Our caller has a reference on the 'toep' that
			 * keeps it stable.
			 */
			SOCKBUF_UNLOCK(sb);
			DDP_UNLOCK(toep);
			INP_WLOCK(inp);
			DDP_LOCK(toep);
			SOCKBUF_LOCK(sb);

			/*
			 * If the socket has been closed, we should detect
			 * that and complete this request if needed on
			 * the next trip around the loop.
			 */
		}
		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
		INP_WUNLOCK(inp);
		if (resid == 0 || toep->ddp_flags & DDP_DEAD) {
			/*
			 * We filled the entire buffer with socket
			 * data, DDP is not being used, or the socket
			 * is being shut down, so complete the
			 * request.
			 */
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp_queueing = NULL;
			goto restart;
		}

		/*
		 * If DDP is not enabled, requeue this request and restart.
		 * This will either enable DDP or wait for more data to
		 * arrive on the socket buffer.
		 */
		if ((toep->ddp_flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_ddp_requeue_one(toep, job);
			toep->ddp_queueing = NULL;
			goto restart;
		}

		/*
		 * An indicate might have arrived and been added to
		 * the socket buffer while it was unlocked after the
		 * copy to lock the INP.  If so, restart the copy.
		 */
		if (sbavail(sb) != 0)
			goto sbcopy;
	}
	SOCKBUF_UNLOCK(sb);

	if (prep_pageset(sc, toep, ps) == 0) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp_queueing = NULL;

		/*
		 * XXX: Need to retry this later.  Mostly need a trigger
		 * when page pods are freed up.
		 */
		printf("%s: prep_pageset failed\n", __func__);
		return;
	}

	/* Determine which DDP buffer to use. */
	if (toep->db[0].job == NULL) {
		db_idx = 0;
	} else {
		MPASS(toep->db[1].job == NULL);
		db_idx = 1;
	}

	ddp_flags = 0;
	ddp_flags_mask = 0;
	if (db_idx == 0) {
		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
		buf_flag = DDP_BUF0_ACTIVE;
	} else {
		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
		buf_flag = DDP_BUF1_ACTIVE;
	}
	MPASS((toep->ddp_flags & buf_flag) == 0);
	if ((toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
		MPASS(db_idx == 0);
		MPASS(toep->ddp_active_id == -1);
		MPASS(toep->ddp_active_count == 0);
		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
	}

	/*
	 * The TID for this connection should still be valid.  If DDP_DEAD
	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
	 * this far anyway.  Even if the socket is closing on the other
	 * end, the AIO job holds a reference on this end of the socket
	 * which will keep it open and keep the TCP PCB attached until
	 * after the job is completed.
	 */
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
	    ddp_flags, ddp_flags_mask);
	if (wr == NULL) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp_queueing = NULL;

		/*
		 * XXX: Need a way to kick a retry here.
		 *
		 * XXX: We know the fixed size needed and could
		 * preallocate this using a blocking request at the
		 * start of the task to avoid having to handle this
		 * edge case.
		 */
		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
		return;
	}

	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
		free_wrqe(wr);
		recycle_pageset(toep, ps);
		aio_ddp_cancel_one(job);
		toep->ddp_queueing = NULL;
		goto restart;
	}

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: scheduling %p for DDP[%d] (flags %#lx/%#lx)",
	    __func__, job, db_idx, ddp_flags, ddp_flags_mask);
#endif
	/* Give the chip the go-ahead. */
	t4_wrq_tx(sc, wr);
	db = &toep->db[db_idx];
	db->cancel_pending = 0;
	db->job = job;
	db->ps = ps;
	toep->ddp_queueing = NULL;
	toep->ddp_flags |= buf_flag;
	toep->ddp_active_count++;
	if (toep->ddp_active_count == 1) {
		MPASS(toep->ddp_active_id == -1);
		toep->ddp_active_id = db_idx;
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp_active_id);
	}
}
void
ddp_queue_toep(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	if (toep->ddp_flags & DDP_TASK_ACTIVE)
		return;
	toep->ddp_flags |= DDP_TASK_ACTIVE;
	hold_toepcb(toep);
	soaio_enqueue(&toep->ddp_requeue_task);
}
static void
aio_ddp_requeue_task(void *context, int pending)
{
	struct toepcb *toep = context;

	DDP_LOCK(toep);
	aio_ddp_requeue(toep);
	toep->ddp_flags &= ~DDP_TASK_ACTIVE;
	DDP_UNLOCK(toep);

	free_toepcb(toep);
}
static void
t4_aio_cancel_active(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	uint64_t valid_flag;
	int i;

	DDP_LOCK(toep);
	if (aio_cancel_cleared(job)) {
		DDP_UNLOCK(toep);
		aio_ddp_cancel_one(job);
		return;
	}

	for (i = 0; i < nitems(toep->db); i++) {
		if (toep->db[i].job == job) {
			/* Should only ever get one cancel request for a job. */
			MPASS(toep->db[i].cancel_pending == 0);

			/*
			 * Invalidate this buffer.  It will be
			 * cancelled or partially completed once the
			 * card ACKs the invalidate.
			 */
			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
			    V_TF_DDP_BUF1_VALID(1);
			t4_set_tcb_field(sc, toep->ctrlq, toep->tid,
			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
			    i + DDP_BUF0_INVALIDATED,
			    toep->ofld_rxq->iq.abs_id);
			toep->db[i].cancel_pending = 1;
			CTR2(KTR_CXGBE, "%s: request %p marked pending",
			    __func__, job);
			break;
		}
	}
	DDP_UNLOCK(toep);
}
static void
t4_aio_cancel_queued(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	DDP_LOCK(toep);
	if (!aio_cancel_cleared(job)) {
		TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
		toep->ddp_waiting_count--;
		if (toep->ddp_waiting_count == 0)
			ddp_queue_toep(toep);
	}
	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
	DDP_UNLOCK(toep);

	aio_ddp_cancel_one(job);
}
int
t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	/* Ignore writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_READ)
		return (EOPNOTSUPP);

	DDP_LOCK(toep);

	/*
	 * XXX: Think about possibly returning errors for ENOTCONN,
	 * etc.  Perhaps the caller would only queue the request
	 * if it failed with EOPNOTSUPP?
	 */

#ifdef VERBOSE_TRACES
	CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
#endif
	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->ddp_aiojobq, job, list);
	toep->ddp_waiting_count++;
	toep->ddp_flags |= DDP_OK;

	/*
	 * Try to handle this request synchronously.  If this has
	 * to block because the task is running, it will just bail
	 * and let the task handle it instead.
	 */
	aio_ddp_requeue(toep);
	DDP_UNLOCK(toep);
	return (0);
}
void
t4_ddp_mod_load(void)
{

	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
	TAILQ_INIT(&ddp_orphan_pagesets);
	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
}
void
t4_ddp_mod_unload(void)
{

	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
	mtx_destroy(&ddp_orphan_pagesets_lock);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
}
#endif