/**************************************************************************

Copyright (c) 2007-2008, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/limits.h>

#include <sys/lock.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

#include <sys/sockbuf.h>
#include <sys/sockopt.h>
#include <sys/sockstate.h>
#include <sys/socket.h>
#include <sys/syslog.h>

#include <machine/bus.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>

#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/sys/mbufq.h>

#include <dev/cxgb/ulp/tom/cxgb_tcp_offload.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_offload.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/common/cxgb_firmware_exports.h>
#include <dev/cxgb/common/cxgb_t3_cpl.h>
#include <dev/cxgb/common/cxgb_tcb.h>
#include <dev/cxgb/common/cxgb_ctl_defs.h>
#include <dev/cxgb/cxgb_offload.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <dev/cxgb/sys/mvec.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#include <dev/cxgb/ulp/tom/cxgb_defs.h>
#include <dev/cxgb/ulp/tom/cxgb_tom.h>
#include <dev/cxgb/ulp/tom/cxgb_t3_ddp.h>
#include <dev/cxgb/ulp/tom/cxgb_toepcb.h>
#include <dev/cxgb/ulp/tom/cxgb_tcp.h>
#include <dev/cxgb/ulp/tom/cxgb_vm.h>
#define MAX_SCHEDULE_TIMEOUT	300

/*
 * Return the # of page pods needed to accommodate a # of pages.
 */
static inline unsigned int
pages2ppods(unsigned int pages)
{
	return (pages + PPOD_PAGES - 1) / PPOD_PAGES + NUM_SENTINEL_PPODS;
}
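/*
 * Worked example, assuming PPOD_PAGES == 4 and NUM_SENTINEL_PPODS == 0
 * (see cxgb_t3_ddp.h for the authoritative values): a 9-page buffer needs
 * (9 + 4 - 1) / 4 = 3 page pods.
 */
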
/**
 * t3_pin_pages - pin a user memory range and prepare it for DDP
 * @addr - the starting address
 * @len - the length of the range
 * @newgl - contains the pages and physical addresses of the pinned range
 * @gl - an existing gather list, may be %NULL
 *
 * Pins the pages in the user-space memory range [addr, addr + len) and
 * maps them for DMA.  Returns a gather list with the pinned pages and
 * their physical addresses.  If @gl is non-NULL the pages it describes
 * are compared against the pages for [addr, addr + len), and if the
 * existing gather list already covers the range a new list is not
 * allocated.  Returns 0 on success, or an errno (EFAULT, ENOMEM).  On
 * success, if a new gather list was allocated it is returned in @newgl.
 */
static int
t3_pin_pages(bus_dma_tag_t tag, bus_dmamap_t map, vm_offset_t addr,
    size_t len, struct ddp_gather_list **newgl,
    const struct ddp_gather_list *gl)
{
	int i, err;
	size_t pg_off;
	unsigned int npages;
	struct ddp_gather_list *p;

	/*
	 * XXX need x86 agnostic check
	 */
	if (addr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);

	pg_off = addr & PAGE_MASK;
	npages = (pg_off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	p = malloc(sizeof(struct ddp_gather_list) + npages * sizeof(vm_page_t *),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (p == NULL)
		return (ENOMEM);

	err = vm_fault_hold_user_pages(addr, p->dgl_pages, npages,
	    VM_HOLD_WRITEABLE);
	if (err)
		goto free_gl;

	/* Reuse @gl if it already covers the requested range. */
	if (gl && gl->dgl_offset == pg_off && gl->dgl_nelem >= npages &&
	    gl->dgl_length >= len) {
		for (i = 0; i < npages; i++)
			if (p->dgl_pages[i] != gl->dgl_pages[i])
				goto different_gl;
		err = 0;
		goto unpin;
	}
different_gl:
	p->dgl_length = len;
	p->dgl_offset = pg_off;
	p->dgl_nelem = npages;
#ifdef NEED_BUSDMA
	/* Linux-style DMA mapping calls, only compiled when NEED_BUSDMA is set. */
	p->phys_addr[0] = pci_map_page(pdev, p->pages[0], pg_off,
	    PAGE_SIZE - pg_off, PCI_DMA_FROMDEVICE) - pg_off;
	for (i = 1; i < npages; ++i)
		p->phys_addr[i] = pci_map_page(pdev, p->pages[i], 0, PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
	*newgl = p;
	return (0);
unpin:
	vm_fault_unhold_pages(p->dgl_pages, npages);
free_gl:
	free(p, M_DEVBUF);
	*newgl = NULL;
	return (err);
}
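/*
 * Undo the DMA mappings of a gather list.  As with t3_pin_pages() above,
 * the Linux-style pci_unmap_page() calls are compiled only when NEED_BUSDMA
 * is defined; a native busdma conversion is still pending.
 */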
static void
unmap_ddp_gl(const struct ddp_gather_list *gl)
{
#ifdef NEED_BUSDMA
	int i;

	if (!gl->nelem)
		return;

	pci_unmap_page(pdev, gl->phys_addr[0] + gl->offset,
	    PAGE_SIZE - gl->offset, PCI_DMA_FROMDEVICE);
	for (i = 1; i < gl->nelem; ++i)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
}
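/*
 * Unhold the VM pages backing a gather list.  @dirty is currently unused;
 * see the XXX below about marking pages dirty before unholding.
 */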
static void
ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
{
	/*
	 * XXX mark pages as dirty before unholding
	 */
	vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
}
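/*
 * Release a gather list: unhold its pages and free the list structure
 * itself.  Unmapping, where enabled, is handled separately by
 * unmap_ddp_gl().
 */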
void
t3_free_ddp_gl(struct ddp_gather_list *gl)
{
	ddp_gl_free_pages(gl, 0);
	free(gl, M_DEVBUF);
}
/* Max # of page pods for a buffer, enough for 1MB buffer at 4KB page size */
#define MAX_PPODS	64U
/*
 * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
 * the TCB.  We allocate page pods in multiples of PPOD_CLUSTER_SIZE.  First we
 * try to allocate enough page pods to accommodate the whole buffer, subject to
 * the MAX_PPODS limit.  If that fails we try to allocate PPOD_CLUSTER_SIZE page
 * pods before failing entirely.
 */
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
    unsigned long addr, unsigned int len)
{
	int err, tag, npages, nppods;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

#ifdef notyet
	/* 'so' is not in scope here; the assert is kept for documentation. */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = min(pages2ppods(npages), MAX_PPODS);
	nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
	err = t3_alloc_ppods(d, nppods, &tag);
	if (err && nppods > PPOD_CLUSTER_SIZE) {
		nppods = PPOD_CLUSTER_SIZE;
		err = t3_alloc_ppods(d, nppods, &tag);
	}
	if (err)
		return (ENOMEM);

	p->ubuf_nppods = nppods;
	p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
	t3_set_ddp_tag(toep, 1, tag << 6);
#endif
	return (0);
}
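/*
 * Note on the "tag << 6" used with t3_set_ddp_tag() here and below: page
 * pods are 64 bytes, so shifting a page-pod index left by 6 bits converts
 * it to the byte-scaled tag the TCB expects.  This convention is inferred
 * from the usage throughout this file.
 */
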
/*
 * Starting offset for the user DDP buffer.  A non-0 value ensures a DDP flush
 * won't block indefinitely if there's nothing to place (which should be rare).
 */
#define UBUF_OFFSET	1
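/*
 * Select the DDP flags to post with a buffer: MSG_WAITALL keeps placed data
 * from being invalidated on PSH; otherwise @nonblock and the ddp_push_wait
 * tunable decide whether the buffer is flushed when a PSH arrives.
 */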
static __inline unsigned long
select_ddp_flags(const struct toepcb *toep, int buf_idx,
    int nonblock, int rcv_flags)
{
	if (buf_idx == 1) {
		if (__predict_false(rcv_flags & MSG_WAITALL))
			return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
			       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
			       V_TF_DDP_PUSH_DISABLE_1(1);
		if (nonblock)
			return V_TF_DDP_BUF1_FLUSH(1);

		return V_TF_DDP_BUF1_FLUSH(!TOM_TUNABLE(toep->tp_toedev,
		    ddp_push_wait));
	}

	if (__predict_false(rcv_flags & MSG_WAITALL))
		return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		       V_TF_DDP_PUSH_DISABLE_0(1);
	if (nonblock)
		return V_TF_DDP_BUF0_FLUSH(1);

	return V_TF_DDP_BUF0_FLUSH(!TOM_TUNABLE(toep->tp_toedev, ddp_push_wait));
}
/*
 * Repost the kernel DDP buffer after it has previously become full and been
 * invalidated.  We just need to reset the offset and adjust the DDP flags.
 * Conveniently, we can set the flags and the offset with a single message.
 * Note that this function does not set the buffer length.  Again conveniently
 * our kernel buffer is of fixed size.  If the length needs to be changed it
 * needs to be done separately.
 */
void
t3_repost_kbuf(struct toepcb *toep, unsigned int bufidx, int modulate,
    int activate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	unsigned long flags;

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset;
	p->buf_state[bufidx].flags = p->kbuf_noinval ? DDP_BF_NOINVAL : 0;
	p->buf_state[bufidx].gl = p->kbuf[bufidx];
	p->cur_buf = bufidx;
	p->kbuf_idx = bufidx;

	flags = select_ddp_flags(toep, bufidx, nonblock, 0);
	if (!bufidx)
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF0_VALID(1),
		    V_TF_DDP_BUF0_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF0_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate), modulate);
	else
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF1_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate),
		    V_TF_DDP_BUF1_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1),
		    modulate);
}
/**
 * setup_uio_ppods - setup HW page pods for a user iovec
 * @toep - the toepcb for the connection
 * @uio - the uio describing the user buffer
 * @oft - additional bytes to map before the start of the buffer
 *
 * Pins a user iovec and sets up HW page pods for DDP into it.  We allocate
 * page pods for user buffers on the first call per socket.  Afterwards we
 * limit the buffer length to whatever the existing page pods can accommodate.
 * Returns 0 on success and the length of the mapped buffer in @length, or an
 * error code otherwise.
 *
 * The current implementation handles iovecs with only one entry.
 */
static int
setup_uio_ppods(struct toepcb *toep, const struct uio *uio, int oft, int *length)
{
	int err;
	unsigned int len;
	struct ddp_gather_list *gl = NULL;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct iovec *iov = uio->uio_iov;
	vm_offset_t addr = (vm_offset_t)iov->iov_base - oft;

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	if (__predict_false(p->ubuf_nppods == 0)) {
		err = alloc_buf1_ppods(toep, p, addr, iov->iov_len + oft);
		if (err)
			return (err);
	}

	len = (p->ubuf_nppods - NUM_SENTINEL_PPODS) * PPOD_PAGES * PAGE_SIZE;
	len -= addr & PAGE_MASK;
	if (len > M_TCB_RX_DDP_BUF0_LEN)
		len = M_TCB_RX_DDP_BUF0_LEN;
	len = min(len, toep->tp_tp->rcv_wnd - 32768);
	len = min(len, iov->iov_len + oft);

	if (len <= p->kbuf[0]->dgl_length) {
		printf("length too short\n");
		return (EINVAL);
	}

	err = t3_pin_pages(toep->tp_rx_dmat, toep->tp_dmamap, addr, len, &gl,
	    p->ubuf);
	if (err)
		return (err);
	if (gl) {
		if (p->ubuf)
			t3_free_ddp_gl(p->ubuf);
		p->ubuf = gl;
		t3_setup_ppods(toep, gl, pages2ppods(gl->dgl_nelem),
		    p->ubuf_tag, len, gl->dgl_offset, 0);
	}
	*length = len;
	return (0);
}
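/*
 * Cancel a pending user DDP buffer.  If the hardware has already placed
 * data, the current buffer is flushed/invalidated; we then sleep on the
 * socket buffer until any outstanding GET_TCB completes or the connection
 * can no longer receive.
 */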
void
t3_cancel_ubuf(struct toepcb *toep, struct sockbuf *rcv)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int ubuf_pending = t3_ddp_ubuf_pending(toep);
	int err = 0, count = 0;

	if (p->ubuf == NULL)
		return;

	sockbuf_lock_assert(rcv);

	p->cancel_ubuf = 1;
	while (ubuf_pending && !(rcv->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_TOM,
		    "t3_cancel_ubuf: flags0 0x%x flags1 0x%x get_tcb_count %d",
		    p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->buf_state[1].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->get_tcb_count);
		if (p->get_tcb_count == 0)
			t3_cancel_ddpbuf(toep, p->cur_buf);
		else
			CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p SBS_CANTRCVMORE=%d",
			    err, p->get_tcb_count, rcv->sb_timeo, rcv,
			    !!(rcv->sb_state & SBS_CANTRCVMORE));

		while (p->get_tcb_count && !(rcv->sb_state & SBS_CANTRCVMORE)) {
			/* Log only occasionally while waiting, not on every pass. */
			if ((count & 0xffffff) == 0)
				CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p count=%d",
				    err, p->get_tcb_count, rcv->sb_timeo, rcv, count);
			count++;
			err = sbwait(rcv);
		}
		ubuf_pending = t3_ddp_ubuf_pending(toep);
	}
	p->cancel_ubuf = 0;
	p->user_ddp_pending = 0;
}
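/*
 * The DDP TCB flag bits that t3_overlay_ubuf() is allowed to modify when it
 * posts a user buffer on top of the kernel buffer.
 */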
#define OVERLAY_MASK (V_TF_DDP_PSH_NO_INVALIDATE0(1) | \
		      V_TF_DDP_PSH_NO_INVALIDATE1(1) | \
		      V_TF_DDP_BUF1_FLUSH(1) | \
		      V_TF_DDP_BUF0_FLUSH(1) | \
		      V_TF_DDP_PUSH_DISABLE_1(1) | \
		      V_TF_DDP_PUSH_DISABLE_0(1) | \
		      V_TF_DDP_INDICATE_OUT(1))
/*
 * Post a user buffer as an overlay on top of the current kernel buffer.
 */
int
t3_overlay_ubuf(struct toepcb *toep, struct sockbuf *rcv,
    const struct uio *uio, int nonblock, int rcv_flags,
    int modulate, int post_kbuf)
{
	int err, len, ubuf_idx;
	unsigned long flags;
	struct ddp_state *p = &toep->tp_ddp_state;

	if (p->kbuf[0] == NULL) {
		return (EINVAL);
	}
	err = setup_uio_ppods(toep, uio, 0, &len);
	if (err)
		return (err);

	if ((rcv->sb_state & SBS_CANTRCVMORE) ||
	    (toep->tp_tp->t_flags & TF_TOE) == 0)
		return (EINVAL);

	ubuf_idx = p->kbuf_idx;
	p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP;
	/* Use existing offset */
	/* Don't need to update .gl, user buffer isn't copied. */
	p->cur_buf = ubuf_idx;

	flags = select_ddp_flags(toep, ubuf_idx, nonblock, rcv_flags);

	if (post_kbuf) {
		struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];

		dbs->cur_offset = 0;
		dbs->flags = 0;
		dbs->gl = p->kbuf[ubuf_idx ^ 1];
		p->kbuf_idx ^= 1;
		flags |= p->kbuf_idx ?
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
		    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
	}

	if (ubuf_idx == 0) {
		t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
		    p->ubuf);
		t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0, flags,
		    OVERLAY_MASK | flags, 1);
	} else {
		t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6,
		    p->ubuf);
		t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0, flags,
		    OVERLAY_MASK | flags, 1);
	}
#ifdef T3_TRACE
	T3_TRACE5(TIDTB(so),
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d "
	    "kbuf_idx %d",
	    p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx);
#endif
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x",
	    p->ubuf_tag, flags, OVERLAY_MASK);
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: ubuf_idx %d kbuf_idx %d post_kbuf %d",
	    ubuf_idx, p->kbuf_idx, post_kbuf);
	return (0);
}
/*
 * Clean up DDP state that needs to survive until socket close time, such as
 * the DDP buffers.  The buffers are already unmapped at this point as
 * unmapping needs the PCI device and a socket may close long after the
 * device is removed.
 */
void
t3_cleanup_ddp(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++)
		if (p->kbuf[idx]) {
			ddp_gl_free_pages(p->kbuf[idx], 0);
			free(p->kbuf[idx], M_DEVBUF);
		}
	if (p->ubuf) {
		ddp_gl_free_pages(p->ubuf, 0);
		free(p->ubuf, M_DEVBUF);
		p->ubuf = NULL;
	}
	toep->tp_ulp_mode = 0;
}
/*
 * This is a companion to t3_cleanup_ddp() and releases the HW resources
 * associated with a connection's DDP state, such as the page pods.
 * It's called when HW is done with a connection.  The rest of the state
 * remains available until both HW and the app are done with the connection.
 */
void
t3_release_ddp_resources(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		t3_free_ppods(d, p->kbuf_tag[idx],
		    p->kbuf_nppods[idx]);
		unmap_ddp_gl(p->kbuf[idx]);
	}

	if (p->ubuf_nppods) {
		t3_free_ppods(d, p->ubuf_tag, p->ubuf_nppods);
		p->ubuf_nppods = 0;
	}
	if (p->ubuf)
		unmap_ddp_gl(p->ubuf);
}
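/*
 * Post the current kernel buffer: program its tag and length into the TCB,
 * then repost it as the active DDP buffer.
 */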
void
t3_post_kbuf(struct toepcb *toep, int modulate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;

	t3_set_ddp_tag(toep, p->cur_buf, p->kbuf_tag[p->cur_buf] << 6);
	t3_set_ddp_buf(toep, p->cur_buf, 0, p->kbuf[p->cur_buf]->dgl_length);
	t3_repost_kbuf(toep, p->cur_buf, modulate, 1, nonblock);
#ifdef T3_TRACE
	T3_TRACE1(TIDTB(so),
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
#endif
	CTR1(KTR_TOM,
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
}
/*
 * Prepare a socket for DDP.  Must be called when the socket is known to be
 * open.
 */
int
t3_enter_ddp(struct toepcb *toep, unsigned int kbuf_size, unsigned int waitall, int nonblock)
{
	int i, err = ENOMEM;
	static vm_pindex_t color;
	unsigned int nppods, kbuf_pages, idx = 0;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

	if (kbuf_size > M_TCB_RX_DDP_BUF0_LEN)
		return (EINVAL);

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	kbuf_pages = (kbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = pages2ppods(kbuf_pages);

	p->kbuf_noinval = !!waitall;
	p->kbuf_tag[NUM_DDP_KBUF - 1] = -1;
	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		p->kbuf[idx] =
		    malloc(sizeof (struct ddp_gather_list) + kbuf_pages *
			sizeof(vm_page_t *), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (p->kbuf[idx] == NULL) {
			err = ENOMEM;
			goto err;
		}
		err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]);
		if (err) {
			printf("t3_alloc_ppods failed err=%d\n", err);
			goto err;
		}

		p->kbuf_nppods[idx] = nppods;
		p->kbuf[idx]->dgl_length = kbuf_size;
		p->kbuf[idx]->dgl_offset = 0;
		p->kbuf[idx]->dgl_nelem = kbuf_pages;

		for (i = 0; i < kbuf_pages; ++i) {
			p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color,
			    VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
			    VM_ALLOC_ZERO);
			if (p->kbuf[idx]->dgl_pages[i] == NULL) {
				p->kbuf[idx]->dgl_nelem = i;
				printf("failed to allocate kbuf pages\n");
				goto err;
			}
		}
#ifdef NEED_BUSDMA
		/*
		 * XXX we'll need this for VT-d or any platform with an iommu :-/
		 */
		for (i = 0; i < kbuf_pages; ++i)
			p->kbuf[idx]->phys_addr[i] =
			    pci_map_page(p->pdev, p->kbuf[idx]->pages[i],
				0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
#endif
		t3_setup_ppods(toep, p->kbuf[idx], nppods, p->kbuf_tag[idx],
		    p->kbuf[idx]->dgl_length, 0, 0);
	}
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	t3_set_ddp_tag(toep, 0, p->kbuf_tag[0] << 6);
	t3_set_ddp_buf(toep, 0, 0, p->kbuf[0]->dgl_length);
	t3_repost_kbuf(toep, 0, 0, 1, nonblock);

	t3_set_rcv_coalesce_enable(toep,
	    TOM_TUNABLE(toep->tp_toedev, ddp_rcvcoalesce));
	t3_set_dack_mss(toep, TOM_TUNABLE(toep->tp_toedev, delack) >> 1);

#ifdef T3_TRACE
	T3_TRACE4(TIDTB(so),
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
#endif
	CTR4(KTR_TOM,
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	return (0);
err:
	t3_release_ddp_resources(toep);
	t3_cleanup_ddp(toep);
	return (err);
}
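/*
 * Copy data from an mbuf's DDP gather list into a uio.  uiomove_fromphys()
 * copies directly from the physical pages, so the gather list pages need no
 * kernel virtual mapping.
 */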
int
t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len)
{
	int resid_init, err;
	struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl;

	resid_init = uio->uio_resid;

	if (!gl->dgl_pages)
		panic("pages not set\n");

	CTR4(KTR_TOM, "t3_ddp_copy: offset=%d dgl_offset=%d cur_offset=%d len=%d",
	    offset, gl->dgl_offset, m->m_cur_offset, len);
	offset += gl->dgl_offset + m->m_cur_offset;
	KASSERT(len <= gl->dgl_length,
	    ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length));

	err = uiomove_fromphys(gl->dgl_pages, offset, len, uio);
	return (err);
}
/*
 * Allocate n page pods.  Returns 0 and the page pod tag in *ptag on success,
 * or an error if no run of n consecutive free pods is available.
 */
int
t3_alloc_ppods(struct tom_data *td, unsigned int n, int *ptag)
{
	unsigned int i, j;

	if (__predict_false(!td->ppod_map)) {
		printf("ppod_map not set\n");
		return (EINVAL);
	}

	mtx_lock(&td->ppod_map_lock);
	for (i = 0; i + n <= td->nppods; ) {	/* don't scan past the map */
		for (j = 0; j < n; ++j)		/* scan ppod_map[i..i+n-1] */
			if (td->ppod_map[i + j]) {
				i = i + j + 1;
				goto next;
			}
		memset(&td->ppod_map[i], 1, n);	/* allocate range */
		mtx_unlock(&td->ppod_map_lock);
		CTR2(KTR_TOM,
		    "t3_alloc_ppods: n=%u tag=%u", n, i);
		*ptag = i;
		return (0);
next:		;
	}
	mtx_unlock(&td->ppod_map_lock);
	return (ENOMEM);
}
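/*
 * Illustration with made-up numbers: if td->nppods == 8 and pods 0-1 are
 * busy, t3_alloc_ppods(td, 2, &tag) skips past index 1, finds pods 2-3
 * free, marks them allocated, and returns 0 with tag == 2.
 */
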
void
t3_free_ppods(struct tom_data *td, unsigned int tag, unsigned int n)
{
	/* No need to take ppod_map_lock here */
	memset(&td->ppod_map[tag], 0, n);
}