/**************************************************************************

Copyright (c) 2007-2008, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sockstate.h>
#include <sys/sockopt.h>
#include <sys/socket.h>
#include <sys/sockbuf.h>
#include <sys/syslog.h>
#include <sys/uio.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>

#include <cxgb_osdep.h>
#include <sys/mbufq.h>

#include <ulp/tom/cxgb_tcp_offload.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_offload.h>

#include <common/cxgb_firmware_exports.h>
#include <common/cxgb_t3_cpl.h>
#include <common/cxgb_tcb.h>
#include <common/cxgb_ctl_defs.h>
#include <cxgb_offload.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <ulp/toecore/cxgb_toedev.h>
#include <ulp/tom/cxgb_defs.h>
#include <ulp/tom/cxgb_tom.h>
#include <ulp/tom/cxgb_t3_ddp.h>
#include <ulp/tom/cxgb_toepcb.h>
#include <ulp/tom/cxgb_tcp.h>
#include <ulp/tom/cxgb_vm.h>
#define MAX_SCHEDULE_TIMEOUT	300

/*
 * Return the # of page pods needed to accommodate a # of pages.
 */
static inline unsigned int
pages2ppods(unsigned int pages)
{
	return (pages + PPOD_PAGES - 1) / PPOD_PAGES + NUM_SENTINEL_PPODS;
}
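
/*
 * Worked example (illustrative constants, assuming PPOD_PAGES is 4 and
 * NUM_SENTINEL_PPODS is 0): a 64KB buffer at 4KB pages spans 16 pages, so
 * pages2ppods(16) = (16 + 4 - 1) / 4 = 4 page pods, while a 17-page range
 * rounds up to 5.
 */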
/**
 *	t3_pin_pages - pin a user memory range and prepare it for DDP
 *	@addr - the starting address
 *	@len - the length of the range
 *	@newgl - contains the pages and physical addresses of the pinned range
 *	@gl - an existing gather list, may be %NULL
 *
 *	Pins the pages in the user-space memory range [addr, addr + len) and
 *	maps them for DMA.  Returns a gather list with the pinned pages and
 *	their physical addresses.  If @gl is non NULL the pages it describes
 *	are compared against the pages for [addr, addr + len), and if the
 *	existing gather list already covers the range a new list is not
 *	allocated.  Returns 0 on success, or an errno.  On success, if
 *	a new gather list was allocated it is returned in @newgl.
 */
static int
t3_pin_pages(bus_dma_tag_t tag, bus_dmamap_t dmamap, vm_offset_t addr,
    size_t len, struct ddp_gather_list **newgl,
    const struct ddp_gather_list *gl)
{
	int i = 0, err;
	size_t pg_off;
	unsigned int npages;
	struct ddp_gather_list *p;
	vm_map_t map;

	/* XXX need x86 agnostic check */
	if (addr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);

	pg_off = addr & PAGE_MASK;
	npages = (pg_off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	p = malloc(sizeof(struct ddp_gather_list) + npages * sizeof(vm_page_t *),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (p == NULL)
		return (ENOMEM);

	map = &curthread->td_proc->p_vmspace->vm_map;
	err = vm_fault_hold_user_pages(map, addr, p->dgl_pages, npages,
	    VM_PROT_READ | VM_PROT_WRITE);
	if (err)
		goto free_gl;

	if (gl && gl->dgl_offset == pg_off && gl->dgl_nelem >= npages &&
	    gl->dgl_length >= len) {
		for (i = 0; i < npages; i++)
			if (p->dgl_pages[i] != gl->dgl_pages[i])
				goto different_gl;
		goto unpin;	/* caller's gl already covers the range */
	}

different_gl:
	p->dgl_length = len;
	p->dgl_offset = pg_off;
	p->dgl_nelem = npages;
#ifdef NEED_BUSDMA
	p->phys_addr[0] = pci_map_page(pdev, p->pages[0], pg_off,
	    PAGE_SIZE - pg_off, PCI_DMA_FROMDEVICE) - pg_off;
	for (i = 1; i < npages; ++i)
		p->phys_addr[i] = pci_map_page(pdev, p->pages[i], 0, PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
	*newgl = p;
	return (0);
unpin:
	vm_fault_unhold_pages(p->dgl_pages, npages);
free_gl:
	free(p, M_DEVBUF);
	*newgl = NULL;
	return (err);
}
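
/*
 * Sketch of a hypothetical caller (modeled on setup_uio_ppods() below),
 * showing the reuse path: when a second call covers the same user range,
 * t3_pin_pages() returns 0 with *newgl left NULL and the old gather list
 * keeps being used; only when a new list comes back is the old one freed.
 *
 *	struct ddp_gather_list *gl = NULL;
 *	int err;
 *
 *	err = t3_pin_pages(toep->tp_rx_dmat, toep->tp_dmamap, addr, len,
 *	    &gl, p->ubuf);
 *	if (err == 0 && gl != NULL) {
 *		if (p->ubuf)
 *			t3_free_ddp_gl(p->ubuf);	// drop the stale list
 *		p->ubuf = gl;
 *	}
 */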
static void
unmap_ddp_gl(const struct ddp_gather_list *gl)
{
#ifdef NEED_BUSDMA
	int i;

	pci_unmap_page(pdev, gl->phys_addr[0] + gl->offset,
	    PAGE_SIZE - gl->offset, PCI_DMA_FROMDEVICE);
	for (i = 1; i < gl->nelem; ++i)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
}
static void
ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
{
	/*
	 * XXX mark pages as dirty before unholding
	 */
	vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
}

void
t3_free_ddp_gl(struct ddp_gather_list *gl)
{
	ddp_gl_free_pages(gl, 0);
	free(gl, M_DEVBUF);
}
/* Max # of page pods for a buffer, enough for 1MB buffer at 4KB page size */
#define MAX_PPODS	64U

/*
 * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
 * the TCB.  We allocate page pods in multiples of PPOD_CLUSTER_SIZE.  First we
 * try to allocate enough page pods to accommodate the whole buffer, subject to
 * the MAX_PPODS limit.  If that fails we try to allocate PPOD_CLUSTER_SIZE page
 * pods before failing entirely.
 */
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
    unsigned long addr, unsigned int len)
{
	int err, tag, npages, nppods;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = min(pages2ppods(npages), MAX_PPODS);
	nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
	err = t3_alloc_ppods(d, nppods, &tag);
	if (err && nppods > PPOD_CLUSTER_SIZE) {
		nppods = PPOD_CLUSTER_SIZE;
		err = t3_alloc_ppods(d, nppods, &tag);
	}
	if (err)
		return (ENOMEM);

	p->ubuf_nppods = nppods;
	p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
	t3_set_ddp_tag(toep, 1, tag << 6);
#endif
	return (0);
}
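
/*
 * Illustrative walk-through (assumed values: 4KB pages, PPOD_PAGES 4,
 * PPOD_CLUSTER_SIZE 16, no sentinels): a 1MB buffer spans 256 pages, and
 * pages2ppods(256) = 64 = MAX_PPODS, already a cluster multiple.  Should
 * that reservation fail, the retry asks for a single cluster of 16 pods,
 * enough for a 256KB buffer, before giving up.
 */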
/*
 * Starting offset for the user DDP buffer.  A non-0 value ensures a DDP flush
 * won't block indefinitely if there's nothing to place (which should be rare).
 */
#define UBUF_OFFSET 1

static __inline unsigned long
select_ddp_flags(const struct toepcb *toep, int buf_idx,
    int nonblock, int rcv_flags)
{
	if (buf_idx == 1) {
		if (__predict_false(rcv_flags & MSG_WAITALL))
			return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
			       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
			       V_TF_DDP_PUSH_DISABLE_1(1);
		if (nonblock)
			return V_TF_DDP_BUF1_FLUSH(1);

		return V_TF_DDP_BUF1_FLUSH(!TOM_TUNABLE(toep->tp_toedev,
		    ddp_push_wait));
	}

	if (__predict_false(rcv_flags & MSG_WAITALL))
		return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		       V_TF_DDP_PUSH_DISABLE_0(1);
	if (nonblock)
		return V_TF_DDP_BUF0_FLUSH(1);

	return V_TF_DDP_BUF0_FLUSH(!TOM_TUNABLE(toep->tp_toedev, ddp_push_wait));
}
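
/*
 * Concrete flag choices produced above for buffer 0: a nonblocking read
 * always requests V_TF_DDP_BUF0_FLUSH(1) so partial data is pushed up
 * immediately; a blocking read requests the flush only when the
 * ddp_push_wait tunable is off; MSG_WAITALL instead suppresses both
 * invalidate-on-PSH and push so data keeps accumulating in the buffer.
 */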
/*
 * Reposts the kernel DDP buffer after it has previously become full and
 * invalidated.  We just need to reset the offset and adjust the DDP flags.
 * Conveniently, we can set the flags and the offset with a single message.
 * Note that this function does not set the buffer length.  Again conveniently
 * our kernel buffer is of fixed size.  If the length needs to be changed it
 * needs to be done separately.
 */
static void
t3_repost_kbuf(struct toepcb *toep, unsigned int bufidx, int modulate,
    int activate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	unsigned long flags;

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset;
	p->buf_state[bufidx].flags = p->kbuf_noinval ? DDP_BF_NOINVAL : 0;
	p->buf_state[bufidx].gl = p->kbuf[bufidx];
	p->kbuf_idx = bufidx;

	flags = select_ddp_flags(toep, bufidx, nonblock, 0);

	if (!bufidx)
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF0_VALID(1),
		    V_TF_DDP_BUF0_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF0_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate), modulate);
	else
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF1_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate),
		    V_TF_DDP_BUF1_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1),
		    modulate);
}
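
/*
 * Note on the two t3_setup_ddpbufs() calls above: the first flag word
 * carries the values to set and the second the mask of TCB flag bits to
 * update.  A bit present in both (e.g. V_TF_DDP_BUF0_VALID) is turned on;
 * a bit present only in the mask (e.g. V_TF_DDP_OFF) is cleared, which
 * here re-enables DDP while reposting the buffer.
 */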
/**
 * setup_uio_ppods - setup HW page pods for a user iovec
 * @sk: the associated socket
 * @uio: the uio containing the user buffer
 * @oft: additional bytes to map before the start of the buffer
 *
 * Pins a user iovec and sets up HW page pods for DDP into it.  We allocate
 * page pods for user buffers on the first call per socket.  Afterwards we
 * limit the buffer length to whatever the existing page pods can accommodate.
 * Returns 0 and the length of the mapped buffer in @length, or an error code.
 *
 * The current implementation handles iovecs with only one entry.
 */
static int
setup_uio_ppods(struct toepcb *toep, const struct uio *uio, int oft, int *length)
{
	int err;
	unsigned int len;
	struct ddp_gather_list *gl = NULL;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct iovec *iov = uio->uio_iov;
	vm_offset_t addr = (vm_offset_t)iov->iov_base - oft;

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	if (__predict_false(p->ubuf_nppods == 0)) {
		err = alloc_buf1_ppods(toep, p, addr, iov->iov_len + oft);
		if (err)
			return (err);
	}

	len = (p->ubuf_nppods - NUM_SENTINEL_PPODS) * PPOD_PAGES * PAGE_SIZE;
	len -= addr & PAGE_MASK;
	if (len > M_TCB_RX_DDP_BUF0_LEN)
		len = M_TCB_RX_DDP_BUF0_LEN;
	len = min(len, toep->tp_tp->rcv_wnd - 32768);
	len = min(len, iov->iov_len + oft);

	if (len <= p->kbuf[0]->dgl_length) {
		printf("length too short\n");
		return (EINVAL);
	}

	err = t3_pin_pages(toep->tp_rx_dmat, toep->tp_dmamap, addr, len, &gl,
	    p->ubuf);
	if (err)
		return (err);
	if (gl) {
		if (p->ubuf)
			t3_free_ddp_gl(p->ubuf);
		p->ubuf = gl;
		t3_setup_ppods(toep, gl, pages2ppods(gl->dgl_nelem),
		    p->ubuf_tag, len, gl->dgl_offset, 0);
	}
	*length = len;
	return (0);
}
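
/*
 * Worked clamp sequence (illustrative numbers): 64 non-sentinel pods at
 * 4 pages each could map 1MB of a page-aligned buffer; a 64KB receive
 * window clamps that to 64KB - 32KB = 32KB, and a 16KB iovec clamps it
 * further to 16KB.  The final test then rejects the request unless the
 * mapping exceeds the kernel buffer size, since a user buffer smaller
 * than the kbuf gains nothing over copying out of the kbuf.
 */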
static void
t3_cancel_ubuf(struct toepcb *toep, struct sockbuf *rcv)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int ubuf_pending = t3_ddp_ubuf_pending(toep);
	int err = 0, count = 0;

	if (p->ubuf == NULL)
		return;

	sockbuf_lock_assert(rcv);

	p->cancel_ubuf = 1;
	while (ubuf_pending && !(rcv->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_TOM,
		    "t3_cancel_ubuf: flags0 0x%x flags1 0x%x get_tcb_count %d",
		    p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->buf_state[1].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->get_tcb_count);
		if (p->get_tcb_count == 0)
			t3_cancel_ddpbuf(toep, p->cur_buf);
		else
			CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p SBS_CANTRCVMORE=%d",
			    err, p->get_tcb_count, rcv->sb_timeo, rcv,
			    !!(rcv->sb_state & SBS_CANTRCVMORE));

		while (p->get_tcb_count && !(rcv->sb_state & SBS_CANTRCVMORE)) {
			if (count & 0xfffffff)
				CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p count=%d",
				    err, p->get_tcb_count, rcv->sb_timeo, rcv, count);
			count++;
			err = sbwait(rcv);
		}
		ubuf_pending = t3_ddp_ubuf_pending(toep);
	}
	p->cancel_ubuf = 0;
	p->user_ddp_pending = 0;
}
#define OVERLAY_MASK (V_TF_DDP_PSH_NO_INVALIDATE0(1) | \
		      V_TF_DDP_PSH_NO_INVALIDATE1(1) | \
		      V_TF_DDP_BUF1_FLUSH(1) | \
		      V_TF_DDP_BUF0_FLUSH(1) | \
		      V_TF_DDP_PUSH_DISABLE_1(1) | \
		      V_TF_DDP_PUSH_DISABLE_0(1) | \
		      V_TF_DDP_INDICATE_OUT(1))
/*
 * Post a user buffer as an overlay on top of the current kernel buffer.
 */
int
t3_overlay_ubuf(struct toepcb *toep, struct sockbuf *rcv,
    const struct uio *uio, int nonblock, int rcv_flags,
    int modulate, int post_kbuf)
{
	int err, len, ubuf_idx;
	unsigned long flags;
	struct ddp_state *p = &toep->tp_ddp_state;

	if (p->kbuf[0] == NULL)
		return (EINVAL);

	err = setup_uio_ppods(toep, uio, 0, &len);
	if (err)
		return (err);

	if ((rcv->sb_state & SBS_CANTRCVMORE) ||
	    (toep->tp_tp->t_flags & TF_TOE) == 0)
		return (EINVAL);

	ubuf_idx = p->kbuf_idx;
	p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP;
	/* Use existing offset */
	/* Don't need to update .gl, user buffer isn't copied. */
	p->cur_buf = ubuf_idx;

	flags = select_ddp_flags(toep, ubuf_idx, nonblock, rcv_flags);

	if (post_kbuf) {
		struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];

		dbs->flags = 0;
		dbs->gl = p->kbuf[ubuf_idx ^ 1];
		dbs->cur_offset = 0;
		flags |= p->kbuf_idx ?
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
		    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
	}

	if (ubuf_idx == 0) {
		t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
		    len);
		t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0,
		    flags,
		    OVERLAY_MASK | flags, 1);
	} else {
		t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6,
		    len);
		t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0,
		    flags,
		    OVERLAY_MASK | flags, 1);
	}
#ifdef T3_TRACE
	T3_TRACE5(TIDTB(so),
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d "
	    " kbuf_idx %d",
	    p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx);
#endif
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x",
	    p->ubuf_tag, flags, OVERLAY_MASK);
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: ubuf_idx %d kbuf_idx %d post_kbuf %d",
	    ubuf_idx, p->kbuf_idx, post_kbuf);

	return (0);
}
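
/*
 * Hypothetical call from a receive path (sketch, not code from this
 * file): overlay the user buffer and repost a kernel buffer behind it,
 * then remember that user DDP is outstanding.
 *
 *	// nonblock=0, modulate=1, post_kbuf=1
 *	err = t3_overlay_ubuf(toep, &so->so_rcv, uio, 0, rcv_flags, 1, 1);
 *	if (err == 0)
 *		p->user_ddp_pending = 1;
 */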
/*
 * Clean up DDP state that needs to survive until socket close time, such as
 * the DDP buffers.  The buffers are already unmapped at this point as
 * unmapping needs the PCI device and a socket may close long after the device
 * is removed.
 */
void
t3_cleanup_ddp(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++)
		if (p->kbuf[idx]) {
			ddp_gl_free_pages(p->kbuf[idx], 0);
			free(p->kbuf[idx], M_DEVBUF);
		}
	if (p->ubuf) {
		ddp_gl_free_pages(p->ubuf, 0);
		free(p->ubuf, M_DEVBUF);
		p->ubuf = NULL;
	}
	toep->tp_ulp_mode = 0;
}
/*
 * This is a companion to t3_cleanup_ddp() and releases the HW resources
 * associated with a connection's DDP state, such as the page pods.
 * It's called when HW is done with a connection.  The rest of the state
 * remains available until both HW and the app are done with the connection.
 */
void
t3_release_ddp_resources(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		t3_free_ppods(d, p->kbuf_tag[idx],
		    p->kbuf_nppods[idx]);
		unmap_ddp_gl(p->kbuf[idx]);
	}

	if (p->ubuf_nppods) {
		t3_free_ppods(d, p->ubuf_tag, p->ubuf_nppods);
		p->ubuf_nppods = 0;
	}
	if (p->ubuf)
		unmap_ddp_gl(p->ubuf);
}
void
t3_post_kbuf(struct toepcb *toep, int modulate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;

	t3_set_ddp_tag(toep, p->cur_buf, p->kbuf_tag[p->cur_buf] << 6);
	t3_set_ddp_buf(toep, p->cur_buf, 0, p->kbuf[p->cur_buf]->dgl_length);
	t3_repost_kbuf(toep, p->cur_buf, modulate, 1, nonblock);
#ifdef T3_TRACE
	T3_TRACE1(TIDTB(so),
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
#endif
	CTR1(KTR_TOM,
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
}
/*
 * Prepare a socket for DDP.  Must be called when the socket is known to be
 * open.
 */
int
t3_enter_ddp(struct toepcb *toep, unsigned int kbuf_size, unsigned int waitall, int nonblock)
{
	int i, err = ENOMEM;
	static vm_pindex_t color;
	unsigned int nppods, kbuf_pages, idx = 0;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

	if (kbuf_size > M_TCB_RX_DDP_BUF0_LEN)
		return (EINVAL);

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	kbuf_pages = (kbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = pages2ppods(kbuf_pages);

	p->kbuf_noinval = !!waitall;
	p->kbuf_tag[NUM_DDP_KBUF - 1] = -1;
	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		p->kbuf[idx] =
		    malloc(sizeof (struct ddp_gather_list) + kbuf_pages *
			sizeof(vm_page_t *), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (p->kbuf[idx] == NULL)
			goto err;
		err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]);
		if (err) {
			printf("t3_alloc_ppods failed err=%d\n", err);
			goto err;
		}

		p->kbuf_nppods[idx] = nppods;
		p->kbuf[idx]->dgl_length = kbuf_size;
		p->kbuf[idx]->dgl_offset = 0;
		p->kbuf[idx]->dgl_nelem = kbuf_pages;

		for (i = 0; i < kbuf_pages; ++i) {
			p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color,
			    VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
			    VM_ALLOC_ZERO);
			if (p->kbuf[idx]->dgl_pages[i] == NULL) {
				p->kbuf[idx]->dgl_nelem = i;
				printf("failed to allocate kbuf pages\n");
				goto err;
			}
		}
#ifdef NEED_BUSDMA
		/*
		 * XXX we'll need this for VT-d or any platform with an iommu :-/
		 */
		for (i = 0; i < kbuf_pages; ++i)
			p->kbuf[idx]->phys_addr[i] =
			    pci_map_page(p->pdev, p->kbuf[idx]->pages[i],
				0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
#endif
		t3_setup_ppods(toep, p->kbuf[idx], nppods, p->kbuf_tag[idx],
		    p->kbuf[idx]->dgl_length, 0, 0);
	}
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	t3_set_ddp_tag(toep, 0, p->kbuf_tag[0] << 6);
	t3_set_ddp_buf(toep, 0, 0, p->kbuf[0]->dgl_length);
	t3_repost_kbuf(toep, 0, 0, 1, nonblock);

	t3_set_rcv_coalesce_enable(toep,
	    TOM_TUNABLE(toep->tp_toedev, ddp_rcvcoalesce));
	t3_set_dack_mss(toep, TOM_TUNABLE(toep->tp_toedev, delack) >> 1);

#ifdef T3_TRACE
	T3_TRACE4(TIDTB(so),
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
#endif
	CTR4(KTR_TOM,
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	return (0);

err:
	t3_release_ddp_resources(toep);
	t3_cleanup_ddp(toep);
	return (err);
}
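
/*
 * Typical lifecycle around the functions above (hedged sketch; the real
 * call sites live in the TOM receive path, outside this file):
 *
 *	// connection setup, so_rcv locked; two kernel buffers of 16KB
 *	err = t3_enter_ddp(toep, 16384, 0, 0);
 *
 *	// per large read, overlay the user buffer over the kernel buffer
 *	err = t3_overlay_ubuf(toep, rcv, uio, nonblock, rcv_flags, 0, 1);
 *
 *	// teardown: HW resources first, then the surviving SW state
 *	t3_release_ddp_resources(toep);
 *	t3_cleanup_ddp(toep);
 */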
/*
 * Copy data from an mbuf's DDP gather list into a uio.
 */
int
t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len)
{
	int err;
	struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl;

	if (!gl->dgl_pages)
		panic("pages not set\n");

	CTR4(KTR_TOM, "t3_ddp_copy: offset=%d dgl_offset=%d cur_offset=%d len=%d",
	    offset, gl->dgl_offset, m->m_cur_offset, len);
	offset += gl->dgl_offset + m->m_cur_offset;
	KASSERT(len <= gl->dgl_length,
	    ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length));

	err = uiomove_fromphys(gl->dgl_pages, offset, len, uio);
	return (err);
}
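
/*
 * Assumed usage in a soreceive-style copy loop: when an mbuf carries a
 * DDP gather list, the payload lives in the gather-list pages rather
 * than in mbuf data, so it is copied out with uiomove_fromphys():
 *
 *	if (m->m_ddp_gl != NULL)
 *		err = t3_ddp_copy(m, 0, uio, mlen);
 *	else
 *		err = uiomove(mtod(m, void *), mlen, uio);
 */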
/*
 * Allocate n contiguous page pods.  Returns 0 and the pod tag in *ptag on
 * success, or an error on failure.
 */
int
t3_alloc_ppods(struct tom_data *td, unsigned int n, int *ptag)
{
	unsigned int i, j;

	if (__predict_false(!td->ppod_map)) {
		printf("ppod_map not set\n");
		return (EINVAL);
	}

	mtx_lock(&td->ppod_map_lock);
	for (i = 0; i + n <= td->nppods; ) {
		for (j = 0; j < n; ++j)		/* scan ppod_map[i..i+n-1] */
			if (td->ppod_map[i + j]) {
				i = i + j + 1;
				goto next;
			}
		memset(&td->ppod_map[i], 1, n);	/* allocate range */
		mtx_unlock(&td->ppod_map_lock);
		CTR2(KTR_TOM,
		    "t3_alloc_ppods: n=%u tag=%u", n, i);
		*ptag = i;
		return (0);
next:		;
	}
	mtx_unlock(&td->ppod_map_lock);
	return (ENOMEM);
}
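
/*
 * The loop above is a first-fit scan over a byte-per-pod allocation map.
 * A minimal standalone rendition of the same algorithm (illustrative
 * userland sketch; plain array in place of tom_data, no locking):
 *
 *	static int
 *	alloc_range(unsigned char *map, unsigned int nppods, unsigned int n)
 *	{
 *		unsigned int i, j;
 *
 *		for (i = 0; i + n <= nppods; ) {
 *			for (j = 0; j < n; ++j)
 *				if (map[i + j])
 *					break;
 *			if (j == n) {
 *				memset(&map[i], 1, n);	// claim [i, i+n)
 *				return (i);		// the tag is the index
 *			}
 *			i = i + j + 1;			// skip past the conflict
 *		}
 *		return (-1);				// no free range
 *	}
 */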
void
t3_free_ppods(struct tom_data *td, unsigned int tag, unsigned int n)
{
	/* No need to take ppod_map_lock here */
	memset(&td->ppod_map[tag], 0, n);
}