/**************************************************************************

Copyright (c) 2007-2008, Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/sockstate.h>
#include <sys/sockopt.h>
#include <sys/socket.h>
#include <sys/sockbuf.h>
#include <sys/syslog.h>

#include <machine/bus.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>

#include <cxgb_osdep.h>
#include <sys/mbufq.h>

#include <ulp/tom/cxgb_tcp_offload.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_offload.h>
#include <common/cxgb_firmware_exports.h>
#include <common/cxgb_t3_cpl.h>
#include <common/cxgb_tcb.h>
#include <common/cxgb_ctl_defs.h>
#include <cxgb_offload.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <ulp/toecore/cxgb_toedev.h>
#include <ulp/tom/cxgb_defs.h>
#include <ulp/tom/cxgb_tom.h>
#include <ulp/tom/cxgb_t3_ddp.h>
#include <ulp/tom/cxgb_toepcb.h>
#include <ulp/tom/cxgb_tcp.h>
#define MAX_SCHEDULE_TIMEOUT	300
/*
 * Return the # of page pods needed to accommodate a # of pages.
 */
static inline unsigned int
pages2ppods(unsigned int pages)
{
	return (pages + PPOD_PAGES - 1) / PPOD_PAGES + NUM_SENTINEL_PPODS;
}
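/*
 * Illustrative arithmetic (the PPOD_PAGES and NUM_SENTINEL_PPODS values are
 * assumptions, taken as 4 and 1 here): a 64KB buffer covering 16 4KB pages
 * needs pages2ppods(16) = (16 + 3) / 4 + 1 = 5 page pods.
 */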
/**
 * t3_pin_pages - pin a user memory range and prepare it for DDP
 * @addr - the starting address
 * @len - the length of the range
 * @newgl - contains the pages and physical addresses of the pinned range
 * @gl - an existing gather list, may be %NULL
 *
 * Pins the pages in the user-space memory range [addr, addr + len) and
 * maps them for DMA.  Returns a gather list with the pinned pages and
 * their physical addresses.  If @gl is non-NULL the pages it describes
 * are compared against the pages for [addr, addr + len), and if the
 * existing gather list already covers the range a new list is not
 * allocated.  Returns 0 on success, or an errno.  On success, if a
 * new gather list was allocated it is returned in @newgl.
 */
static int
t3_pin_pages(bus_dma_tag_t tag, bus_dmamap_t dmamap, vm_offset_t addr,
    size_t len, struct ddp_gather_list **newgl,
    const struct ddp_gather_list *gl)
{
	int i = 0, err;
	size_t pg_off;
	unsigned int npages;
	struct ddp_gather_list *p;
	vm_map_t map;

	pg_off = addr & PAGE_MASK;
	npages = (pg_off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	p = malloc(sizeof(struct ddp_gather_list) + npages * sizeof(vm_page_t *),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (p == NULL)
		return (ENOMEM);

	map = &curthread->td_proc->p_vmspace->vm_map;
	if (vm_fault_quick_hold_pages(map, addr, len, VM_PROT_READ |
	    VM_PROT_WRITE, p->dgl_pages, npages) < 0) {
		err = EFAULT;
		goto free_gl;
	}
	if (gl && gl->dgl_offset == pg_off && gl->dgl_nelem >= npages &&
	    gl->dgl_length >= len) {
		for (i = 0; i < npages; i++)
			if (p->dgl_pages[i] != gl->dgl_pages[i])
				goto different_gl;
		err = 0;	/* caller can keep using the existing list */
		goto unpin;
	}
different_gl:
	p->dgl_length = len;
	p->dgl_offset = pg_off;
	p->dgl_nelem = npages;
#ifdef NEED_BUSDMA	/* Linux-style DMA mapping, compiled out on FreeBSD */
	p->phys_addr[0] = pci_map_page(pdev, p->pages[0], pg_off,
	    PAGE_SIZE - pg_off, PCI_DMA_FROMDEVICE) - pg_off;
	for (i = 1; i < npages; ++i)
		p->phys_addr[i] = pci_map_page(pdev, p->pages[i], 0, PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
	*newgl = p;
	return (0);
unpin:
	vm_page_unhold_pages(p->dgl_pages, npages);
free_gl:
	free(p, M_DEVBUF);
	*newgl = NULL;
	return (err);
}
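/*
 * Illustrative pin (hypothetical numbers): for addr = 0x1000100 and
 * len = 8192, pg_off is 0x100 and npages = (0x100 + 8192 + 4095) >> 12 = 3,
 * so three pages are held even though len is exactly two pages worth.
 */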
static void
unmap_ddp_gl(const struct ddp_gather_list *gl)
{
#ifdef NEED_BUSDMA	/* Linux-style DMA unmap, compiled out on FreeBSD */
	int i;

	pci_unmap_page(pdev, gl->phys_addr[0] + gl->offset,
	    PAGE_SIZE - gl->offset, PCI_DMA_FROMDEVICE);
	for (i = 1; i < gl->nelem; ++i)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
		    PCI_DMA_FROMDEVICE);
#endif
}
static void
ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
{
	/*
	 * XXX mark pages as dirty before unholding
	 */
	vm_page_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
}
void
t3_free_ddp_gl(struct ddp_gather_list *gl)
{
	ddp_gl_free_pages(gl, 0);
	free(gl, M_DEVBUF);
}
/* Max # of page pods for a buffer, enough for 1MB buffer at 4KB page size */
#define MAX_PPODS	64U
/*
 * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
 * the TCB.  We allocate page pods in multiples of PPOD_CLUSTER_SIZE.  First we
 * try to allocate enough page pods to accommodate the whole buffer, subject to
 * the MAX_PPODS limit.  If that fails we try to allocate PPOD_CLUSTER_SIZE page
 * pods before failing entirely.
 */
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
    unsigned long addr, unsigned int len)
{
	int err, tag, npages, nppods;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = min(pages2ppods(npages), MAX_PPODS);
	nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
	err = t3_alloc_ppods(d, nppods, &tag);
	if (err && nppods > PPOD_CLUSTER_SIZE) {
		nppods = PPOD_CLUSTER_SIZE;
		err = t3_alloc_ppods(d, nppods, &tag);
	}
	if (err)
		return (ENOMEM);

	p->ubuf_nppods = nppods;
	p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
	t3_set_ddp_tag(toep, 1, tag << 6);
#endif
	return (0);
}
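/*
 * Illustrative sizing (PPOD_CLUSTER_SIZE's value is an assumption): a 300KB
 * buffer at 4KB pages spans 75 pages, so with PPOD_PAGES == 4 that is
 * pages2ppods(75) = 19 pods plus sentinels, clamped to MAX_PPODS and then
 * rounded up to a multiple of PPOD_CLUSTER_SIZE before the first attempt.
 */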
/*
 * Starting offset for the user DDP buffer.  A non-0 value ensures a DDP flush
 * won't block indefinitely if there's nothing to place (which should be rare).
 */
#define UBUF_OFFSET	1
static __inline unsigned long
select_ddp_flags(const struct toepcb *toep, int buf_idx,
    int nonblock, int rcv_flags)
{
	if (buf_idx == 1) {
		if (__predict_false(rcv_flags & MSG_WAITALL))
			return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
			       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
			       V_TF_DDP_PUSH_DISABLE_1(1);
		if (nonblock)
			return V_TF_DDP_BUF1_FLUSH(1);

		return V_TF_DDP_BUF1_FLUSH(!TOM_TUNABLE(toep->tp_toedev,
		    ddp_push_wait));
	}

	if (__predict_false(rcv_flags & MSG_WAITALL))
		return V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		       V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		       V_TF_DDP_PUSH_DISABLE_0(1);
	if (nonblock)
		return V_TF_DDP_BUF0_FLUSH(1);

	return V_TF_DDP_BUF0_FLUSH(!TOM_TUNABLE(toep->tp_toedev, ddp_push_wait));
}
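/*
 * To summarize the selection above: MSG_WAITALL keeps placed data valid
 * across PSH boundaries (no invalidate) and disables push-triggered
 * completion of the chosen buffer, a nonblocking receive forces a flush so
 * whatever has been placed is delivered at once, and otherwise flushing
 * depends on the ddp_push_wait tunable.
 */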
/*
 * Reposts the kernel DDP buffer after it has previously become full and been
 * invalidated.  We just need to reset the offset and adjust the DDP flags.
 * Conveniently, we can set the flags and the offset with a single message.
 * Note that this function does not set the buffer length.  Conveniently our
 * kernel buffer is of fixed size.  If the length needs to be changed it must
 * be done separately.
 */
void
t3_repost_kbuf(struct toepcb *toep, unsigned int bufidx, int modulate,
    int activate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	unsigned long flags;

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset;
	p->buf_state[bufidx].flags = p->kbuf_noinval ? DDP_BF_NOINVAL : 0;
	p->buf_state[bufidx].gl = p->kbuf[bufidx];
	p->kbuf_idx = bufidx;

	flags = select_ddp_flags(toep, bufidx, nonblock, 0);
	if (bufidx == 0)
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF0_VALID(1),
		    V_TF_DDP_BUF0_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF0_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate), modulate);
	else
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF1_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate),
		    V_TF_DDP_BUF1_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1),
		    modulate);
}
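/*
 * In the t3_setup_ddpbufs() calls above, the sixth argument supplies the new
 * TCB DDP flag values and the seventh the mask of flag bits to update (this
 * value/mask pairing is inferred from the callers in this file).  Since
 * V_TF_DDP_OFF(1) is in the mask but not in the values, reposting a kernel
 * buffer also re-enables DDP if it had been turned off.
 */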
/**
 * setup_uio_ppods - setup HW page pods for a user iovec
 * @toep: the offloaded connection's toepcb
 * @uio: the uio containing the user iovec
 * @oft: additional bytes to map before the start of the buffer
 * @length: returns the length of the mapped buffer
 *
 * Pins a user iovec and sets up HW page pods for DDP into it.  We allocate
 * page pods for user buffers on the first call per socket.  Afterwards we
 * limit the buffer length to whatever the existing page pods can accommodate.
 * Returns 0 with the mapped length in @length, or an error code.
 *
 * The current implementation handles iovecs with only one entry.
 */
static int
setup_uio_ppods(struct toepcb *toep, const struct uio *uio, int oft, int *length)
{
	int err;
	unsigned int len;
	struct ddp_gather_list *gl = NULL;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct iovec *iov = uio->uio_iov;
	vm_offset_t addr = (vm_offset_t)iov->iov_base - oft;

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	if (__predict_false(p->ubuf_nppods == 0)) {
		err = alloc_buf1_ppods(toep, p, addr, iov->iov_len + oft);
		if (err)
			return (err);
	}
	len = (p->ubuf_nppods - NUM_SENTINEL_PPODS) * PPOD_PAGES * PAGE_SIZE;
	len -= addr & PAGE_MASK;
	if (len > M_TCB_RX_DDP_BUF0_LEN)
		len = M_TCB_RX_DDP_BUF0_LEN;
	len = min(len, toep->tp_tp->rcv_wnd - 32768);
	len = min(len, iov->iov_len + oft);

	if (len <= p->kbuf[0]->dgl_length) {
		printf("length too short\n");
		return (EINVAL);
	}
	err = t3_pin_pages(toep->tp_rx_dmat, toep->tp_dmamap, addr, len, &gl, p->ubuf);
	if (err)
		return (err);
	if (gl) {
		if (p->ubuf)
			t3_free_ddp_gl(p->ubuf);
		p->ubuf = gl;
		t3_setup_ppods(toep, gl, pages2ppods(gl->dgl_nelem), p->ubuf_tag, len,
		    gl->dgl_offset, 0);
	}
	*length = len;
	return (0);
}
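/*
 * Note on the clamps above: the mapped length is bounded by the page pods
 * already allocated, by the width of the TCB buffer-length field
 * (M_TCB_RX_DDP_BUF0_LEN), and by the receive window less 32KB; the last
 * bound presumably keeps window headroom for data that arrives before the
 * user buffer becomes active (an assumption, it is not documented here).
 */

/*
 * Cancel a posted user DDP buffer and wait for the hardware to stop using
 * it.  While a GET_TCB reply is outstanding we sleep on the receive socket
 * buffer; the loops exit once no user DDP is pending or the connection can
 * no longer receive data.
 */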
void
t3_cancel_ubuf(struct toepcb *toep, struct sockbuf *rcv)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int ubuf_pending = t3_ddp_ubuf_pending(toep);
	int err = 0, count = 0;

	if (p->ubuf == NULL)
		return;

	sockbuf_lock_assert(rcv);

	while (ubuf_pending && !(rcv->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_TOM,
		    "t3_cancel_ubuf: flags0 0x%x flags1 0x%x get_tcb_count %d",
		    p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->buf_state[1].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY),
		    p->get_tcb_count);
		if (p->get_tcb_count == 0)
			t3_cancel_ddpbuf(toep, p->cur_buf);
		else
			CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p SBS_CANTRCVMORE=%d",
			    err, p->get_tcb_count, rcv->sb_timeo, rcv,
			    !!(rcv->sb_state & SBS_CANTRCVMORE));

		while (p->get_tcb_count && !(rcv->sb_state & SBS_CANTRCVMORE)) {
			if (count & 0xfffffff)
				CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d rcv=%p count=%d",
				    err, p->get_tcb_count, rcv->sb_timeo, rcv, count);
			count++;
			err = sbwait(rcv);
		}
		ubuf_pending = t3_ddp_ubuf_pending(toep);
	}
	p->user_ddp_pending = 0;
}
#define OVERLAY_MASK	(V_TF_DDP_PSH_NO_INVALIDATE0(1) | \
			 V_TF_DDP_PSH_NO_INVALIDATE1(1) | \
			 V_TF_DDP_BUF1_FLUSH(1) | \
			 V_TF_DDP_BUF0_FLUSH(1) | \
			 V_TF_DDP_PUSH_DISABLE_1(1) | \
			 V_TF_DDP_PUSH_DISABLE_0(1) | \
			 V_TF_DDP_INDICATE_OUT(1))
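/*
 * OVERLAY_MASK enumerates the TCB DDP flag bits an overlay operation may
 * modify; it is OR'ed into the mask word of the t3_setup_ddpbufs() calls
 * below so that only these bits, plus whatever is in the value word, change.
 */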
/*
 * Post a user buffer as an overlay on top of the current kernel buffer.
 */
int
t3_overlay_ubuf(struct toepcb *toep, struct sockbuf *rcv,
    const struct uio *uio, int nonblock, int rcv_flags,
    int modulate, int post_kbuf)
{
	int err, len, ubuf_idx;
	unsigned long flags;
	struct ddp_state *p = &toep->tp_ddp_state;

	if (p->kbuf[0] == NULL) {
		return (EINVAL);
	}
	err = setup_uio_ppods(toep, uio, 0, &len);
	if (err)
		return (err);

	if ((rcv->sb_state & SBS_CANTRCVMORE) ||
	    (toep->tp_tp->t_flags & TF_TOE) == 0)
		return (EINVAL);

	ubuf_idx = p->kbuf_idx;
	p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP;
	/* Use existing offset */
	/* Don't need to update .gl, user buffer isn't copied. */
	p->cur_buf = ubuf_idx;

	flags = select_ddp_flags(toep, ubuf_idx, nonblock, rcv_flags);

	if (post_kbuf) {
		struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];

		dbs->flags = 0;
		dbs->cur_offset = 0;
		dbs->gl = p->kbuf[ubuf_idx ^ 1];
		p->kbuf_idx ^= 1;
		flags |= p->kbuf_idx ?
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
		    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
	}

	if (ubuf_idx == 0) {
		t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
		    len);
		t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0,
		    flags,
		    OVERLAY_MASK | flags, 1);
	} else {
		t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6,
		    len);
		t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0,
		    flags,
		    OVERLAY_MASK | flags, 1);
	}

#ifdef T3_TRACE
	T3_TRACE5(TIDTB(so),
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d "
	    " kbuf_idx %d",
	    p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx);
#endif
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x",
	    p->ubuf_tag, flags, OVERLAY_MASK);
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: ubuf_idx %d kbuf_idx %d post_kbuf %d",
	    ubuf_idx, p->kbuf_idx, post_kbuf);

	return (0);
}
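/*
 * Sequence of the overlay above: the user buffer takes over the slot the next
 * kernel buffer would have used (ubuf_idx == kbuf_idx), and when post_kbuf is
 * set the opposite slot is re-armed with a kernel buffer, so the hardware
 * always has a valid target once the user buffer fills or is flushed.
 */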
/*
 * Clean up DDP state that needs to survive until socket close time, such as
 * the DDP buffers.  The buffers are already unmapped at this point as
 * unmapping needs the PCI device and a socket may close long after the device
 * is removed.
 */
void
t3_cleanup_ddp(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++)
		if (p->kbuf[idx]) {
			ddp_gl_free_pages(p->kbuf[idx], 0);
			free(p->kbuf[idx], M_DEVBUF);
		}
	if (p->ubuf) {
		ddp_gl_free_pages(p->ubuf, 0);
		free(p->ubuf, M_DEVBUF);
		p->ubuf = NULL;
	}
	toep->tp_ulp_mode = 0;
}
/*
 * This is a companion to t3_cleanup_ddp() and releases the HW resources
 * associated with a connection's DDP state, such as the page pods.
 * It's called when HW is done with a connection.  The rest of the state
 * remains available until both HW and the app are done with the connection.
 */
void
t3_release_ddp_resources(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		t3_free_ppods(d, p->kbuf_tag[idx],
		    p->kbuf_nppods[idx]);
		unmap_ddp_gl(p->kbuf[idx]);
	}

	if (p->ubuf_nppods) {
		t3_free_ppods(d, p->ubuf_tag, p->ubuf_nppods);
		p->ubuf_nppods = 0;
	}
	if (p->ubuf)
		unmap_ddp_gl(p->ubuf);
}
void
t3_post_kbuf(struct toepcb *toep, int modulate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;

	t3_set_ddp_tag(toep, p->cur_buf, p->kbuf_tag[p->cur_buf] << 6);
	t3_set_ddp_buf(toep, p->cur_buf, 0, p->kbuf[p->cur_buf]->dgl_length);
	t3_repost_kbuf(toep, p->cur_buf, modulate, 1, nonblock);
#ifdef T3_TRACE
	T3_TRACE1(TIDTB(so),
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
#endif
	CTR1(KTR_TOM,
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
}
/*
 * Prepare a socket for DDP.  Must be called when the socket is known to be
 * open.
 */
int
t3_enter_ddp(struct toepcb *toep, unsigned int kbuf_size, unsigned int waitall, int nonblock)
{
	int i, err = ENOMEM;
	static vm_pindex_t color;
	unsigned int nppods, kbuf_pages, idx = 0;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

	if (kbuf_size > M_TCB_RX_DDP_BUF0_LEN)
		return (EINVAL);

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	kbuf_pages = (kbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = pages2ppods(kbuf_pages);

	p->kbuf_noinval = !!waitall;
	p->kbuf_tag[NUM_DDP_KBUF - 1] = -1;
	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		p->kbuf[idx] =
		    malloc(sizeof (struct ddp_gather_list) + kbuf_pages *
			sizeof(vm_page_t *), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (p->kbuf[idx] == NULL)
			goto err;
		err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]);
		if (err) {
			printf("t3_alloc_ppods failed err=%d\n", err);
			goto err;
		}

		p->kbuf_nppods[idx] = nppods;
		p->kbuf[idx]->dgl_length = kbuf_size;
		p->kbuf[idx]->dgl_offset = 0;
		p->kbuf[idx]->dgl_nelem = kbuf_pages;

		for (i = 0; i < kbuf_pages; ++i) {
			p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color,
			    VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
			    VM_ALLOC_ZERO);
			if (p->kbuf[idx]->dgl_pages[i] == NULL) {
				p->kbuf[idx]->dgl_nelem = i;
				printf("failed to allocate kbuf pages\n");
				goto err;
			}
		}
#ifdef NEED_BUSDMA
		/*
		 * XXX we'll need this for VT-d or any platform with an iommu :-/
		 */
		for (i = 0; i < kbuf_pages; ++i)
			p->kbuf[idx]->phys_addr[i] =
			    pci_map_page(p->pdev, p->kbuf[idx]->pages[i],
				0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
#endif
		t3_setup_ppods(toep, p->kbuf[idx], nppods, p->kbuf_tag[idx],
		    p->kbuf[idx]->dgl_length, 0, 0);
	}
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	t3_set_ddp_tag(toep, 0, p->kbuf_tag[0] << 6);
	t3_set_ddp_buf(toep, 0, 0, p->kbuf[0]->dgl_length);
	t3_repost_kbuf(toep, 0, 0, 1, nonblock);

	t3_set_rcv_coalesce_enable(toep,
	    TOM_TUNABLE(toep->tp_toedev, ddp_rcvcoalesce));
	t3_set_dack_mss(toep, TOM_TUNABLE(toep->tp_toedev, delack) >> 1);

#ifdef T3_TRACE
	T3_TRACE4(TIDTB(so),
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
#endif
	CTR4(KTR_TOM,
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	return (0);

err:
	t3_release_ddp_resources(toep);
	t3_cleanup_ddp(toep);
	return (err);
}
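/*
 * Illustrative flow (assuming NUM_DDP_KBUF == 2 and 4KB pages): for a 64KB
 * kbuf_size each loop iteration wires 16 zeroed pages, writes their page pods
 * into adapter memory via t3_setup_ppods(), and records the pod tag; buffer 0
 * is then programmed into the TCB and posted so placement can begin at once.
 */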
int
t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len)
{
	int resid_init, err;
	struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl;

	resid_init = uio->uio_resid;

	if (!gl->dgl_pages)
		panic("pages not set\n");

	CTR4(KTR_TOM, "t3_ddp_copy: offset=%d dgl_offset=%d cur_offset=%d len=%d",
	    offset, gl->dgl_offset, m->m_cur_offset, len);
	offset += gl->dgl_offset + m->m_cur_offset;
	KASSERT(len <= gl->dgl_length,
	    ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length));

	err = uiomove_fromphys(gl->dgl_pages, offset, len, uio);
	return (err);
}
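/*
 * Offset walk-through (illustrative numbers): for an mbuf whose payload was
 * placed at m_cur_offset 0x200 within a gather list whose dgl_offset is
 * 0x100, a copy at mbuf offset 0 reads from byte 0x300 of the pinned pages,
 * which uiomove_fromphys() resolves page by page.
 */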
/*
 * Allocate n page pods.  Returns 0 and sets *ptag to the page pod tag on
 * success, an errno otherwise.
 */
int
t3_alloc_ppods(struct tom_data *td, unsigned int n, int *ptag)
{
	unsigned int i, j;

	if (__predict_false(!td->ppod_map)) {
		printf("ppod_map not set\n");
		return (EINVAL);
	}

	mtx_lock(&td->ppod_map_lock);
	for (i = 0; i + n <= td->nppods; ) {
		for (j = 0; j < n; ++j)		/* scan ppod_map[i..i+n-1] */
			if (td->ppod_map[i + j]) {
				i = i + j + 1;
				goto next;
			}
		memset(&td->ppod_map[i], 1, n);	/* allocate range */
		mtx_unlock(&td->ppod_map_lock);
		CTR2(KTR_TOM,
		    "t3_alloc_ppods: n=%u tag=%u", n, i);
		*ptag = i;
		return (0);
next:		;
	}
	mtx_unlock(&td->ppod_map_lock);
	return (ENOMEM);	/* no contiguous range of n free pods */
}
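/*
 * The pod map is a byte map searched first-fit.  For example, with n = 4 and
 * a map of "1 1 0 0 1 0 1 0 0 0 0", the scan fails at indices 0 and 1,
 * restarts past the busy slot found at index 4 (i = 5), then past the one at
 * index 6 (i = 7), and finally claims slots 7..10, returning tag 7.
 */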
void
t3_free_ppods(struct tom_data *td, unsigned int tag, unsigned int n)
{
	/* No need to take ppod_map_lock here */
	memset(&td->ppod_map[tag], 0, n);
}