2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2023 Chelsio Communications, Inc.
5 * Written by: John Baldwin <jhb@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #include <sys/memdesc.h>
32 #include <sys/systm.h>
36 #include <vm/vm_page.h>
37 #include <vm/vm_param.h>
38 #include <machine/bus.h>
41 * memdesc_copyback copies data from a source buffer into a buffer
42 * described by a memory descriptor.
/*
 * Copy 'size' bytes from 'src' into physical memory starting at 'pa' + 'off',
 * going through the direct map (DMAP) one page at a time.
 * NOTE(review): this view of the file is fragmentary — the enclosing loop
 * and remaining statements are not visible here.
 */
45 phys_copyback(vm_paddr_t pa, int off, int size, const void *src)
	/* A direct map is required to get a kernel VA for the physical page. */
52 	KASSERT(PMAP_HAS_DMAP, ("direct-map required"));
	/* Offset of 'pa' within its page bounds the first chunk. */
56 	page_off = pa & PAGE_MASK;
	/* Copy at most up to the end of the current page. */
58 	todo = min(PAGE_SIZE - page_off, size);
	/* Translate the physical address to a writable direct-map VA. */
59 	p = (void *)PHYS_TO_DMAP(pa);
/*
 * Copy 'size' bytes from 'src' into a virtual-address scatter/gather list
 * ('vlist' with 'sglist_cnt' entries) starting at byte offset 'off'.
 * NOTE(review): fragmentary view — segment-advance and loop-close lines
 * are not visible here.
 */
69 vlist_copyback(struct bus_dma_segment *vlist, int sglist_cnt, int off,
70 int size, const void *src)
	/* Skip whole segments that lie entirely before 'off'. */
75 	while (vlist->ds_len <= off) {
76 		KASSERT(sglist_cnt > 1, ("out of sglist entries"));
85 	KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Clamp this chunk to the remainder of the current segment. */
88 	if (todo > vlist->ds_len - off)
89 		todo = vlist->ds_len - off;
	/* ds_addr holds a kernel VA here; copy directly into it. */
91 	memcpy((char *)(uintptr_t)vlist->ds_addr + off, p, todo);
/*
 * Copy 'size' bytes from 'src' into a physical-address scatter/gather list
 * ('plist' with 'sglist_cnt' entries) starting at byte offset 'off'.
 * Delegates the per-segment copy to phys_copyback().
 * NOTE(review): fragmentary view — loop structure is partially missing.
 */
101 plist_copyback(struct bus_dma_segment *plist, int sglist_cnt, int off,
102 int size, const void *src)
	/* Skip whole segments that lie entirely before 'off'. */
107 while (plist->ds_len <= off) {
108 KASSERT(sglist_cnt > 1, ("out of sglist entries"));
110 off -= plist->ds_len;
117 KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Clamp this chunk to the remainder of the current segment. */
120 if (todo > plist->ds_len - off)
121 todo = plist->ds_len - off;
	/* ds_addr is a physical address here; use the DMAP-based helper. */
123 phys_copyback(plist->ds_addr, off, todo, p);
/*
 * Copy 'size' bytes from 'src' into an array of vm_page_t at byte offset
 * 'off', using uiomove_fromphys() with a single-iovec SYSSPACE uio.
 * NOTE(review): fragmentary view — uio field setup lines (iov/iovcnt/offset)
 * are partially missing.
 */
133 vmpages_copyback(vm_page_t *m, int off, int size, const void *src)
	/* 'error' is only consumed by the KASSERT below on DIAGNOSTIC kernels. */
137 int error __diagused;
	/* uiomove writes *from* this buffer; cast away const for the iovec. */
139 iov[0].iov_base = __DECONST(void *, src);
140 iov[0].iov_len = size;
144 uio.uio_resid = size;
145 uio.uio_segflg = UIO_SYSSPACE;
	/* UIO_WRITE: data flows from the uio buffer into the pages. */
146 uio.uio_rw = UIO_WRITE;
147 error = uiomove_fromphys(m, off, size, &uio);
	/* Kernel-space copies of in-range data should never fail or be short. */
148 KASSERT(error == 0 && uio.uio_resid == 0, ("copy failed"));
/*
 * memdesc_copyback: copy 'size' bytes from 'src' into the buffer described
 * by 'mem' at offset 'off', dispatching on the descriptor type.
 * MEMDESC_UIO is unsupported (callers must use uiomove directly).
 * NOTE(review): fragmentary view — case labels and break statements are
 * partially missing from this dump.
 */
152 memdesc_copyback(struct memdesc *mem, int off, int size, const void *src)
154 KASSERT(off >= 0, ("%s: invalid offset %d", __func__, off));
	/* Bug fix: the message previously printed 'off' for an invalid size. */
155 KASSERT(size >= 0, ("%s: invalid size %d", __func__, size));
157 switch (mem->md_type) {
159 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* Plain kernel VA: direct memcpy. */
160 memcpy((char *)mem->u.md_vaddr + off, src, size);
163 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* Single physical range: copy via the direct map. */
164 phys_copyback(mem->u.md_paddr, off, size, src);
167 vlist_copyback(mem->u.md_list, mem->md_nseg, off, size, src);
170 plist_copyback(mem->u.md_list, mem->md_nseg, off, size, src);
173 panic("Use uiomove instead");
176 m_copyback(mem->u.md_mbuf, off, size, src);
178 case MEMDESC_VMPAGES:
179 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* md_offset is the start of valid data within the first page. */
180 vmpages_copyback(mem->u.md_ma, mem->md_offset + off, size,
184 __assert_unreachable();
189 * memdesc_copydata copies data from a buffer described by a memory
190 * descriptor into a destination buffer.
/*
 * Copy 'size' bytes out of physical memory at 'pa' + 'off' into a caller
 * buffer, going through the direct map one page at a time.  Mirror image
 * of phys_copyback().
 * NOTE(review): fragmentary view — loop structure not visible here.
 */
193 phys_copydata(vm_paddr_t pa, int off, int size, void *dst)
	/* A direct map is required to get a kernel VA for the physical page. */
200 KASSERT(PMAP_HAS_DMAP, ("direct-map required"));
204 page_off = pa & PAGE_MASK;
	/* Read at most up to the end of the current page. */
206 todo = min(PAGE_SIZE - page_off, size);
	/* Read-only source, hence the const-qualified pointer. */
207 p = (const void *)PHYS_TO_DMAP(pa);
/*
 * Copy 'size' bytes out of a virtual-address scatter/gather list starting
 * at byte offset 'off' into a caller buffer.  Mirror of vlist_copyback().
 * NOTE(review): fragmentary view — parameter list and loop-close lines are
 * partially missing.
 */
217 vlist_copydata(struct bus_dma_segment *vlist, int sglist_cnt, int off,
	/* Skip whole segments that lie entirely before 'off'. */
223 while (vlist->ds_len <= off) {
224 KASSERT(sglist_cnt > 1, ("out of sglist entries"));
226 off -= vlist->ds_len;
233 KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Clamp this chunk to the remainder of the current segment. */
236 if (todo > vlist->ds_len - off)
237 todo = vlist->ds_len - off;
	/* ds_addr holds a kernel VA here; copy out of it directly. */
239 memcpy(p, (char *)(uintptr_t)vlist->ds_addr + off, todo);
/*
 * Copy 'size' bytes out of a physical-address scatter/gather list starting
 * at byte offset 'off'.  Delegates per-segment copies to phys_copydata().
 * NOTE(review): fragmentary view — parameter list and loop structure are
 * partially missing.
 */
249 plist_copydata(struct bus_dma_segment *plist, int sglist_cnt, int off,
	/* Skip whole segments that lie entirely before 'off'. */
255 while (plist->ds_len <= off) {
256 KASSERT(sglist_cnt > 1, ("out of sglist entries"));
258 off -= plist->ds_len;
265 KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Clamp this chunk to the remainder of the current segment. */
268 if (todo > plist->ds_len - off)
269 todo = plist->ds_len - off;
	/* ds_addr is a physical address here; use the DMAP-based helper. */
271 phys_copydata(plist->ds_addr, off, todo, p);
/*
 * Copy 'size' bytes from an array of vm_page_t at byte offset 'off' into
 * 'dst', using uiomove_fromphys() with a single-iovec SYSSPACE uio.
 * Mirror of vmpages_copyback() with UIO_READ.
 * NOTE(review): fragmentary view — remaining uio setup lines are missing.
 */
281 vmpages_copydata(vm_page_t *m, int off, int size, void *dst)
	/* 'error' is only consumed by the KASSERT below on DIAGNOSTIC kernels. */
285 int error __diagused;
287 iov[0].iov_base = dst;
288 iov[0].iov_len = size;
292 uio.uio_resid = size;
293 uio.uio_segflg = UIO_SYSSPACE;
	/* UIO_READ: data flows from the pages into the uio buffer. */
294 uio.uio_rw = UIO_READ;
295 error = uiomove_fromphys(m, off, size, &uio);
	/* Kernel-space copies of in-range data should never fail or be short. */
296 KASSERT(error == 0 && uio.uio_resid == 0, ("copy failed"));
/*
 * memdesc_copydata: copy 'size' bytes at offset 'off' out of the buffer
 * described by 'mem' into 'dst', dispatching on the descriptor type.
 * MEMDESC_UIO is unsupported (callers must use uiomove directly).
 * NOTE(review): fragmentary view — case labels and break statements are
 * partially missing from this dump.
 */
300 memdesc_copydata(struct memdesc *mem, int off, int size, void *dst)
302 KASSERT(off >= 0, ("%s: invalid offset %d", __func__, off));
	/* Bug fix: the message previously printed 'off' for an invalid size. */
303 KASSERT(size >= 0, ("%s: invalid size %d", __func__, size));
305 switch (mem->md_type) {
307 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* Plain kernel VA: direct memcpy. */
308 memcpy(dst, (const char *)mem->u.md_vaddr + off, size);
311 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* Single physical range: copy via the direct map. */
312 phys_copydata(mem->u.md_paddr, off, size, dst);
315 vlist_copydata(mem->u.md_list, mem->md_nseg, off, size, dst);
318 plist_copydata(mem->u.md_list, mem->md_nseg, off, size, dst);
321 panic("Use uiomove instead");
324 m_copydata(mem->u.md_mbuf, off, size, dst);
326 case MEMDESC_VMPAGES:
327 KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
	/* md_offset is the start of valid data within the first page. */
328 vmpages_copydata(mem->u.md_ma, mem->md_offset + off, size,
332 __assert_unreachable();
337 * memdesc_alloc_ext_mbufs allocates a chain of external mbufs backed
338 * by the storage of a memory descriptor's data buffer.
/*
 * Build an external mbuf backed by a plain kernel VA buffer by handing the
 * whole range to the caller-supplied allocator.
 * NOTE(review): fragmentary view — the line presumably setting *actual_len
 * is not visible here; confirm against the full source.
 */
341 vaddr_ext_mbuf(memdesc_alloc_ext_mbuf_t *ext_alloc, void *cb_arg, int how,
342 void *buf, size_t len, size_t *actual_len)
	/* Single contiguous VA range: one callback covers it all. */
345 return (ext_alloc(cb_arg, how, buf, len));
/*
 * Decide whether physical address 'pa' can be appended to the M_EXTPG
 * mbuf 'm' without starting a new mbuf: the mbuf must have room, and the
 * existing pages must form a contiguous, page-aligned run.
 * NOTE(review): fragmentary view — the early 'return' statements for the
 * first three tests are not visible here.
 */
349 can_append_paddr(struct mbuf *m, vm_paddr_t pa)
353 /* Can always append to an empty mbuf. */
354 if (m->m_epg_npgs == 0)
357 /* Can't append to a full mbuf. */
358 if (m->m_epg_npgs == MBUF_PEXT_MAX_PGS)
361 /* Can't append a non-page-aligned address to a non-empty mbuf. */
362 if ((pa & PAGE_MASK) != 0)
365 /* Can't append if the last page is not a full page. */
366 last_len = m->m_epg_last_len;
	/* With a single page, the first-page offset also shortens it. */
367 if (m->m_epg_npgs == 1)
368 last_len += m->m_epg_1st_off;
369 return (last_len == PAGE_SIZE);
373 * Returns amount of data added to an M_EXTPG mbuf.
/*
 * Append as much of the physical range [pa, pa+len) to 'm' as fits:
 * a possibly-unaligned first page, then full pages, then a final partial
 * page.  Returns the number of bytes actually appended.
 * NOTE(review): fragmentary view — npgs increments, loop advances, and the
 * trailing return are not visible here.
 */
376 append_paddr_range(struct mbuf *m, vm_paddr_t pa, size_t len)
382 /* Append the first page. */
383 if (m->m_epg_npgs == 0) {
	/* Store the page-aligned base; the offset is kept separately. */
384 m->m_epg_pa[0] = trunc_page(pa);
386 m->m_epg_1st_off = pa & PAGE_MASK;
387 m->m_epg_last_len = PAGE_SIZE - m->m_epg_1st_off;
	/* The first page may also be the last if 'len' is small. */
388 if (m->m_epg_last_len > len)
389 m->m_epg_last_len = len;
390 m->m_len = m->m_epg_last_len;
391 len -= m->m_epg_last_len;
392 pa += m->m_epg_last_len;
393 appended += m->m_epg_last_len;
395 KASSERT(len == 0 || (pa & PAGE_MASK) == 0,
396 ("PA not aligned before full pages"));
	/* Append whole pages while the mbuf has room. */
399 while (len >= PAGE_SIZE && m->m_epg_npgs < MBUF_PEXT_MAX_PGS) {
400 m->m_epg_pa[m->m_epg_npgs] = pa;
402 m->m_epg_last_len = PAGE_SIZE;
403 m->m_len += PAGE_SIZE;
406 appended += PAGE_SIZE;
409 /* Final partial page. */
410 if (len > 0 && m->m_epg_npgs < MBUF_PEXT_MAX_PGS) {
411 KASSERT(len < PAGE_SIZE, ("final page is full page"));
412 m->m_epg_pa[m->m_epg_npgs] = pa;
414 m->m_epg_last_len = len;
/*
 * Build a chain of M_EXTPG mbufs covering the physical range
 * [pa, pa+len), allocating new mbufs via 'extpg_alloc' whenever the
 * current tail cannot accept more pages.  When 'can_truncate' is set, a
 * trailing partial page may be dropped (see the trim comment below).
 * NOTE(review): fragmentary view — error-unwind paths, the loop frame, and
 * the *actual_len store are not visible here.
 */
423 paddr_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
424 vm_paddr_t pa, size_t len, size_t *actual_len, bool can_truncate)
426 struct mbuf *m, *tail;
433 * Trim any partial page at the end, but not if it's
	/* Page-aligned end of the range used for the trim decision. */
436 end = trunc_page(pa + len);
442 m = tail = extpg_alloc(cb_arg, how);
	/* Start a fresh mbuf when 'pa' can't extend the current one. */
446 if (!can_append_paddr(tail, pa)) {
447 MBUF_EXT_PGS_ASSERT_SANITY(tail);
448 tail->m_next = extpg_alloc(cb_arg, how);
449 if (tail->m_next == NULL)
454 appended = append_paddr_range(tail, pa, len);
455 KASSERT(appended > 0, ("did not append anything"));
456 KASSERT(appended <= len, ("appended too much"));
462 MBUF_EXT_PGS_ASSERT_SANITY(tail);
/*
 * Build a chain of external mbufs over a virtual-address scatter/gather
 * list: one 'ext_alloc' callback per (clamped) segment chunk, chained via
 * m_next.
 * NOTE(review): fragmentary view — chain linking, error unwinding, and the
 * *actual_len store are not visible here.
 */
470 vlist_ext_mbuf(memdesc_alloc_ext_mbuf_t *ext_alloc, void *cb_arg, int how,
471 struct bus_dma_segment *vlist, u_int sglist_cnt, size_t offset,
472 size_t len, size_t *actual_len)
474 struct mbuf *m, *n, *tail;
	/* Skip whole segments that lie entirely before 'offset'. */
479 while (vlist->ds_len <= offset) {
480 KASSERT(sglist_cnt > 1, ("out of sglist entries"));
482 offset -= vlist->ds_len;
489 KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Clamp this chunk to the remainder of the current segment. */
492 if (todo > vlist->ds_len - offset)
493 todo = vlist->ds_len - offset;
	/* One external mbuf per segment chunk; ds_addr is a kernel VA here. */
495 n = ext_alloc(cb_arg, how, (char *)(uintptr_t)vlist->ds_addr +
/*
 * Build a chain of M_EXTPG mbufs over a physical-address scatter/gather
 * list, appending page runs from each segment into the current tail mbuf
 * and allocating a new mbuf when the tail cannot accept more pages.
 * When 'can_truncate' is set, a trailing partial page may be dropped
 * (see the in-line comment below).
 * NOTE(review): fragmentary view — the outer loop frame, segment advances,
 * error unwinding, and the function's return are not all visible here.
 */
521 plist_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
522 struct bus_dma_segment *plist, u_int sglist_cnt, size_t offset, size_t len,
523 size_t *actual_len, bool can_truncate)
526 struct mbuf *m, *tail;
527 size_t appended, totlen, todo;
	/* Skip whole segments that lie entirely before 'offset'. */
529 while (plist->ds_len <= offset) {
530 KASSERT(sglist_cnt > 1, ("out of sglist entries"));
532 offset -= plist->ds_len;
538 m = tail = extpg_alloc(cb_arg, how);
542 KASSERT(sglist_cnt >= 1, ("out of sglist entries"));
	/* Physical start of the remaining data in this segment. */
544 pa = plist->ds_addr + offset;
546 if (todo > plist->ds_len - offset)
547 todo = plist->ds_len - offset;
550 * If truncation is enabled, avoid sending a final
551 * partial page, but only if there is more data
552 * available in the current segment. Also, at least
553 * some data must be sent, so only drop the final page
554 * for this segment if the segment spans multiple
555 * pages or some other data is already queued.
557 else if (can_truncate) {
560 end = trunc_page(pa + len);
	/* Whole remaining range is a partial page: keep it only if nothing
	 * has been queued yet (totlen == 0). */
561 if (end <= pa && totlen != 0) {
563 * This last segment is only a partial
	/* Start a fresh mbuf when 'pa' can't extend the current one. */
577 if (!can_append_paddr(tail, pa)) {
578 MBUF_EXT_PGS_ASSERT_SANITY(tail);
579 tail->m_next = extpg_alloc(cb_arg, how);
580 if (tail->m_next == NULL)
585 appended = append_paddr_range(tail, pa, todo);
586 KASSERT(appended > 0, ("did not append anything"));
593 MBUF_EXT_PGS_ASSERT_SANITY(tail);
	/* Report how many bytes were actually placed in the chain. */
594 *actual_len = totlen;
/*
 * Build a chain of M_EXTPG mbufs over an array of vm_page_t: an
 * optionally-offset first page, then full pages, then a final partial
 * page, allocating a new mbuf whenever the tail fills up.  When
 * truncation applies, the trailing partial page may be trimmed.
 * NOTE(review): fragmentary view — page-array advances, npgs increments,
 * error unwinding, and the *actual_len store are not all visible here.
 */
602 vmpages_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
603 vm_page_t *ma, size_t offset, size_t len, size_t *actual_len,
606 struct mbuf *m, *tail;
	/* Advance 'ma' past whole pages consumed by 'offset'. */
608 while (offset >= PAGE_SIZE) {
617 * Trim any partial page at the end, but not if it's
620 end = trunc_page(offset + len);
626 m = tail = extpg_alloc(cb_arg, how);
	/* First page: record its PA plus the in-page start offset. */
631 m->m_epg_pa[0] = VM_PAGE_TO_PHYS(*ma);
634 m->m_epg_1st_off = offset;
635 m->m_epg_last_len = PAGE_SIZE - offset;
	/* The first page may also be the last if 'len' is small. */
636 if (m->m_epg_last_len > len)
637 m->m_epg_last_len = len;
638 m->m_len = m->m_epg_last_len;
639 len -= m->m_epg_last_len;
	/* Whole pages; start a new mbuf whenever the tail fills up. */
642 while (len >= PAGE_SIZE) {
643 if (tail->m_epg_npgs == MBUF_PEXT_MAX_PGS) {
644 MBUF_EXT_PGS_ASSERT_SANITY(tail);
645 tail->m_next = extpg_alloc(cb_arg, how);
646 if (tail->m_next == NULL)
651 tail->m_epg_pa[tail->m_epg_npgs] = VM_PAGE_TO_PHYS(*ma);
654 tail->m_epg_last_len = PAGE_SIZE;
655 tail->m_len += PAGE_SIZE;
659 /* Last partial page. */
661 if (tail->m_epg_npgs == MBUF_PEXT_MAX_PGS) {
662 MBUF_EXT_PGS_ASSERT_SANITY(tail);
663 tail->m_next = extpg_alloc(cb_arg, how);
664 if (tail->m_next == NULL)
669 tail->m_epg_pa[tail->m_epg_npgs] = VM_PAGE_TO_PHYS(*ma);
672 tail->m_epg_last_len = len;
676 MBUF_EXT_PGS_ASSERT_SANITY(tail);
684 * Somewhat similar to m_copym but optionally avoids a partial mbuf at
/*
 * Build a new mbuf chain mirroring [offset, offset+len) of chain 'm0'.
 * For M_EXT/M_EXTPG source mbufs the data is shared (m_data aliased,
 * presumably with a reference taken on the missing lines — TODO confirm);
 * otherwise bytes are memcpy'd into fresh MT_DATA mbufs.
 * NOTE(review): fragmentary view — chain advance, reference handling,
 * error unwinding, and the return are not all visible here.
 */
688 mbuf_subchain(struct mbuf *m0, size_t offset, size_t len,
689 size_t *actual_len, bool can_truncate, int how)
691 struct mbuf *m, *tail;
	/* Skip whole mbufs that lie entirely before 'offset'. */
694 while (offset >= m0->m_len) {
699 /* Always return at least one mbuf. */
700 totlen = m0->m_len - offset;
704 m = m_get(how, MT_DATA);
	/* External storage: alias the source data rather than copying. */
708 if (m0->m_flags & (M_EXT | M_EXTPG)) {
709 m->m_data = m0->m_data + offset;
712 memcpy(mtod(m, void *), mtodo(m0, offset), m->m_len);
719 * If truncation is enabled, don't send any partial
720 * mbufs besides the first one.
722 if (can_truncate && m0->m_len > len)
725 tail->m_next = m_get(how, MT_DATA);
726 if (tail->m_next == NULL)
729 tail->m_len = m0->m_len;
730 if (m0->m_flags & (M_EXT | M_EXTPG)) {
731 tail->m_data = m0->m_data;
734 memcpy(mtod(tail, void *), mtod(m0, void *),
737 totlen += tail->m_len;
	/* Report how many bytes the new chain actually carries. */
741 *actual_len = totlen;
/*
 * memdesc_alloc_ext_mbufs: public entry point that builds an external-
 * storage mbuf chain over [offset, offset+len) of 'mem', dispatching to
 * the per-type helper above.  VA-backed types use 'ext_alloc'; physical-
 * page-backed types use 'extpg_alloc'.  MEMDESC_UIO is unsupported.
 * NOTE(review): fragmentary view — case labels, breaks, and the tail of
 * the function (beyond the final KASSERTs) are not visible here.
 */
749 memdesc_alloc_ext_mbufs(struct memdesc *mem,
750 memdesc_alloc_ext_mbuf_t *ext_alloc,
751 memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
752 size_t offset, size_t len, size_t *actual_len, bool can_truncate)
757 switch (mem->md_type) {
759 m = vaddr_ext_mbuf(ext_alloc, cb_arg, how,
760 (char *)mem->u.md_vaddr + offset, len, &done);
763 m = paddr_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_paddr +
764 offset, len, &done, can_truncate);
767 m = vlist_ext_mbuf(ext_alloc, cb_arg, how, mem->u.md_list,
768 mem->md_nseg, offset, len, &done);
771 m = plist_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_list,
772 mem->md_nseg, offset, len, &done, can_truncate);
775 panic("uio not supported");
777 m = mbuf_subchain(mem->u.md_mbuf, offset, len, &done,
780 case MEMDESC_VMPAGES:
	/* md_offset is the start of valid data within the first page. */
781 m = vmpages_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_ma,
782 mem->md_offset + offset, len, &done, can_truncate);
785 __assert_unreachable();
	/* Sanity: the chain never exceeds the request, matches it exactly
	 * when truncation was not possible, and its m_length agrees. */
791 KASSERT(done <= len, ("chain too long"));
793 KASSERT(done == len, ("short chain with no limit"));
795 KASSERT(m_length(m, NULL) == done, ("length mismatch"));
796 if (actual_len != NULL)