/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * are exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

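/*
 * Example (illustrative sketch, not part of this file): a vlist is an
 * array of bus_dma_segment_t entries whose ds_addr fields carry kernel
 * virtual addresses rather than bus addresses.  The buffer names below
 * are hypothetical:
 *
 *	bus_dma_segment_t vlist[2];
 *
 *	vlist[0].ds_addr = (bus_addr_t)(uintptr_t)hdr_buf;
 *	vlist[0].ds_len = hdr_len;
 *	vlist[1].ds_addr = (bus_addr_t)(uintptr_t)payload_buf;
 *	vlist[1].ds_len = payload_len;
 */
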
/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

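/*
 * Example (sketch): a machine-dependent busdma backend that has no
 * specialized page-array loader can simply forward to the trivial
 * version above from its _bus_dmamap_load_ma() implementation:
 *
 *	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs,
 *	    flags, segs, segp));
 */
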
/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN: {
		struct ccb_nvmeio *nvmeio;

		nvmeio = &ccb->nvmeio;
		data_ptr = nvmeio->data_ptr;
		dxfer_len = nvmeio->dxfer_len;
		sglist_cnt = nvmeio->sglist_cnt;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags, 0, dxfer_len);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

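/*
 * Example (hypothetical driver code, sketch only): a caller typically
 * pairs bus_dmamap_load() with a callback that records the segment
 * list; the "mydrv" names are invented for illustration:
 *
 *	static void
 *	mydrv_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct mydrv_cmd *cmd = arg;
 *
 *		if (error != 0)
 *			return;
 *		cmd->busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dmat, cmd->map, cmd->vaddr, cmd->len,
 *	    mydrv_load_cb, cmd, BUS_DMA_NOWAIT);
 */
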
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

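/*
 * Example (hypothetical NIC transmit path, sketch only): on EFBIG the
 * chain has more fragments than the tag allows, so it is commonly
 * defragmented and reloaded; MYDRV_NTXSEGS is an invented constant:
 *
 *	bus_dma_segment_t segs[MYDRV_NTXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m, segs,
 *	    &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_collapse(m, M_NOWAIT, MYDRV_NTXSEGS);
 *		error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m,
 *		    segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 */
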
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

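/*
 * Example (sketch): a CAM SIM usually calls this from its action
 * routine and freezes its queue while a deferred load is pending;
 * "sc", "cmd", and mydrv_execute_cb are hypothetical:
 *
 *	error = bus_dmamap_load_ccb(sc->buffer_dmat, cmd->map, ccb,
 *	    mydrv_execute_cb, cmd, 0);
 *	if (error == EINPROGRESS) {
 *		xpt_freeze_simq(sc->sim, 1);
 *		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 *	}
 */
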
int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

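/*
 * Example (sketch, hypothetical names): a block driver hands the bio
 * straight to this function and lets it pick the vlist, unmapped, or
 * mapped path:
 *
 *	error = bus_dmamap_load_bio(sc->dmat, slot->map, bp,
 *	    mydrv_bio_cb, slot, 0);
 */
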
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

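/*
 * Example (sketch): callers that must remember an arbitrary buffer and
 * load it later capture it in a struct memdesc first:
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_uio(uio);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, mydrv_cb, arg,
 *	    BUS_DMA_NOWAIT);
 */
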
int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

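/*
 * Example (sketch): a crypto driver loads the request's input buffer
 * with bus_dmamap_load_crp() and, for requests carrying a separate
 * output buffer, loads crp->crp_obuf via bus_dmamap_load_crp_buffer();
 * "sc" and "s" are hypothetical:
 *
 *	error = bus_dmamap_load_crp(sc->dmat, s->input_map, crp,
 *	    mydrv_crp_cb, s, BUS_DMA_NOWAIT);
 */
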
void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

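/*
 * Example (sketch): a driver overrides only the defaults it cares
 * about before creating the tag:
 *
 *	bus_dma_template_t t;
 *	bus_dma_tag_t dmat;
 *	int error;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = MCLBYTES;
 *	t.nsegments = 1;
 *	error = bus_dma_template_tag(&t, &dmat);
 */
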
void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
}

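/*
 * Example (sketch, assuming bus_dma_param_t's value union is anonymous
 * as in sys/bus_dma.h): the key/value form sets the same fields through
 * BD_PARAM_* pairs; designated initializers keep it readable:
 *
 *	bus_dma_template_t t;
 *	bus_dma_param_t params[] = {
 *		{ .key = BD_PARAM_MAXSIZE, .num = PAGE_SIZE },
 *		{ .key = BD_PARAM_NSEGMENTS, .num = 1 },
 *	};
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	bus_dma_template_fill(&t, params, nitems(params));
 */
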
#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif