/*-
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <machine/bus.h>

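/*
 * Machine-independent helpers for the bus_dma(9) map-load path.  The
 * bus_dmamap_load*() entry points below normalize each buffer type
 * (flat KVA, physical ranges, mbuf chains, uio, bio, CAM CCBs) into
 * calls to the MD back-end hooks such as _bus_dmamap_load_buffer()
 * and _bus_dmamap_load_phys().
 */
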
/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * are exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
        int error;

        error = 0;
        for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
                char *addr;
                size_t ds_len;

                KASSERT((offset < list->ds_len),
                    ("Invalid mid-segment offset"));
                addr = (char *)(uintptr_t)list->ds_addr + offset;
                ds_len = list->ds_len - offset;
                offset = 0;
                if (ds_len > length)
                        ds_len = length;
                length -= ds_len;
                KASSERT((ds_len != 0), ("Segment length is zero"));
                error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
                    flags, NULL, nsegs);
                if (error)
                        break;
        }
        return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
        int error;

        error = 0;
        for (; sglist_cnt > 0; sglist_cnt--, list++) {
                error = _bus_dmamap_load_phys(dmat, map,
                    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
                    nsegs);
                if (error)
                        break;
        }
        return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
        struct mbuf *m;
        int error;

        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
                            m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
                            segs, nsegs);
                }
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, *nsegs);
        return (error);
}

/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

        if ((bio->bio_flags & BIO_VLIST) != 0) {
                bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
                return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
                    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
                    bio->bio_bcount));
        }

        if ((bio->bio_flags & BIO_UNMAPPED) != 0)
                return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
                    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

        return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
            bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

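/*
 * Trivial page-array loader: walk the vm_page array one page at a time
 * and hand each physical range to _bus_dmamap_load_phys().  A back-end
 * without a specialized unmapped-page loader can use this as its
 * _bus_dmamap_load_ma() implementation.
 */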
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
        vm_paddr_t paddr;
        bus_size_t len;
        int error, i;

        error = 0;
        for (i = 0; tlen > 0; i++, tlen -= len) {
                len = min(PAGE_SIZE - ma_offs, tlen);
                paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
                error = _bus_dmamap_load_phys(dmat, map, paddr, len,
                    flags, segs, segp);
                if (error != 0)
                        break;
                ma_offs = 0;
        }
        return (error);
}

/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
        struct ccb_hdr *ccb_h;
        void *data_ptr;
        int error;
        uint32_t dxfer_len;
        uint16_t sglist_cnt;

        error = 0;
        ccb_h = &ccb->ccb_h;
        switch (ccb_h->func_code) {
        case XPT_SCSI_IO: {
                struct ccb_scsiio *csio;

                csio = &ccb->csio;
                data_ptr = csio->data_ptr;
                dxfer_len = csio->dxfer_len;
                sglist_cnt = csio->sglist_cnt;
                break;
        }
        case XPT_CONT_TARGET_IO: {
                struct ccb_scsiio *ctio;

                ctio = &ccb->ctio;
                data_ptr = ctio->data_ptr;
                dxfer_len = ctio->dxfer_len;
                sglist_cnt = ctio->sglist_cnt;
                break;
        }
        case XPT_ATA_IO: {
                struct ccb_ataio *ataio;

                ataio = &ccb->ataio;
                data_ptr = ataio->data_ptr;
                dxfer_len = ataio->dxfer_len;
                sglist_cnt = 0;
                break;
        }
        case XPT_NVME_IO:
        case XPT_NVME_ADMIN: {
                struct ccb_nvmeio *nvmeio;

                nvmeio = &ccb->nvmeio;
                data_ptr = nvmeio->data_ptr;
                dxfer_len = nvmeio->dxfer_len;
                sglist_cnt = nvmeio->sglist_cnt;
                break;
        }
        default:
                panic("_bus_dmamap_load_ccb: Unsupported func code %d",
                    ccb_h->func_code);
        }

        switch ((ccb_h->flags & CAM_DATA_MASK)) {
        case CAM_DATA_VADDR:
                error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
                    kernel_pmap, flags, NULL, nsegs);
                break;
        case CAM_DATA_PADDR:
                error = _bus_dmamap_load_phys(dmat, map,
                    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
                    nsegs);
                break;
        case CAM_DATA_SG:
                error = _bus_dmamap_load_vlist(dmat, map,
                    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
                    nsegs, flags, 0, dxfer_len);
                break;
        case CAM_DATA_SG_PADDR:
                error = _bus_dmamap_load_plist(dmat, map,
                    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
                break;
        case CAM_DATA_BIO:
                error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
                    nsegs, flags);
                break;
        default:
                panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
                    (int)(ccb_h->flags & CAM_DATA_MASK));
        }
        return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
        bus_size_t resid;
        bus_size_t minlen;
        struct iovec *iov;
        pmap_t pmap;
        caddr_t addr;
        int error, i;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = kernel_pmap;
        resid = uio->uio_resid;
        iov = uio->uio_iov;
        error = 0;

        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                addr = (caddr_t) iov[i].iov_base;
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map, addr,
                            minlen, pmap, flags, NULL, nsegs);
                        resid -= minlen;
                }
        }
        return (error);
}

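/*
 * Typical driver-side use of the load functions below (illustrative
 * sketch only; "sc", the softc fields and the callback name are
 * hypothetical and not part of this file):
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->foo_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_buf,
 *	    FOO_BUFSIZE, foo_dma_cb, sc, BUS_DMA_NOWAIT);
 *
 * Without BUS_DMA_NOWAIT the load may return EINPROGRESS and invoke the
 * callback later, once mapping resources become available.
 */
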
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_dma_segment_t *segs;
        struct memdesc mem;
        int error;
        int nsegs;

        if ((flags & BUS_DMA_NOWAIT) == 0) {
                mem = memdesc_vaddr(buf, buflen);
                _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
        }

        nsegs = -1;
        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
            flags, NULL, &nsegs);
        nsegs++;

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);

        if (error == EINPROGRESS)
                return (EINPROGRESS);

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, 0);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferral is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}

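/*
 * Load an mbuf chain and invoke the callback2-style callback, which
 * also receives the total mapped length (m0->m_pkthdr.len).  The load
 * is always attempted with BUS_DMA_NOWAIT, so mbuf loads are never
 * deferred.
 */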
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
        bus_dma_segment_t *segs;
        int nsegs, error;

        flags |= BUS_DMA_NOWAIT;
        nsegs = -1;
        error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
        ++nsegs;

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len,
                    error);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);
        return (error);
}

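/*
 * bus_dmamap_load_mbuf_sg() is the variant most network drivers use in
 * their transmit path: the caller supplies the segment array and gets
 * the segment count back synchronously.  Illustrative sketch (all names
 * other than the busdma and mbuf calls are hypothetical):
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->foo_tx_dmat, txd->map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		(defragment the chain with m_collapse() or m_defrag()
 *		 and retry, or drop the packet)
 *	}
 */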
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
        int error;

        flags |= BUS_DMA_NOWAIT;
        *nsegs = -1;
        error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
        ++*nsegs;
        _bus_dmamap_complete(dmat, map, segs, *nsegs, error);
        return (error);
}

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
        bus_dma_segment_t *segs;
        int nsegs, error;

        flags |= BUS_DMA_NOWAIT;
        nsegs = -1;
        error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
        ++nsegs;

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);
        return (error);
}

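/*
 * Load the data area described by a CAM CCB.  CCBs with CAM_DIR_NONE
 * carry no data and complete immediately with a NULL segment list;
 * everything else is dispatched through _bus_dmamap_load_ccb() above
 * according to the CCB's CAM_DATA_MASK addressing mode.
 */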
int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
        bus_dma_segment_t *segs;
        struct ccb_hdr *ccb_h;
        struct memdesc mem;
        int error;
        int nsegs;

        ccb_h = &ccb->ccb_h;
        if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
                callback(callback_arg, NULL, 0, 0);
                return (0);
        }
        if ((flags & BUS_DMA_NOWAIT) == 0) {
                mem = memdesc_ccb(ccb);
                _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
        }
        nsegs = -1;
        error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
        nsegs++;

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);

        if (error == EINPROGRESS)
                return (EINPROGRESS);

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, error);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferral is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
        bus_dma_segment_t *segs;
        struct memdesc mem;
        int error;
        int nsegs;

        if ((flags & BUS_DMA_NOWAIT) == 0) {
                mem = memdesc_bio(bio);
                _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
        }
        nsegs = -1;
        error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
        nsegs++;

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);

        if (error == EINPROGRESS)
                return (EINPROGRESS);

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, error);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferral is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}

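/*
 * Load an arbitrary memory descriptor.  This is the generic entry
 * point: the memdesc records which loader to dispatch to via
 * mem->md_type.  Illustrative sketch of wrapping a plain kernel buffer
 * (variable and callback names are hypothetical):
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(buf, buflen);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, foo_dma_cb, sc,
 *	    BUS_DMA_NOWAIT);
 */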
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_dma_segment_t *segs;
        int error;
        int nsegs;

        if ((flags & BUS_DMA_NOWAIT) == 0)
                _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

        nsegs = -1;
        error = 0;
        switch (mem->md_type) {
        case MEMDESC_VADDR:
                error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
                    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
                break;
        case MEMDESC_PADDR:
                error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
                    mem->md_opaque, flags, NULL, &nsegs);
                break;
        case MEMDESC_VLIST:
                error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
                    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
                break;
        case MEMDESC_PLIST:
                error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
                    mem->md_opaque, &nsegs, flags);
                break;
        case MEMDESC_BIO:
                error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
                    &nsegs, flags);
                break;
        case MEMDESC_UIO:
                error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
                    &nsegs, flags);
                break;
        case MEMDESC_MBUF:
                error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
                    NULL, &nsegs, flags);
                break;
        case MEMDESC_CCB:
                error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
                    flags);
                break;
        }
        nsegs++;

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, flags, error, nsegs);

        if (error == EINPROGRESS)
                return (EINPROGRESS);

        segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
        if (error)
                (*callback)(callback_arg, segs, 0, error);
        else
                (*callback)(callback_arg, segs, nsegs, 0);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferral is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}