/*-
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <machine/bus.h>

/*
 * Load a list of virtual addresses.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_buffer(dmat, map,
		    (void *)(uintptr_t)list->ds_addr, list->ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
			    m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
			    segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{
	int error;

	if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
		error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
		    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
	} else {
		error = _bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs);
	}
	return (error);
}

/*
 * Load a vm_page array one page at a time via _bus_dmamap_load_phys().
 */
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
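
/*
 * Example usage (sketch): a driver loading a single kernel-virtual buffer.
 * The foo_* names and softc layout are hypothetical.  With BUS_DMA_NOWAIT
 * the load never defers, so any non-zero return is a hard failure; without
 * it the call may return EINPROGRESS and invoke the callback later, when
 * the deferred load completes.
 */
#if 0
struct foo_softc {
	bus_dma_tag_t	foo_dmat;
	bus_dmamap_t	foo_map;
	void		*foo_buf;
	bus_size_t	foo_buflen;
	bus_addr_t	foo_busaddr;
};

static void
foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = arg;

	if (error != 0)
		return;
	/* Single segment expected; record its bus address for the device. */
	sc->foo_busaddr = segs[0].ds_addr;
}

static int
foo_load(struct foo_softc *sc)
{

	return (bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_buf,
	    sc->foo_buflen, foo_dma_cb, sc, BUS_DMA_NOWAIT));
}
#endif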

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}
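
/*
 * Example usage (sketch): a network driver mapping an mbuf chain for
 * transmit.  bus_dmamap_load_mbuf_sg() always behaves as if BUS_DMA_NOWAIT
 * were set and fills the caller-supplied segment array instead of invoking
 * a callback.  The bar_* names and NTXSEGS value are hypothetical.
 */
#if 0
#define	NTXSEGS	32

static int
bar_encap(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t segs[NTXSEGS];
	int error, i, nsegs;

	error = bus_dmamap_load_mbuf_sg(dmat, map, m, segs, &nsegs, 0);
	if (error != 0)
		return (error);
	for (i = 0; i < nsegs; i++) {
		/* Post segs[i].ds_addr / segs[i].ds_len to the TX ring. */
	}
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
	return (0);
}
#endif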

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
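
/*
 * Example usage (sketch): a CAM SIM driver mapping the data of a queued
 * CCB from its action routine.  The baz_* names are hypothetical and error
 * handling is reduced to failing the CCB.
 */
#if 0
static void
baz_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	union ccb *ccb = arg;

	if (error != 0) {
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		return;
	}
	/* Build the controller S/G list from segs[0..nseg-1] and start I/O. */
}

static void
baz_action_xfer(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb)
{
	int error;

	error = bus_dmamap_load_ccb(dmat, map, ccb, baz_dma_cb, ccb, 0);
	if (error != 0 && error != EINPROGRESS) {
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
	}
}
#endif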

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
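
/*
 * Example usage (sketch): a storage driver mapping the data buffer of a
 * struct bio in its strategy path.  The helper handles both mapped and
 * unmapped (BIO_UNMAPPED) bios.  The qux_* names are hypothetical.
 */
#if 0
static void
qux_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bio *bp = arg;

	if (error != 0) {
		biofinish(bp, NULL, error);
		return;
	}
	/* Program segs[] into the controller and start the transfer. */
}

static void
qux_strategy(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bp)
{
	int error;

	error = bus_dmamap_load_bio(dmat, map, bp, qux_dma_cb, bp, 0);
	if (error != 0 && error != EINPROGRESS)
		biofinish(bp, NULL, error);
}
#endif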

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
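
/*
 * Example usage (sketch): building a memory descriptor by hand and loading
 * it through the type-generic entry point.  memdesc_vaddr() is the same
 * constructor used by bus_dmamap_load() above; the quux_* names are
 * hypothetical.
 */
#if 0
static int
quux_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t len,
    bus_dmamap_callback_t *cb, void *cbarg)
{
	struct memdesc mem;

	mem = memdesc_vaddr(buf, len);
	return (bus_dmamap_load_mem(dmat, map, &mem, cb, cbarg,
	    BUS_DMA_NOWAIT));
}
#endif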