/*-
 * Copyright (c) 1999, 2000 Matthew R. Green
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: sbus.c,v 1.13 1999/05/23 07:24:02 mrg Exp
 *	from: @(#)sbus.c	8.1 (Berkeley) 6/11/93
 *	from: NetBSD: iommu.c,v 1.42 2001/08/06 22:02:58 eeh Exp
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/hviommu.h>
#include <machine/pmap.h>
#include <machine/resource.h>

#include <machine/hypervisorvar.h>
#include <machine/hv_api.h>

#include <sys/rman.h>
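/*
 * Tunables for the lazy DVMA allocation scheme: each map gets at most
 * IOMMU_MAX_PRE bytes of preallocated DVMA space, spread over at most
 * IOMMU_MAX_PRE_SEG resources (see hviommu_dvmamap_create() below).
 */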
#define	IOMMU_MAX_PRE		(32 * 1024)
#define	IOMMU_MAX_PRE_SEG	3
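/* IOTTEs batched per hv_pci_iommu_map() call; this value is an assumption. */
#define	IOTTE_CNT		64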
#define	IO_PAGE_SIZE		PAGE_SIZE_8K
#define	IO_PAGE_MASK		PAGE_MASK_8K
#define	IO_PAGE_SHIFT		PAGE_SHIFT_8K
#define	round_io_page(x)	round_page(x)
#define	trunc_io_page(x)	trunc_page(x)
MALLOC_DEFINE(M_HVIOMMU, "hviommu", "HyperVisor IOMMU");

TAILQ_HEAD(hviommu_maplruq_head, bus_dmamap);

struct hviommu {
	struct mtx			him_mtx;
	devhandle_t			him_handle;
	u_long				him_dvmabase;
	u_long				him_dvmasize;
	struct hviommu_maplruq_head	him_maplruq;
	struct rman			him_rman;
};
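/*
 * A DVMA address is converted to a TSB index by stripping the DVMA base
 * and dividing by the IO page size; e.g. with him_dvmabase == 0 and 8K
 * pages, DVMA address 0x4000 selects TSB entry 2.
 */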
#define	VA_TO_TSBID(him, va)	(((va) - (him)->him_dvmabase) >> IO_PAGE_SHIFT)
#define	DPRINTF	printf
/*
 * Always overallocate one page; this is needed to handle alignment of the
 * buffer, so it makes sense to use a lazy allocation scheme.
 */
#define	IOMMU_SIZE_ROUNDUP(sz)						\
	(round_io_page(sz) + IO_PAGE_SIZE)
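/*
 * For example, a 5KB request rounds up to one 8K IO page plus the extra
 * page, i.e. 16K of DVMA space.
 */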
/* Resource helpers */
#define	IOMMU_RES_TO(v)		((v) >> IO_PAGE_SHIFT)
#define	IOMMU_RES_START(res)						\
	((bus_addr_t)rman_get_start(res) << IO_PAGE_SHIFT)
#define	IOMMU_RES_END(res)						\
	((bus_addr_t)(rman_get_end(res) + 1) << IO_PAGE_SHIFT)
#define	IOMMU_RES_SIZE(res)						\
	((bus_size_t)rman_get_size(res) << IO_PAGE_SHIFT)

/* Helpers for struct bus_dmamap_res */
#define	BDR_START(r)	IOMMU_RES_START((r)->dr_res)
#define	BDR_END(r)	IOMMU_RES_END((r)->dr_res)
#define	BDR_SIZE(r)	IOMMU_RES_SIZE((r)->dr_res)
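/*
 * The rman arena tracks IO page numbers rather than byte addresses, so
 * ranges are shifted by IO_PAGE_SHIFT in both directions; the BDR_*
 * macros return the byte-addressed bounds of a map's DVMA resources.
 */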
/* Locking macros. */
#define	HIM_LOCK(him)		mtx_lock(&him->him_mtx)
#define	HIM_LOCK_ASSERT(him)	mtx_assert(&him->him_mtx, MA_OWNED)
#define	HIM_UNLOCK(him)		mtx_unlock(&him->him_mtx)
/* LRU queue handling for lazy resource allocation. */
static void
hviommu_map_insq(struct hviommu *him, bus_dmamap_t map)
{

	HIM_LOCK_ASSERT(him);
	if (!SLIST_EMPTY(&map->dm_reslist)) {
		if (map->dm_onq)
			TAILQ_REMOVE(&him->him_maplruq, map, dm_maplruq);
		TAILQ_INSERT_TAIL(&him->him_maplruq, map, dm_maplruq);
		map->dm_onq = 1;
	}
}
static void
hviommu_map_remq(struct hviommu *him, bus_dmamap_t map)
{

	HIM_LOCK_ASSERT(him);
	if (map->dm_onq)
		TAILQ_REMOVE(&him->him_maplruq, map, dm_maplruq);
	map->dm_onq = 0;
}
struct hviommu *
hviommu_init(devhandle_t dh, u_long dvmabase, u_long dvmasize)
{
	struct hviommu *him;
	u_long end;

	him = malloc(sizeof *him, M_HVIOMMU, M_WAITOK | M_ZERO);

	mtx_init(&him->him_mtx, "hviommu", NULL, MTX_DEF);
	him->him_handle = dh;
	him->him_dvmabase = dvmabase;
	him->him_dvmasize = dvmasize;

	TAILQ_INIT(&him->him_maplruq);
	him->him_rman.rm_type = RMAN_ARRAY;
	him->him_rman.rm_descr = "HyperVisor IOMMU Memory";
	end = him->him_dvmabase + him->him_dvmasize - 1;
	if (rman_init(&him->him_rman) != 0 ||
	    rman_manage_region(&him->him_rman, him->him_dvmabase >>
	    IO_PAGE_SHIFT, end >> IO_PAGE_SHIFT) != 0)
		panic("%s: can't initialize rman", __func__);

	return (him);
}
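/*
 * A hypothetical bus attachment would create one hviommu per device
 * handle and hook it into its DMA tag, e.g.:
 *
 *	sc->sc_him = hviommu_init(dh, dvmabase, dvmasize);
 *	dt->dt_cookie = sc->sc_him;
 *	dt->dt_mt = &hviommu_dma_methods;
 */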
static void
hviommu_remove(struct hviommu *him, vm_offset_t va, vm_size_t len)
{
	uint64_t error;
	pages_t demapped;

	KASSERT(va >= him->him_dvmabase,
	    ("%s: va 0x%lx not in DVMA space", __func__, (u_long)va));
	KASSERT(va + len >= va,
	    ("%s: va 0x%lx + len 0x%lx wraps", __func__, (long)va, (long)len));
	KASSERT((va & IO_PAGE_MASK) == 0 && (len & IO_PAGE_MASK) == 0,
	    ("%s: va %#lx or len %#lx not page aligned", __func__, va, len));

	while (len > 0) {
		if ((error = hv_pci_iommu_demap(him->him_handle,
		    VA_TO_TSBID(him, va), len >> IO_PAGE_SHIFT, &demapped))) {
			printf("%s: demap: va: %#lx, npages: %#lx, err: %ld\n",
			    __func__, va, len >> IO_PAGE_SHIFT, error);
			/* Skip one page so that we cannot loop forever. */
			demapped = 1;
		}
		va += demapped << IO_PAGE_SHIFT;
		len -= demapped << IO_PAGE_SHIFT;
	}
}
/*
 * Allocate DVMA virtual memory for a map.  The map may not be on a queue,
 * so that it can be freely modified.
 */
static int
hviommu_dvma_valloc(bus_dma_tag_t t, struct hviommu *him, bus_dmamap_t map,
    bus_size_t size)
{
	struct resource *res;
	struct bus_dmamap_res *bdr;
	bus_size_t align, sgsize;

	KASSERT(!map->dm_onq, ("hviommu_dvma_valloc: map on queue!"));
	if ((bdr = malloc(sizeof(*bdr), M_HVIOMMU, M_NOWAIT)) == NULL)
		return (EAGAIN);
	/*
	 * If a boundary is specified, a map cannot be larger than it; however
	 * we do not clip currently, as that does not play well with the lazy
	 * allocation code.
	 * Alignment to a page boundary is always enforced.
	 */
	align = (t->dt_alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT;
	sgsize = IOMMU_RES_TO(round_io_page(size));
	if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE)
		panic("hviommu_dvmamap_load: illegal boundary specified");
	res = rman_reserve_resource_bound(&him->him_rman, 0L,
	    IOMMU_RES_TO(t->dt_lowaddr), sgsize,
	    IOMMU_RES_TO(t->dt_boundary),
	    RF_ACTIVE | rman_make_alignment_flags(align), NULL);
	if (res == NULL) {
		free(bdr, M_HVIOMMU);
		return (ENOMEM);
	}

	bdr->dr_res = res;
	bdr->dr_used = 0;
	SLIST_INSERT_HEAD(&map->dm_reslist, bdr, dr_link);
	return (0);
}
/* Unload the map and mark all resources as unused, but do not free them. */
static void
hviommu_dvmamap_vunload(struct hviommu *him, bus_dmamap_t map)
{
	struct bus_dmamap_res *r;

	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		hviommu_remove(him, BDR_START(r), BDR_SIZE(r));
		r->dr_used = 0;
	}
}
/* Free a DVMA virtual memory resource. */
static void
hviommu_dvma_vfree_res(bus_dmamap_t map, struct bus_dmamap_res *r)
{

	KASSERT(r->dr_used == 0, ("hviommu_dvma_vfree_res: resource busy!"));
	if (r->dr_res != NULL && rman_release_resource(r->dr_res) != 0)
		printf("warning: DVMA space lost\n");
	SLIST_REMOVE(&map->dm_reslist, r, bus_dmamap_res, dr_link);
	free(r, M_HVIOMMU);
}
/* Free all DVMA virtual memory for a map. */
static void
hviommu_dvma_vfree(struct hviommu *him, bus_dmamap_t map)
{

	HIM_LOCK(him);
	hviommu_map_remq(him, map);
	hviommu_dvmamap_vunload(him, map);
	HIM_UNLOCK(him);
	while (!SLIST_EMPTY(&map->dm_reslist))
		hviommu_dvma_vfree_res(map, SLIST_FIRST(&map->dm_reslist));
}
/* Prune a map, freeing all unused DVMA resources. */
static bus_size_t
hviommu_dvma_vprune(struct hviommu *him, bus_dmamap_t map)
{
	struct bus_dmamap_res *r, *n;
	bus_size_t freed = 0;

	HIM_LOCK_ASSERT(him);
	for (r = SLIST_FIRST(&map->dm_reslist); r != NULL; r = n) {
		n = SLIST_NEXT(r, dr_link);
		if (r->dr_used == 0) {
			freed += BDR_SIZE(r);
			hviommu_dvma_vfree_res(map, r);
		}
	}
	if (SLIST_EMPTY(&map->dm_reslist))
		hviommu_map_remq(him, map);
	return (freed);
}
/*
 * Try to find a suitably-sized (and if requested, -aligned) slab of DVMA
 * memory with IO page offset voffs.
 */
static bus_addr_t
hviommu_dvma_vfindseg(bus_dmamap_t map, vm_offset_t voffs, bus_size_t size,
    bus_addr_t amask)
{
	struct bus_dmamap_res *r;
	bus_addr_t dvmaddr, dvmend;

	KASSERT(!map->dm_onq, ("hviommu_dvma_vfindseg: map on queue!"));
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		dvmaddr = round_io_page(BDR_START(r) + r->dr_used);
		/* Alignment can only work with voffs == 0. */
		dvmaddr = (dvmaddr + amask) & ~amask;
		dvmaddr += voffs;
		dvmend = dvmaddr + size;
		if (dvmend <= BDR_END(r)) {
			r->dr_used = dvmend - BDR_START(r);
			r->dr_offset = voffs;
			return (dvmaddr);
		}
	}
	return (0);
}
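/*
 * For instance, with BDR_START(r) == 0x10000, r->dr_used == 0x100,
 * voffs == 0x200 and amask == 0, the search starts at the page-rounded
 * address 0x12000 and returns 0x12200, with dr_used grown to cover the
 * whole span from the start of the slab.
 */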
/*
 * Try to find or allocate a slab of DVMA space; see above.
 */
static int
hviommu_dvma_vallocseg(bus_dma_tag_t dt, struct hviommu *him, bus_dmamap_t map,
    vm_offset_t voffs, bus_size_t size, bus_addr_t amask, bus_addr_t *addr)
{
	bus_dmamap_t tm, last;
	bus_addr_t dvmaddr, freed;
	int error, complete = 0;

	dvmaddr = hviommu_dvma_vfindseg(map, voffs, size, amask);

	/* Need to allocate. */
	if (dvmaddr == 0) {
		while ((error = hviommu_dvma_valloc(dt, him, map,
		    voffs + size)) == ENOMEM && !complete) {
			/*
			 * Free the allocated DVMA of a few maps until
			 * the required size is reached.  This is an
			 * approximation to not have to call the allocation
			 * function too often; most likely one free run
			 * will not suffice if no single map was large
			 * enough by itself due to fragmentation.
			 */
			HIM_LOCK(him);
			freed = 0;
			last = TAILQ_LAST(&him->him_maplruq,
			    hviommu_maplruq_head);
			do {
				tm = TAILQ_FIRST(&him->him_maplruq);
				complete = tm == last;
				if (tm == NULL)
					break;
				freed += hviommu_dvma_vprune(him, tm);
				/* Move to the end. */
				hviommu_map_insq(him, tm);
			} while (freed < size && !complete);
			HIM_UNLOCK(him);
		}
		if (error != 0)
			return (error);
		dvmaddr = hviommu_dvma_vfindseg(map, voffs, size, amask);
		KASSERT(dvmaddr != 0,
		    ("hviommu_dvma_vallocseg: allocation failed unexpectedly!"));
	}
	*addr = dvmaddr;
	return (0);
}
static int
hviommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	struct hviommu *him = dt->dt_cookie;
	int error, mflags;

	/*
	 * XXX: This will break for 32 bit transfers on machines with more than
	 * 16G (1 << 34 bytes) of memory.
	 */
	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);

	if ((flags & BUS_DMA_NOWAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if ((flags & BUS_DMA_ZERO) != 0)
		mflags |= M_ZERO;

	if ((*vaddr = malloc(dt->dt_maxsize, M_HVIOMMU, mflags)) == NULL) {
		error = ENOMEM;
		sparc64_dma_free_map(dt, *mapp);
		return (error);
	}
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Try to preallocate DVMA space.  If this fails, it is retried at load
	 * time.
	 */
	hviommu_dvma_valloc(dt, him, *mapp, IOMMU_SIZE_ROUNDUP(dt->dt_maxsize));
	HIM_LOCK(him);
	hviommu_map_insq(him, *mapp);
	HIM_UNLOCK(him);
	return (0);
}
static void
hviommu_dvmamem_free(bus_dma_tag_t dt, void *vaddr, bus_dmamap_t map)
{
	struct hviommu *him = dt->dt_cookie;

	hviommu_dvma_vfree(him, map);
	sparc64_dma_free_map(dt, map);
	free(vaddr, M_HVIOMMU);
}
static int
hviommu_dvmamap_create(bus_dma_tag_t dt, int flags, bus_dmamap_t *mapp)
{
	struct hviommu *him = dt->dt_cookie;
	bus_size_t totsz, presz, currsz;
	int error, i, maxpre;

	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Preallocate DVMA space; if this fails now, it is retried at load
	 * time.  Through bus_dmamap_load_mbuf() and bus_dmamap_load_uio(), it
	 * is possible to have multiple discontiguous segments in a single map,
	 * which is handled by allocating additional resources, instead of
	 * increasing the size, to avoid fragmentation.
	 * Clamp preallocation to IOMMU_MAX_PRE.  In some situations we can
	 * handle more; that case is handled by reallocating at map load time.
	 */
	totsz = ulmin(IOMMU_SIZE_ROUNDUP(dt->dt_maxsize), IOMMU_MAX_PRE);
	error = hviommu_dvma_valloc(dt, him, *mapp, totsz);
	if (error != 0)
		return (0);
	/*
	 * Try to be smart about preallocating some additional segments if
	 * needed.
	 */
	maxpre = imin(dt->dt_nsegments, IOMMU_MAX_PRE_SEG);
	presz = dt->dt_maxsize / maxpre;
	KASSERT(presz != 0, ("hviommu_dvmamap_create: bogus preallocation size "
	    ", nsegments = %d, maxpre = %d, maxsize = %lu", dt->dt_nsegments,
	    maxpre, dt->dt_maxsize));
	for (i = 1; i < maxpre && totsz < IOMMU_MAX_PRE; i++) {
		currsz = round_io_page(ulmin(presz, IOMMU_MAX_PRE - totsz));
		error = hviommu_dvma_valloc(dt, him, *mapp, currsz);
		if (error != 0)
			break;
		totsz += currsz;
	}
	HIM_LOCK(him);
	hviommu_map_insq(him, *mapp);
	HIM_UNLOCK(him);

	return (0);
}
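/*
 * Example of the preallocation arithmetic: for a tag with dt_maxsize =
 * 16K and dt_nsegments = 8, the initial allocation is 24K (the rounded
 * size plus one page), maxpre is 3 and presz is 16K / 3; the loop then
 * adds one more page-rounded 8K chunk before the 32K IOMMU_MAX_PRE
 * budget is exhausted.
 */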
static void
hviommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct hviommu *him = dt->dt_cookie;

	hviommu_dvma_vfree(him, map);
	sparc64_dma_free_map(dt, map);
}
static void
hviommu_map_pages(struct hviommu *him, bus_addr_t dvmaddr, uint64_t *iottes,
    pages_t iottecnt)
{
	io_attributes_t ioattr;
	r_addr_t ra;
	uint64_t err;
	pages_t mapcnt;
	int cntdone;
	int i;

	DPRINTF("mapping: dh: %#lx, dvmaddr: %#lx, tsbid: %#lx, cnt: %d\n",
	    him->him_handle, dvmaddr, VA_TO_TSBID(him, dvmaddr), iottecnt);
	for (i = 0; i < iottecnt; i++) {
		DPRINTF("iotte:%#lx\n", iottes[i]);
	}

	cntdone = 0;
	while (cntdone < iottecnt) {
		if ((err = hv_pci_iommu_map(him->him_handle, VA_TO_TSBID(him,
		    dvmaddr), iottecnt, PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE,
		    (io_page_list_t)pmap_kextract((vm_offset_t)&iottes[0]),
		    &mapcnt))) {
			DPRINTF("iommu_map: err: %ld\n", err);
			break;
		}
		cntdone += mapcnt;
	}
	for (i = 0; i < iottecnt; i++) {
		DPRINTF("err: %ld", hv_pci_iommu_getmap(him->him_handle,
		    VA_TO_TSBID(him, dvmaddr + i * IO_PAGE_SIZE),
		    &ioattr, &ra));
		DPRINTF(", ioattr: %d, raddr: %#lx\n", ioattr, ra);
	}
}
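/*
 * Note that hv_pci_iommu_map() takes the real address of the IOTTE list,
 * hence the pmap_kextract() on the iottes array above; the load path
 * below feeds it batches of at most IOTTE_CNT pages.
 */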
/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
static int
hviommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct hviommu *him,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, bus_dma_segment_t *segs, int *segp, int align)
{
	uint64_t iottes[IOTTE_CNT];
	bus_addr_t amask, dvmaddr, iottebase;
	bus_size_t sgsize, esize;
	vm_offset_t vaddr, voffs;
	vm_paddr_t curaddr;
	pmap_t pmap = NULL;
	int error, sgcnt, firstpg;
	pages_t iottecnt;

	KASSERT(buflen != 0, ("hviommu_dvmamap_load_buffer: buflen == 0!"));
	if (buflen > dt->dt_maxsize)
		return (EINVAL);

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vaddr = (vm_offset_t)buf;
	voffs = vaddr & IO_PAGE_MASK;
	amask = align ? dt->dt_alignment - 1 : 0;

	/* Try to find a slab that is large enough. */
	error = hviommu_dvma_vallocseg(dt, him, map, voffs, buflen, amask,
	    &dvmaddr);
	if (error != 0)
		return (error);

	DPRINTF("vallocseg: dvmaddr: %#lx, voffs: %#lx, buflen: %#lx\n",
	    dvmaddr, voffs, buflen);

	sgcnt = *segp;
	firstpg = 1;
	iottecnt = 0;
	iottebase = 0;	/* shut up gcc */
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if (pmap != NULL)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		buflen -= sgsize;
		vaddr += sgsize;

#if 0
		/* hviommu_enter() is not defined in this file; the IOTTEs
		 * are batched via hviommu_map_pages() below instead. */
		hviommu_enter(him, trunc_io_page(dvmaddr),
		    trunc_io_page(curaddr), flags);
#endif
		if (iottecnt == 0)
			iottebase = trunc_io_page(dvmaddr);
		DPRINTF("adding: %#lx\n", trunc_io_page(curaddr));
		iottes[iottecnt++] = trunc_io_page(curaddr);

		if (iottecnt >= IOTTE_CNT) {
			hviommu_map_pages(him, iottebase, iottes, iottecnt);
			iottecnt = 0;
		}

		/*
		 * Chop the chunk up into segments of at most maxsegsz, but try
		 * to fill each segment as well as possible.
		 */
		if (!firstpg) {
			esize = ulmin(sgsize,
			    dt->dt_maxsegsz - segs[sgcnt].ds_len);
			segs[sgcnt].ds_len += esize;
			sgsize -= esize;
			dvmaddr += esize;
		}
		while (sgsize > 0) {
			sgcnt++;
			if (sgcnt >= dt->dt_nsegments)
				return (EFBIG);
			/*
			 * No extra alignment here - the common practice in the
			 * busdma code seems to be that only the first segment
			 * needs to satisfy the alignment constraints (and that
			 * only for bus_dmamem_alloc()ed maps).  It is assumed
			 * that such tags have maxsegsize >= maxsize.
			 */
			esize = ulmin(sgsize, dt->dt_maxsegsz);
			segs[sgcnt].ds_addr = dvmaddr;
			segs[sgcnt].ds_len = esize;
			sgsize -= esize;
			dvmaddr += esize;
		}
		firstpg = 0;
	}
	hviommu_map_pages(him, iottebase, iottes, iottecnt);
	*segp = sgcnt;
	return (0);
}
static int
hviommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
    int flags)
{
	struct hviommu *him = dt->dt_cookie;
	int error, seg = -1;

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("hviommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	/*
	 * Make sure that the map is not on a queue so that the resource list
	 * may be safely accessed and modified without needing the lock to
	 * cover the whole operation.
	 */
	HIM_LOCK(him);
	hviommu_map_remq(him, map);
	HIM_UNLOCK(him);

	error = hviommu_dvmamap_load_buffer(dt, him, map, buf, buflen, NULL,
	    flags, dt->dt_segments, &seg, 1);

	HIM_LOCK(him);
	hviommu_map_insq(him, map);
	if (error != 0) {
		hviommu_dvmamap_vunload(him, map);
		HIM_UNLOCK(him);
		(*cb)(cba, dt->dt_segments, 0, error);
	} else {
		HIM_UNLOCK(him);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, dt->dt_segments, seg + 1, 0);
	}

	return (error);
}
static int
hviommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
	struct hviommu *him = dt->dt_cookie;
	struct mbuf *m;
	int error = 0, first = 1, nsegs = -1;

	M_ASSERTPKTHDR(m0);

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("hviommu_dvmamap_load_mbuf: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	HIM_LOCK(him);
	hviommu_map_remq(him, map);
	HIM_UNLOCK(him);

	if (m0->m_pkthdr.len <= dt->dt_maxsize) {
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			error = hviommu_dvmamap_load_buffer(dt, him, map,
			    m->m_data, m->m_len, NULL, flags, dt->dt_segments,
			    &nsegs, first);
			first = 0;
		}
	} else
		error = EINVAL;

	HIM_LOCK(him);
	hviommu_map_insq(him, map);
	if (error != 0) {
		hviommu_dvmamap_vunload(him, map);
		HIM_UNLOCK(him);
		/* force "no valid mappings" in callback */
		(*cb)(cba, dt->dt_segments, 0, 0, error);
	} else {
		HIM_UNLOCK(him);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, dt->dt_segments, nsegs + 1, m0->m_pkthdr.len, 0);
	}
	return (error);
}
static int
hviommu_dvmamap_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct hviommu *him = dt->dt_cookie;
	struct mbuf *m;
	int error = 0, first = 1;

	*nsegs = -1;
	M_ASSERTPKTHDR(m0);

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("hviommu_dvmamap_load_mbuf_sg: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	HIM_LOCK(him);
	hviommu_map_remq(him, map);
	HIM_UNLOCK(him);

	if (m0->m_pkthdr.len <= dt->dt_maxsize) {
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			error = hviommu_dvmamap_load_buffer(dt, him, map,
			    m->m_data, m->m_len, NULL, flags, segs,
			    nsegs, first);
			first = 0;
		}
	} else
		error = EINVAL;

	HIM_LOCK(him);
	hviommu_map_insq(him, map);
	if (error != 0) {
		hviommu_dvmamap_vunload(him, map);
		HIM_UNLOCK(him);
	} else {
		HIM_UNLOCK(him);
		map->dm_flags |= DMF_LOADED;
		++*nsegs;
	}
	return (error);
}
static int
hviommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
	struct hviommu *him = dt->dt_cookie;
	struct iovec *iov;
	struct thread *td = NULL;
	bus_size_t minlen, resid;
	int nsegs = -1, error = 0, first = 1, i;

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("hviommu_dvmamap_load_uio: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	HIM_LOCK(him);
	hviommu_map_remq(him, map);
	HIM_UNLOCK(him);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("%s: USERSPACE but no proc", __func__));
	}

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen == 0)
			continue;

		error = hviommu_dvmamap_load_buffer(dt, him, map,
		    iov[i].iov_base, minlen, td, flags, dt->dt_segments,
		    &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	HIM_LOCK(him);
	hviommu_map_insq(him, map);
	if (error) {
		hviommu_dvmamap_vunload(him, map);
		HIM_UNLOCK(him);
		/* force "no valid mappings" in callback */
		(*cb)(cba, dt->dt_segments, 0, 0, error);
	} else {
		HIM_UNLOCK(him);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, dt->dt_segments, nsegs + 1, uio->uio_resid, 0);
	}
	return (error);
}
static void
hviommu_dvmamap_unload(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct hviommu *him = dt->dt_cookie;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;
	HIM_LOCK(him);
	hviommu_dvmamap_vunload(him, map);
	hviommu_map_insq(him, map);
	HIM_UNLOCK(him);
	map->dm_flags &= ~DMF_LOADED;
}
static void
hviommu_dvmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct hviommu *him = dt->dt_cookie;
	struct bus_dmamap_res *r;
	vm_offset_t va;
	vm_size_t len;
	size_t synced;
	uint64_t err;
	r_addr_t ra;
	io_attributes_t ioattr;
	io_sync_direction_t iodir;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;

	iodir = 0;
	if (op & (BUS_DMASYNC_POSTREAD))
		iodir |= IO_SYNC_CPU;
	if (op & (BUS_DMASYNC_PREWRITE))
		iodir |= IO_SYNC_DEVICE;

	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		membar(Sync);

	/* nothing to be done */
	if (iodir == 0)
		return;

	HIM_LOCK(him);
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		va = (vm_offset_t)BDR_START(r) + r->dr_offset;
		len = r->dr_used;
		while (len > 0) {
			if ((err = hv_pci_iommu_getmap(him->him_handle,
			    VA_TO_TSBID(him, va), &ioattr, &ra))) {
				printf("failed to getmap: err: %ld, handle: "
				    "%#lx, tsbid: %#lx\n", err,
				    him->him_handle, VA_TO_TSBID(him, va));
				break;
			}
			if ((err = hv_pci_dma_sync(him->him_handle, ra,
			    ulmin(len, (trunc_io_page(ra) + IO_PAGE_SIZE) - ra),
			    iodir, &synced))) {
				printf("failed to dma_sync: err: %ld, handle: "
				    "%#lx, ra: %#lx, len: %#lx, dir: %d\n",
				    err, him->him_handle, ra, ulmin(len,
				    (trunc_io_page(ra) + IO_PAGE_SIZE) - ra),
				    iodir);
				/* Pretend the page was synced to progress. */
				synced = ulmin(len, (trunc_io_page(ra) +
				    IO_PAGE_SIZE) - ra);
				printf("err: %ld", hv_pci_iommu_getmap(
				    him->him_handle, VA_TO_TSBID(him, va),
				    &ioattr, &ra));
				printf(", ioattr: %d, raddr: %#lx\n", ioattr,
				    ra);
			}
			va += synced;
			len -= synced;
		}
	}
	HIM_UNLOCK(him);

	if ((op & BUS_DMASYNC_PREWRITE) != 0)
		membar(Sync);
}
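/*
 * BUS_DMASYNC_PREREAD and POSTWRITE only need the ordering barrier above,
 * while POSTREAD and PREWRITE are forwarded to the hypervisor one IO page
 * at a time: hv_pci_dma_sync() operates on real addresses and may sync
 * fewer bytes than requested, hence the va/len advance loop.
 */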
struct bus_dma_methods hviommu_dma_methods = {
	.dm_dmamap_create = hviommu_dvmamap_create,
	.dm_dmamap_destroy = hviommu_dvmamap_destroy,
	.dm_dmamap_load = hviommu_dvmamap_load,
	.dm_dmamap_load_mbuf = hviommu_dvmamap_load_mbuf,
	.dm_dmamap_load_mbuf_sg = hviommu_dvmamap_load_mbuf_sg,
	.dm_dmamap_load_uio = hviommu_dvmamap_load_uio,
	.dm_dmamap_unload = hviommu_dvmamap_unload,
	.dm_dmamap_sync = hviommu_dvmamap_sync,
	.dm_dmamem_alloc = hviommu_dvmamem_alloc,
	.dm_dmamem_free = hviommu_dvmamem_free,
};