/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <dev/pci/pcireg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/intel_dmar.h>
static int domain_unmap_buf_locked(struct dmar_domain *domain,
    iommu_gaddr_t base, iommu_gaddr_t size, int flags);
/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing the page tables, since usually DMARs are identical and have
 * the same capabilities.  Still, the cache records the information
 * needed to match DMAR capabilities and page table format, to
 * correctly handle different DMARs.
 */
struct idpgtbl {
	iommu_gaddr_t maxaddr;	/* Page table covers the guest address
				   range [0..maxaddr) */
	int pglvl;		/* Total page table levels ignoring
				   superpages */
	int leaf;		/* The last materialized page table
				   level, it is non-zero if superpages
				   are supported */
	vm_object_t pgtbl_obj;	/* The page table pages */
	LIST_ENTRY(idpgtbl) link;
};
static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");
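/*
 * Sizing example (illustrative arithmetic, not taken from the code):
 * with a 4-level page table (48-bit AGAW) and 2M superpages, leaf is
 * 2 and an identity map of the first 64G of RAM needs one level 0
 * page, one level 1 page and 64 level 2 pages holding 32768 2M
 * superpage ptes, roughly 264K in total.  Without superpages the same
 * map would need 32768 4K leaf page table pages alone, and sharing
 * the object among identical DMARs multiplies the savings.
 */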
/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    iommu_gaddr_t addr)
{
	vm_page_t m1;
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t f, pg_sz;
	vm_pindex_t base;
	int i;

	VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
	if (addr >= tbl->maxaddr)
		return;
	(void)dmar_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
	    IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
	base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
	pg_sz = pglvl_page_size(tbl->pglvl, lvl);
	if (lvl != tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
			domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
	}
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
	if (lvl == tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
			    DMAR_PTE_R | DMAR_PTE_W;
		}
	} else {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
			    IOMMU_PGF_NOALLOC);
			KASSERT(m1 != NULL, ("lost page table page"));
			pte[i].pte = (DMAR_PTE_ADDR_MASK &
			    VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
		}
	}
	/* domain_get_idmap_pgtbl flushes CPU cache if needed. */
	dmar_unmap_pgtbl(sf);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}
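/*
 * Example of the pgtbl_obj pindex layout used above (worked numbers):
 * the root page table page sits at pindex 0 and its DMAR_NPTEPG (512)
 * children occupy pindices 1..512, so for idx == 0 the computed base
 * is 1.  The children of the page at pindex 1 occupy 513..1024, and
 * so on: the subtree of any page at pindex idx starts at
 * idx * DMAR_NPTEPG + 1.
 */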
/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The maxaddr byte is allowed to be
 * not mapped, which is aligned with the definition of Maxmem as the
 * highest usable physical address + 1.  If superpages are used, the
 * maxaddr is typically mapped.
 */
vm_object_t
domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
	struct dmar_unit *unit;
	struct idpgtbl *tbl;
	vm_object_t res;
	vm_page_t m;
	int leaf, i;

	leaf = 0; /* silence gcc */

	/*
	 * First, determine where to stop the paging structures.
	 */
	for (i = 0; i < domain->pglvl; i++) {
		if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
			leaf = i;
			break;
		}
	}

	/*
	 * Search the cache for a compatible page table.  A qualified
	 * page table must map up to maxaddr, its level must be
	 * supported by the DMAR and its leaf should be equal to the
	 * calculated value.  The latter restriction could be lifted,
	 * but I believe it is currently impossible to have any
	 * deviations for existing hardware.
	 */
	sx_slock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_sunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}
	/*
	 * Not found in the cache, relock the cache into exclusive
	 * mode to be able to add an element, and recheck the cache
	 * again after the relock.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_xunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}
	/*
	 * Still not found, create a new page table.
	 */
	tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
	tbl->pglvl = domain->pglvl;
	tbl->leaf = leaf;
	tbl->maxaddr = maxaddr;
	tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
	domain_idmap_nextlvl(tbl, 0, 0, 0);
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	LIST_INSERT_HEAD(&idpgtbls, tbl, link);
	res = tbl->pgtbl_obj;
	vm_object_reference(res);
	sx_xunlock(&idpgtbl_lock);
end:
	/*
	 * The table was found or created.
	 *
	 * If the DMAR does not snoop paging structure accesses, flush
	 * the CPU cache to memory.  Note that the dmar_unmap_pgtbl()
	 * coherent argument was possibly invalid at the time of the
	 * identity page table creation, since the DMAR which was
	 * passed at the time of creation could be coherent, while the
	 * current DMAR is not.
	 *
	 * If the DMAR cannot look into the chipset write buffer,
	 * flush it as well.
	 */
	unit = domain->dmar;
	if (!DMAR_IS_COHERENT(unit)) {
		VM_OBJECT_WLOCK(res);
		for (m = vm_page_lookup(res, 0); m != NULL;
		    m = vm_page_next(m))
			pmap_invalidate_cache_pages(&m, 1);
		VM_OBJECT_WUNLOCK(res);
	}
	if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}

	return (res);
}
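/*
 * Example of the leaf calculation at the top of
 * domain_get_idmap_pgtbl() (illustrative, assuming a 4-level table
 * where domain_is_sp_lvl() reports superpage support at levels 1 and
 * 2, i.e. 1G and 2M ptes): the loop stops at the first superpage
 * level, i == 1, so the identity page table is materialized only down
 * to level 1 and each leaf pte maps 1G.  Without superpage support
 * the loop runs to i == domain->pglvl - 1 == 3 and ordinary 4K leaf
 * ptes are used.
 */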
/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
	struct idpgtbl *tbl, *tbl1;
	vm_object_t rmobj;

	sx_slock(&idpgtbl_lock);
	KASSERT(obj->ref_count >= 2, ("lost cache reference"));
	vm_object_deallocate(obj);

	/*
	 * The cache always owns one last reference on the page table
	 * object.  If there is an additional reference, the object
	 * must stay.
	 */
	if (obj->ref_count > 1) {
		sx_sunlock(&idpgtbl_lock);
		return;
	}
	/*
	 * The cache reference is the last one, remove the cache
	 * element and free the page table object, returning the page
	 * table pages to the system.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
		rmobj = tbl->pgtbl_obj;
		if (rmobj->ref_count == 1) {
			LIST_REMOVE(tbl, link);
			atomic_subtract_int(&dmar_tbl_pagecnt,
			    rmobj->resident_page_count);
			vm_object_deallocate(rmobj);
			free(tbl, M_DMAR_IDPGTBL);
		}
	}
	sx_xunlock(&idpgtbl_lock);
}
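#if 0
/*
 * Usage sketch (not compiled; idmap_pgtbl_example() is a hypothetical
 * caller): domain_get_idmap_pgtbl() returns a referenced object and
 * put_idmap_pgtbl() drops that reference, so the tables are freed
 * only once nothing but the cache holds them.
 */
static void
idmap_pgtbl_example(struct dmar_domain *domain)
{
	vm_object_t obj;

	obj = domain_get_idmap_pgtbl(domain, 1ULL << 32); /* first 4G */
	/* ... point the context entry at obj's pages ... */
	put_idmap_pgtbl(obj);
}
#endif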
/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */
/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
static int
domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{

	base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
	    DMAR_NPTEPGSHIFT;
	return (base & DMAR_PTEMASK);
}
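/*
 * Worked example (illustrative numbers): for domain->pglvl == 4 and
 * base == 0x7f1234567000, the leaf (lvl == 3) index is
 * (base >> 12) & 0x1ff == 0x167, and the root (lvl == 0) index is
 * (base >> (12 + 3 * 9)) & 0x1ff == 0xfe, the usual 9-bits-per-level
 * split of a 48-bit guest address.
 */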
/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
static vm_pindex_t
domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
	vm_pindex_t idx, pidx;
	int i;

	KASSERT(lvl >= 0 && lvl < domain->pglvl,
	    ("wrong lvl %p %d", domain, lvl));

	for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
		idx = domain_pgtbl_pte_off(domain, base, i) +
		    pidx * DMAR_NPTEPG + 1;
	}
	return (idx);
}
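/*
 * Worked example (illustrative): for lvl == 2 the loop iterates
 * twice.  With n0 and n1 the level 0 and level 1 pte offsets of base,
 * the level 1 page table page sits at pindex n0 + 1 and the level 2
 * page at pindex n1 + (n0 + 1) * DMAR_NPTEPG + 1, which mirrors the
 * child numbering used by domain_idmap_nextlvl() above.
 */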
static dmar_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
	vm_page_t m;
	struct sf_buf *sfp;
	dmar_pte_t *pte, *ptep;
	vm_pindex_t idx, idx1;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));

	idx = domain_pgtbl_get_pindex(domain, base, lvl);
	if (*sf != NULL && idx == *idxp) {
		pte = (dmar_pte_t *)sf_buf_kva(*sf);
	} else {
		if (*sf != NULL)
			dmar_unmap_pgtbl(*sf);
		*idxp = idx;
retry:
		pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
		if (pte == NULL) {
			KASSERT(lvl > 0,
			    ("lost root page table page %p", domain));
			/*
			 * Page table page does not exist, allocate
			 * it and create a pte in the preceding page level
			 * to reference the allocated page table page.
			 */
			m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
			    IOMMU_PGF_ZERO);
			if (m == NULL)
				return (NULL);

			/*
			 * Prevent a potential free while pgtbl_obj is
			 * unlocked in the recursive call to
			 * domain_pgtbl_map_pte(), if another thread did
			 * a pte write and clear while the lock is
			 * dropped.
			 */
			m->ref_count++;

			sfp = NULL;
			ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
			    flags, &idx1, &sfp);
			if (ptep == NULL) {
				KASSERT(m->pindex != 0,
				    ("losing root page %p", domain));
				m->ref_count--;
				dmar_pgfree(domain->pgtbl_obj, m->pindex,
				    flags);
				return (NULL);
			}
			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
			    VM_PAGE_TO_PHYS(m));
			dmar_flush_pte_to_ram(domain->dmar, ptep);
			sf_buf_page(sfp)->ref_count += 1;
			m->ref_count--;
			dmar_unmap_pgtbl(sfp);
			/* Only executed once. */
			goto retry;
		}
	}
	pte += domain_pgtbl_pte_off(domain, base, lvl);
	return (pte);
}
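/*
 * A note on the *sf/*idxp caching above (an observation, not new
 * behavior): when a caller maps a run of addresses falling into one
 * leaf page table page, e.g. 512 consecutive 4K pages, the
 * idx == *idxp check keeps that page mapped through the cached
 * sf_buf across all 512 calls, avoiding repeated map/unmap of the
 * KVA window.
 */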
static int
domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t pg_sz, base1;
	vm_pindex_t pi, c, idx, run_sz;
	int lvl;
	bool superpage;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);

	base1 = base;
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
	    pi += run_sz) {
		for (lvl = 0, c = 0, superpage = false;; lvl++) {
			pg_sz = domain_page_size(domain, lvl);
			run_sz = pg_sz >> DMAR_PAGE_SHIFT;
			if (lvl == domain->pglvl - 1)
				break;
			/*
			 * Check if the current base is suitable for a
			 * superpage mapping.  First, verify the level.
			 */
			if (!domain_is_sp_lvl(domain, lvl))
				continue;
			/*
			 * Next, look at the size of the mapping and
			 * alignment of both guest and host addresses.
			 */
			if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
			    (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
				continue;
			/* All passed, check the host pages' contiguity. */
			if (c == 0) {
				for (c = 1; c < run_sz; c++) {
					if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
					    VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
					    DMAR_PAGE_SIZE)
						break;
				}
			}
			if (c >= run_sz) {
				superpage = true;
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("mapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
		KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
		pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
		if (pte == NULL) {
			KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
			    ("failed waitable pte alloc %p", domain));
			if (sf != NULL)
				dmar_unmap_pgtbl(sf);
			domain_unmap_buf_locked(domain, base1, base - base1,
			    flags);
			TD_PINNED_ASSERT;
			return (ENOMEM);
		}
		dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
		    (superpage ? DMAR_PTE_SP : 0));
		dmar_flush_pte_to_ram(domain->dmar, pte);
		sf_buf_page(sf)->ref_count += 1;
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
	return (0);
}
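/*
 * Superpage selection example (illustrative): a request whose base is
 * 2M-aligned, whose size is at least 2M, whose ma[pi] is physically
 * 2M-aligned and whose next 511 pages are physically contiguous is
 * satisfied by a single level 2 pte with DMAR_PTE_SP set; the outer
 * loop then advances base by pg_sz == 2M and pi by run_sz == 512.
 */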
static int
domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct dmar_domain *domain;
	struct dmar_unit *unit;
	uint64_t pflags;
	int error;

	pflags = ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);

	domain = IODOM2DOM(iodom);
	unit = domain->dmar;

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
	    ("neither read nor write %jx", (uintmax_t)pflags));
	KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
	    DMAR_PTE_TM)) == 0,
	    ("invalid pte flags %jx", (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_SC) != 0,
	    ("PTE_SNP for dmar without snoop control %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_TM) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_DI) != 0,
	    ("PTE_TM for dmar without DIOTLB %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	if (error != 0)
		return (error);

	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
		domain_flush_iotlb_sync(domain, base, size);
	else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		/* See 11.1 Write Buffer Flushing. */
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}
	return (0);
}
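/*
 * Worked example for the eflags translation above (bits taken from
 * the code itself): eflags == IOMMU_MAP_ENTRY_READ |
 * IOMMU_MAP_ENTRY_WRITE yields pflags == DMAR_PTE_R | DMAR_PTE_W,
 * which passes the "neither read nor write" assertion; adding
 * IOMMU_MAP_ENTRY_SNOOP or IOMMU_MAP_ENTRY_TM is asserted valid only
 * when the unit reports DMAR_ECAP_SC or DMAR_ECAP_DI respectively.
 */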
static void domain_unmap_clear_pte(struct dmar_domain *domain,
    iommu_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
    struct sf_buf **sf, bool free_sf);
static void
domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
    int lvl, int flags)
{
	struct sf_buf *sf;
	dmar_pte_t *pde;
	vm_pindex_t idx;

	sf = NULL;
	pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
	domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}
static void
domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
	vm_page_t m;

	dmar_pte_clear(&pte->pte);
	dmar_flush_pte_to_ram(domain->dmar, pte);
	m = sf_buf_page(*sf);
	if (free_sf) {
		dmar_unmap_pgtbl(*sf);
		*sf = NULL;
	}
	m->ref_count--;
	if (m->ref_count != 0)
		return;
	KASSERT(lvl != 0,
	    ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	KASSERT(m->pindex != 0,
	    ("lost reference (idx) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	dmar_pgfree(domain->pgtbl_obj, m->pindex, flags);
	domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}
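/*
 * Cascading free example (illustrative): clearing the last valid pte
 * in a leaf page table page drops that page's ref_count to zero, so
 * the page is freed and domain_free_pgtbl_pde() clears the pde one
 * level up that referenced it.  That clear decrements the parent's
 * ref_count in turn and may repeat, but the cascade never reaches the
 * root page, which domain_alloc_pgtbl() pins with an extra reference.
 */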
/*
 * Assumes that the unmap is never partial.
 */
static int
domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	vm_pindex_t idx;
	iommu_gaddr_t pg_sz;
	int lvl;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	if (size == 0)
		return (0);

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	pg_sz = 0; /* silence gcc */
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
		for (lvl = 0; lvl < domain->pglvl; lvl++) {
			if (lvl != domain->pglvl - 1 &&
			    !domain_is_sp_lvl(domain, lvl))
				continue;
			pg_sz = domain_page_size(domain, lvl);
			if (pg_sz > size)
				continue;
			pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
			    &idx, &sf);
			KASSERT(pte != NULL,
			    ("sleeping or page missed %p %jx %d 0x%x",
			    domain, (uintmax_t)base, lvl, flags));
			if ((pte->pte & DMAR_PTE_SP) != 0 ||
			    lvl == domain->pglvl - 1) {
				domain_unmap_clear_pte(domain, base, lvl,
				    flags, pte, &sf, false);
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("unmapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	/*
	 * See 11.1 Write Buffer Flushing for an explanation why RWBF
	 * can be ignored here.
	 */
	TD_PINNED_ASSERT;
	return (0);
}
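/*
 * Unmap granularity example (illustrative): a region mapped with a 2M
 * superpage is found by the level walk above at the level whose pte
 * has DMAR_PTE_SP set and is cleared as a whole, advancing base and
 * size by 2M per iteration; a region mapped with 4K pages reaches
 * lvl == domain->pglvl - 1 instead and is torn down pte by pte.
 */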
static int
domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct dmar_domain *domain;
	int error;

	domain = IODOM2DOM(iodom);

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_unmap_buf_locked(domain, base, size, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	return (error);
}
int
domain_alloc_pgtbl(struct dmar_domain *domain)
{
	vm_page_t m;

	KASSERT(domain->pgtbl_obj == NULL,
	    ("already initialized %p", domain));

	domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
	DMAR_DOMAIN_PGLOCK(domain);
	m = dmar_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
	    IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
	/* No implicit free of the top level page table page. */
	m->ref_count++;
	DMAR_DOMAIN_PGUNLOCK(domain);
	DMAR_LOCK(domain->dmar);
	domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
	DMAR_UNLOCK(domain->dmar);
	return (0);
}
void
domain_free_pgtbl(struct dmar_domain *domain)
{
	vm_object_t obj;
	vm_page_t m;

	obj = domain->pgtbl_obj;
	if (obj == NULL) {
		KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
		    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
		    ("lost pagetable object domain %p", domain));
		return;
	}
	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	domain->pgtbl_obj = NULL;

	if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
		put_idmap_pgtbl(obj);
		domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
		return;
	}

	/* Obliterate ref_counts */
	VM_OBJECT_ASSERT_WLOCKED(obj);
	for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
		m->ref_count = 0;
	VM_OBJECT_WUNLOCK(obj);
	vm_object_deallocate(obj);
}
static inline uint64_t
domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
	uint64_t iotlbr;

	dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
	    DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
	for (;;) {
		iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
		if ((iotlbr & DMAR_IOTLB_IVT) == 0)
			break;
		cpu_spinwait();
	}
	return (iotlbr);
}
void
domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size)
{
	struct dmar_unit *unit;
	iommu_gaddr_t isize;
	uint64_t iotlbr;
	int am, iro;

	unit = domain->dmar;
	KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
	    unit->iommu.unit));
	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
	DMAR_LOCK(unit);
	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
		iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
		    DMAR_IOTLB_DID(domain->domain), iro);
		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
		    DMAR_IOTLB_IAIG_INVLD,
		    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
		    (uintmax_t)iotlbr));
	} else {
		for (; size > 0; base += isize, size -= isize) {
			am = calc_am(unit, base, size, &isize);
			dmar_write8(unit, iro, base | am);
			iotlbr = domain_wait_iotlb_flush(unit,
			    DMAR_IOTLB_IIRG_PAGE |
			    DMAR_IOTLB_DID(domain->domain), iro);
			KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_INVLD,
			    ("dmar%d: PSI invalidation failed "
			    "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
			    unit->iommu.unit, (uintmax_t)iotlbr,
			    (uintmax_t)base, (uintmax_t)size, am));
			/*
			 * Any non-page granularity covers the whole
			 * guest address space for the domain.
			 */
			if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_PAGE)
				break;
		}
	}
	DMAR_UNLOCK(unit);
}
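/*
 * Page-selective invalidation example (a sketch of the calc_am()
 * contract, which is defined elsewhere and assumed here): for
 * base == 0x200000 and size == 0x200000 the helper may return
 * am == 9 with isize == 2M, invalidating 2^9 4K pages with a single
 * IOTLB write; an unaligned base forces smaller blocks, and the IAIG
 * field read back in iotlbr reports the granularity the hardware
 * actually used.
 */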
const struct iommu_domain_map_ops dmar_domain_map_ops = {
	.map = domain_map_buf,
	.unmap = domain_unmap_buf,
};
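#if 0
/*
 * Dispatch sketch (not compiled; the ops field name is assumed from
 * the generic dev/iommu code): consumers do not call domain_map_buf()
 * directly, they go through the method table exported above.
 */
static int
map_ops_example(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags)
{

	return (iodom->ops->map(iodom, base, size, ma, eflags,
	    IOMMU_PGF_WAITOK));
}
#endif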