/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <dev/pci/pcireg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/intel_dmar.h>
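
/*
 * Forward declaration: domain_map_buf_locked() below calls this routine
 * to tear down a partially constructed mapping when a page table page
 * cannot be allocated.
 */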
static int domain_unmap_buf_locked(struct dmar_domain *domain,
    iommu_gaddr_t base, iommu_gaddr_t size, int flags);

/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing the page tables, since usually DMARs are identical and have
 * the same capabilities.  Still, the cache records the information
 * needed to match DMAR capabilities and page table format, to
 * correctly handle different DMARs.
 */

struct idpgtbl {
	iommu_gaddr_t maxaddr;	/* Page table covers the guest address
				   range [0..maxaddr) */
	int pglvl;		/* Total page table levels ignoring
				   superpages */
	int leaf;		/* The last materialized page table
				   level, it is non-zero if superpages
				   are supported */
	vm_object_t pgtbl_obj;	/* The page table pages */
	LIST_ENTRY(idpgtbl) link;
};

static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
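/*
 * The page table object is expected to be locked on entry; the lock is
 * dropped and re-taken around the sf_buf mapping of the page being
 * filled.
 */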
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    iommu_gaddr_t addr)
{
	vm_page_t m1;
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t f, pg_sz;
	vm_pindex_t base;
	int i;

	VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
	if (addr >= tbl->maxaddr)
		return;
	(void)dmar_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
	    IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
	base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
	pg_sz = pglvl_page_size(tbl->pglvl, lvl);
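	/*
	 * For a non-leaf level, build the child page table pages first;
	 * for the leaf level, the ptes below map the address range
	 * directly.
	 */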
	if (lvl != tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
			domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
	}
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
	if (lvl == tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
			    DMAR_PTE_R | DMAR_PTE_W;
		}
	} else {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
			    IOMMU_PGF_NOALLOC);
			KASSERT(m1 != NULL, ("lost page table page"));
			pte[i].pte = (DMAR_PTE_ADDR_MASK &
			    VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
		}
	}
	/* domain_get_idmap_pgtbl flushes CPU cache if needed. */
	dmar_unmap_pgtbl(sf);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}

/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The maxaddr byte is allowed to be
 * left unmapped, which matches the definition of Maxmem as the
 * highest usable physical address plus one.  If superpages are used,
 * the maxaddr is typically mapped.
 */
vm_object_t
domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
	struct dmar_unit *unit;
	struct idpgtbl *tbl;
	vm_object_t res;
	vm_page_t m;
	int leaf, i;

	leaf = 0; /* silence gcc */

	/*
	 * First, determine where to stop the paging structures.
	 */
	for (i = 0; i < domain->pglvl; i++) {
		if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
			leaf = i;
			break;
		}
	}

	/*
	 * Search the cache for a compatible page table.  A qualified
	 * page table must map up to maxaddr, its level must be
	 * supported by the DMAR and its leaf should be equal to the
	 * calculated value.  The latter restriction could be lifted
	 * but I believe it is currently impossible to have any
	 * deviations for existing hardware.
	 */
	sx_slock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_sunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}

	/*
	 * Not found in the cache, relock the cache into exclusive mode
	 * to be able to add an element, and recheck the cache again
	 * after the relock.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_xunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}

	/*
	 * Still not found, create a new page table.
	 */
	tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
	tbl->pglvl = domain->pglvl;
	tbl->leaf = leaf;
	tbl->maxaddr = maxaddr;
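	/*
	 * The backing VM object is sized for the maximum number of page
	 * table pages a table with this many levels can ever need; the
	 * pages themselves are allocated by domain_idmap_nextlvl() as
	 * the levels are built.
	 */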
	tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
	domain_idmap_nextlvl(tbl, 0, 0, 0);
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	LIST_INSERT_HEAD(&idpgtbls, tbl, link);
	res = tbl->pgtbl_obj;
	vm_object_reference(res);
	sx_xunlock(&idpgtbl_lock);

end:
	/*
	 * Table was found or created.
	 *
	 * If the DMAR does not snoop paging structure accesses, flush
	 * the CPU cache to memory.  Note that the dmar_unmap_pgtbl()
	 * coherent argument was possibly invalid at the time of the
	 * identity page table creation, since the DMAR which was passed
	 * at the time of creation could be coherent, while the current
	 * DMAR is not.
	 *
	 * If the DMAR cannot look into the chipset write buffer, flush
	 * it as well.
	 */
	unit = domain->dmar;
	if (!DMAR_IS_COHERENT(unit)) {
		VM_OBJECT_WLOCK(res);
		for (m = vm_page_lookup(res, 0); m != NULL;
		    m = vm_page_next(m))
			pmap_invalidate_cache_pages(&m, 1);
		VM_OBJECT_WUNLOCK(res);
	}
	if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}

	return (res);
}

/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
	struct idpgtbl *tbl, *tbl1;
	vm_object_t rmobj;

	sx_slock(&idpgtbl_lock);
	KASSERT(obj->ref_count >= 2, ("lost cache reference"));
	vm_object_deallocate(obj);

	/*
	 * The cache always owns the last reference on the page table
	 * object.  If there is an additional reference, the object must
	 * stay.
	 */
	if (obj->ref_count > 1) {
		sx_sunlock(&idpgtbl_lock);
		return;
	}

	/*
	 * The cache reference is the last one, remove the cache element
	 * and free the page table object, returning the page table
	 * pages to the system.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
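	/*
	 * The lock was dropped while upgrading from shared to exclusive
	 * mode, so another thread may have re-referenced a cached table
	 * in the meantime; only tables whose sole remaining reference is
	 * owned by the cache are freed below.
	 */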
	LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
		rmobj = tbl->pgtbl_obj;
		if (rmobj->ref_count == 1) {
			LIST_REMOVE(tbl, link);
			atomic_subtract_int(&dmar_tbl_pagecnt,
			    rmobj->resident_page_count);
			vm_object_deallocate(rmobj);
			free(tbl, M_DMAR_IDPGTBL);
		}
	}
	sx_xunlock(&idpgtbl_lock);
}

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */

/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
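/*
 * For example, with a 4-level table (48-bit address width) the leaf
 * level 3 index comes from address bits 12..20, level 2 from bits
 * 21..29, and so on: each level consumes DMAR_NPTEPGSHIFT (9) bits
 * above DMAR_PAGE_SHIFT (12).
 */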
static int
domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{

	base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
	    DMAR_NPTEPGSHIFT;
	return (base & DMAR_PTEMASK);
}

/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
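/*
 * The pindex layout mirrors the way the tables are built: the root
 * page table page lives at pindex 0, and the children of the page at
 * pindex N occupy pindexes N * DMAR_NPTEPG + 1 .. (N + 1) * DMAR_NPTEPG.
 */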
static vm_pindex_t
domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
	vm_pindex_t idx, pidx;
	int i;

	KASSERT(lvl >= 0 && lvl < domain->pglvl,
	    ("wrong lvl %p %d", domain, lvl));

	for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
		idx = domain_pgtbl_pte_off(domain, base, i) +
		    pidx * DMAR_NPTEPG + 1;
	}
	return (idx);
}
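
/*
 * Map the page table page that contains the pte for the guest address
 * base at level lvl, and return the kernel virtual address of that pte.
 * The sf/idxp pair caches the currently mapped page table page across
 * consecutive calls, so walking adjacent ptes does not remap the page
 * each time.
 */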
static dmar_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
	vm_page_t m;
	struct sf_buf *sfp;
	dmar_pte_t *pte, *ptep;
	vm_pindex_t idx, idx1;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));

	idx = domain_pgtbl_get_pindex(domain, base, lvl);
	if (*sf != NULL && idx == *idxp) {
		pte = (dmar_pte_t *)sf_buf_kva(*sf);
	} else {
		if (*sf != NULL)
			dmar_unmap_pgtbl(*sf);
		*idxp = idx;
retry:
		pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
		if (pte == NULL) {
			KASSERT(lvl > 0,
			    ("lost root page table page %p", domain));
			/*
			 * Page table page does not exist, allocate
			 * it and create a pte in the preceding page level
			 * to reference the allocated page table page.
			 */
			m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
			    IOMMU_PGF_ZERO);
			if (m == NULL)
				return (NULL);

			/*
			 * Prevent potential free while pgtbl_obj is
			 * unlocked in the recursive call to
			 * domain_pgtbl_map_pte(), if another thread did
			 * a pte write and clean while the lock was
			 * dropped.
			 */
			m->ref_count++;

			sfp = NULL;
			ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
			    flags, &idx1, &sfp);
			if (ptep == NULL) {
				KASSERT(m->pindex != 0,
				    ("losing root page %p", domain));
				m->ref_count--;
				dmar_pgfree(domain->pgtbl_obj, m->pindex,
				    flags);
				return (NULL);
			}
			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
			    VM_PAGE_TO_PHYS(m));
			dmar_flush_pte_to_ram(domain->dmar, ptep);
			sf_buf_page(sfp)->ref_count += 1;
			m->ref_count--;
			dmar_unmap_pgtbl(sfp);
			/* Only executed once. */
			goto retry;
		}
	}
	pte += domain_pgtbl_pte_off(domain, base, lvl);
	return (pte);
}

static int
domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t pg_sz, base1;
	vm_pindex_t pi, c, idx, run_sz;
	int lvl;
	bool superpage;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);

	base1 = base;
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
	    pi += run_sz) {
		for (lvl = 0, c = 0, superpage = false;; lvl++) {
			pg_sz = domain_page_size(domain, lvl);
			run_sz = pg_sz >> DMAR_PAGE_SHIFT;
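			/*
			 * run_sz is the number of small host pages
			 * covered by one pte at this level: e.g. a 2M
			 * superpage covers 512 4K pages, all of which
			 * must be physically contiguous for the
			 * superpage to be usable.
			 */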
			if (lvl == domain->pglvl - 1)
				break;
			/*
			 * Check if the current base is suitable for the
			 * superpage mapping.  First, verify the level.
			 */
			if (!domain_is_sp_lvl(domain, lvl))
				continue;
			/*
			 * Next, look at the size of the mapping and
			 * alignment of both guest and host addresses.
			 */
			if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
			    (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
				continue;
			/* All passed, check host pages contiguity. */
			for (c = 1; c < run_sz; c++) {
				if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
				    VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
				    DMAR_PAGE_SIZE)
					break;
			}
			if (c == run_sz) {
				superpage = true;
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("mapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
		KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
		pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
		if (pte == NULL) {
			KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
			    ("failed waitable pte alloc %p", domain));
			if (sf != NULL)
				dmar_unmap_pgtbl(sf);
			domain_unmap_buf_locked(domain, base1, base - base1,
			    flags);
			TD_PINNED_ASSERT;
			return (ENOMEM);
		}
		dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
		    (superpage ? DMAR_PTE_SP : 0));
		dmar_flush_pte_to_ram(domain->dmar, pte);
		sf_buf_page(sf)->ref_count += 1;
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct dmar_domain *domain;
	struct dmar_unit *unit;
	uint64_t pflags;
	int error;
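
	/*
	 * Translate the generic iommu map entry flags into the
	 * corresponding DMAR pte bits before taking the page table
	 * lock.
	 */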
	pflags = ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);

	domain = IODOM2DOM(iodom);
	unit = domain->dmar;

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
	    ("neither read nor write %jx", (uintmax_t)pflags));
	KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
	    DMAR_PTE_TM)) == 0,
	    ("invalid pte flags %jx", (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_SC) != 0,
	    ("PTE_SNP for dmar without snoop control %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_TM) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_DI) != 0,
	    ("PTE_TM for dmar without DIOTLB %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	if (error != 0)
		return (error);
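
	/*
	 * With caching mode (DMAR_CAP_CM) the hardware may cache
	 * not-present entries, so even freshly created mappings must be
	 * invalidated from the IOTLB; otherwise only the chipset write
	 * buffer, if present, needs flushing.
	 */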
	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
		domain_flush_iotlb_sync(domain, base, size);
	else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		/* See 11.1 Write Buffer Flushing. */
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}
	return (0);
}
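
/*
 * domain_unmap_clear_pte() and domain_free_pgtbl_pde() recurse into
 * each other: clearing the last pte in a page table page frees that
 * page, which in turn requires clearing the pde that points to it in
 * the parent level, possibly freeing the parent as well.  Hence the
 * forward declaration.
 */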
static void domain_unmap_clear_pte(struct dmar_domain *domain,
    iommu_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
    struct sf_buf **sf, bool free_sf);

static void
domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
    int lvl, int flags)
{
	struct sf_buf *sf;
	dmar_pte_t *pde;
	vm_pindex_t idx;

	sf = NULL;
	pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
	domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}

static void
domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
	vm_page_t m;

	dmar_pte_clear(&pte->pte);
	dmar_flush_pte_to_ram(domain->dmar, pte);
	m = sf_buf_page(*sf);
	if (free_sf) {
		dmar_unmap_pgtbl(*sf);
		*sf = NULL;
	}
	m->ref_count--;
	if (m->ref_count != 0)
		return;
	KASSERT(lvl != 0,
	    ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	KASSERT(m->pindex != 0,
	    ("lost reference (idx) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	dmar_pgfree(domain->pgtbl_obj, m->pindex, flags);
	domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}

/*
 * Assumes that the unmap is never partial.
 */
static int
domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	vm_pindex_t idx;
	iommu_gaddr_t pg_sz;
	int lvl;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	if (size == 0)
		return (0);

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	pg_sz = 0; /* silence gcc */
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;
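
	/*
	 * For each address, walk down only the levels that can actually
	 * terminate a mapping (superpage-capable levels and the leaf)
	 * and clear the pte at the level where the mapping was made, as
	 * indicated by the SP bit.  Clearing the last pte in a page
	 * table page cascades upward through domain_unmap_clear_pte().
	 */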
	for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
		for (lvl = 0; lvl < domain->pglvl; lvl++) {
			if (lvl != domain->pglvl - 1 &&
			    !domain_is_sp_lvl(domain, lvl))
				continue;
			pg_sz = domain_page_size(domain, lvl);
			if (pg_sz > size)
				continue;
			pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
			    &idx, &sf);
			KASSERT(pte != NULL,
			    ("sleeping or page missed %p %jx %d 0x%x",
			    domain, (uintmax_t)base, lvl, flags));
			if ((pte->pte & DMAR_PTE_SP) != 0 ||
			    lvl == domain->pglvl - 1) {
				domain_unmap_clear_pte(domain, base, lvl,
				    flags, pte, &sf, false);
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("unmapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	/*
	 * See 11.1 Write Buffer Flushing for an explanation of why RWBF
	 * can be ignored here.
	 */
	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct dmar_domain *domain;
	int error;

	domain = IODOM2DOM(iodom);

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_unmap_buf_locked(domain, base, size, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	return (error);
}

int
domain_alloc_pgtbl(struct dmar_domain *domain)
{
	vm_page_t m;

	KASSERT(domain->pgtbl_obj == NULL,
	    ("already initialized %p", domain));

	domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
	DMAR_DOMAIN_PGLOCK(domain);
	m = dmar_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
	    IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
	/* No implicit free of the top level page table page. */
	m->ref_count = 1;
	DMAR_DOMAIN_PGUNLOCK(domain);
	DMAR_LOCK(domain->dmar);
	domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
	DMAR_UNLOCK(domain->dmar);
	return (0);
}

void
domain_free_pgtbl(struct dmar_domain *domain)
{
	vm_object_t obj;
	vm_page_t m;

	obj = domain->pgtbl_obj;
	if (obj == NULL) {
		KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
		    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
		    ("lost pagetable object domain %p", domain));
		return;
	}
	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	domain->pgtbl_obj = NULL;

	if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
		put_idmap_pgtbl(obj);
		domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
		return;
	}

	/* Obliterate ref_counts */
	VM_OBJECT_ASSERT_WLOCKED(obj);
	for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
		m->ref_count = 0;
	VM_OBJECT_WUNLOCK(obj);
	vm_object_deallocate(obj);
}
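
/*
 * Issue a domain- or page-selective IOTLB invalidation in register mode
 * and busy-wait for the hardware to clear the DMAR_IOTLB_IVT bit, which
 * signals completion.  The returned register value reports the actual
 * invalidation granularity that was performed.
 */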
static inline uint64_t
domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
	uint64_t iotlbr;

	dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
	    DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
	for (;;) {
		iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
		if ((iotlbr & DMAR_IOTLB_IVT) == 0)
			break;
	}
	return (iotlbr);
}

void
domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size)
{
	struct dmar_unit *unit;
	iommu_gaddr_t isize;
	uint64_t iotlbr;
	int am, iro;

	unit = domain->dmar;
	KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
	    unit->iommu.unit));
	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
	DMAR_LOCK(unit);
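	/*
	 * Without page-selective invalidation (PSI) support, or for
	 * ranges larger than 2MB, flush the whole domain; otherwise the
	 * range is invalidated in naturally aligned power-of-two chunks
	 * computed by calc_am() below.
	 */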
	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
		iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
		    DMAR_IOTLB_DID(domain->domain), iro);
		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
		    DMAR_IOTLB_IAIG_INVLD,
		    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
		    (uintmax_t)iotlbr));
	} else {
		for (; size > 0; base += isize, size -= isize) {
			am = calc_am(unit, base, size, &isize);
			dmar_write8(unit, iro, base | am);
			iotlbr = domain_wait_iotlb_flush(unit,
			    DMAR_IOTLB_IIRG_PAGE |
			    DMAR_IOTLB_DID(domain->domain), iro);
			KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_INVLD,
			    ("dmar%d: PSI invalidation failed "
			    "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
			    unit->iommu.unit, (uintmax_t)iotlbr,
			    (uintmax_t)base, (uintmax_t)size, am));
			/*
			 * Any non-page granularity covers the whole
			 * guest address space for the domain.
			 */
			if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_PAGE)
				break;
		}
	}
	DMAR_UNLOCK(unit);
}
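
/*
 * Glue for the generic iommu/busdma layer: these callbacks are invoked
 * through the iommu_domain_map_ops vector to populate and tear down the
 * DMAR page tables for a domain's DMA map entries.
 */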
const struct iommu_domain_map_ops dmar_domain_map_ops = {
	.map = domain_map_buf,
	.unmap = domain_unmap_buf,
};