/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>

static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
    dmar_gaddr_t size, int flags);
/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing the page tables, since usually DMARs are identical and have
 * the same capabilities.  Still, the cache records the information
 * needed to match DMAR capabilities and page table format, to
 * correctly handle different DMARs.
 */
struct idpgtbl {
        dmar_gaddr_t maxaddr;   /* Page table covers the guest address
                                   range [0..maxaddr) */
        int pglvl;              /* Total page table levels ignoring
                                   superpages */
        int leaf;               /* The last materialized page table
                                   level, it is non-zero if superpages
                                   are supported */
        vm_object_t pgtbl_obj;  /* The page table pages */
        LIST_ENTRY(idpgtbl) link;
};
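
/*
 * The idpgtbl_lock sx lock protects the idpgtbls list of cached
 * identity mapping page tables declared below.
 */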
static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
        dmar_gaddr_t f, pg_sz;

        VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
        if (addr >= tbl->maxaddr)
        (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
        base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
        pg_sz = pglvl_page_size(tbl->pglvl, lvl);
        if (lvl != tbl->leaf) {
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
                        ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f);
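        /*
         * Drop the object lock around the sf_buf mapping of the page
         * table page; dmar_map_pgtbl() may sleep when DMAR_PGF_WAITOK
         * is given.  The table is not yet visible to other threads, it
         * is published only after ctx_get_idmap_pgtbl() finishes the
         * construction.
         */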
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf);
        if (lvl == tbl->leaf) {
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                        pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
                            DMAR_PTE_R | DMAR_PTE_W;
                for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                        m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
                        KASSERT(m1 != NULL, ("lost page table page"));
                        pte[i].pte = (DMAR_PTE_ADDR_MASK &
                            VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
        /* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
        dmar_unmap_pgtbl(sf);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The maxaddr byte itself is allowed
 * to be unmapped, which is consistent with the definition of Maxmem
 * as the highest usable physical address + 1.  If superpages are
 * used, the maxaddr is typically mapped.
 */
ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
        struct dmar_unit *unit;

        leaf = 0; /* silence gcc */

        /*
         * First, determine where to stop the paging structures.
         */
        for (i = 0; i < ctx->pglvl; i++) {
                if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) {

        /*
         * Search the cache for a compatible page table.  A qualified
         * page table must map up to maxaddr, its level must be
         * supported by the DMAR and leaf should be equal to the
         * calculated value.  The latter restriction could be lifted,
         * but I believe it is currently impossible to have any
         * deviations for existing hardware.
         */
        sx_slock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_sunlock(&idpgtbl_lock);
                        ctx->pglvl = tbl->pglvl; /* XXXKIB ? */

        /*
         * Not found in cache, relock the cache into exclusive mode to
         * be able to add an element, and recheck the cache again after
         * the relock.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_xunlock(&idpgtbl_lock);
                        ctx->pglvl = tbl->pglvl; /* XXXKIB ? */

        /*
         * Still not found, create a new page table.
         */
        tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
        tbl->pglvl = ctx->pglvl;
        tbl->maxaddr = maxaddr;
        tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
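        /*
         * Recursively build the identity mapping, starting from the
         * root page table page at index 0 of the newly allocated
         * object.
         */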
        ctx_idmap_nextlvl(tbl, 0, 0, 0);
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        LIST_INSERT_HEAD(&idpgtbls, tbl, link);
        res = tbl->pgtbl_obj;
        vm_object_reference(res);
        sx_xunlock(&idpgtbl_lock);

        /*
         * Table was found or created.
         *
         * If the DMAR does not snoop paging structures accesses, flush
         * the CPU cache to memory.  Note that the dmar_unmap_pgtbl()
         * coherent argument was possibly invalid at the time of the
         * identity page table creation, since the DMAR which was
         * passed at the time of creation could be coherent, while the
         * current DMAR is not.
         *
         * If the DMAR cannot look into the chipset write buffer, flush
         * it as well.
         */
        if (!DMAR_IS_COHERENT(unit)) {
                VM_OBJECT_WLOCK(res);
                for (m = vm_page_lookup(res, 0); m != NULL;
                        pmap_invalidate_cache_pages(&m, 1);
                VM_OBJECT_WUNLOCK(res);
        if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                dmar_flush_write_bufs(unit);

/*
 * Return a reference to the identity mapping page table to the cache.
 */
put_idmap_pgtbl(vm_object_t obj)
        struct idpgtbl *tbl, *tbl1;

        sx_slock(&idpgtbl_lock);
        KASSERT(obj->ref_count >= 2, ("lost cache reference"));
        vm_object_deallocate(obj);

        /*
         * The cache always owns one last reference on the page table
         * object.  If there is an additional reference, the object
         * must stay.
         */
        if (obj->ref_count > 1) {
                sx_sunlock(&idpgtbl_lock);

        /*
         * The cache reference is the last one, remove the cache
         * element and free the page table object, returning the page
         * table pages to the system.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
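        /*
         * With the lock now held exclusively, reclaim every cached
         * page table whose only remaining reference is the one held by
         * the cache itself; this may free more tables than the one
         * that was just released.
         */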
        LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
                rmobj = tbl->pgtbl_obj;
                if (rmobj->ref_count == 1) {
                        LIST_REMOVE(tbl, link);
                        atomic_subtract_int(&dmar_tbl_pagecnt,
                            rmobj->resident_page_count);
                        vm_object_deallocate(rmobj);
                        free(tbl, M_DMAR_IDPGTBL);
        sx_xunlock(&idpgtbl_lock);

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */

/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
        base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT;
        return (base & DMAR_PTEMASK);
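
/*
 * For example, ctx_pgtbl_pte_off() with a 4-level page table extracts
 * bits 47:39 of the guest address for level 0 (the root), bits 38:30
 * for level 1, bits 29:21 for level 2, and bits 20:12 for level 3
 * (the leaf).
 */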
/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
        vm_pindex_t idx, pidx;

        KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl));

        for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx)
                idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1;
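
/*
 * The pindex layout used by ctx_pgtbl_get_pindex() follows from the
 * formula above: the root page table page lives at pindex 0, and the
 * children of the page at pindex P occupy the run of pindexes
 * [P * DMAR_NPTEPG + 1, P * DMAR_NPTEPG + DMAR_NPTEPG].  The same
 * scheme is used by ctx_idmap_nextlvl() when it computes the index of
 * the first child page.
 */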
ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
    vm_pindex_t *idxp, struct sf_buf **sf)
        dmar_pte_t *pte, *ptep;
        vm_pindex_t idx, idx1;

        DMAR_CTX_ASSERT_PGLOCKED(ctx);
        KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));

        idx = ctx_pgtbl_get_pindex(ctx, base, lvl);
        if (*sf != NULL && idx == *idxp) {
                pte = (dmar_pte_t *)sf_buf_kva(*sf);
                        dmar_unmap_pgtbl(*sf);
                pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
                        KASSERT(lvl > 0, ("lost root page table page %p", ctx));
                        /*
                         * The page table page does not exist, allocate
                         * it and create the pte in the upper level.
                         */
                        m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags |
                        /*
                         * Prevent potential free while pgtbl_obj is
                         * unlocked in the recursive call to
                         * ctx_pgtbl_map_pte(), if another thread did
                         * a pte write and clean while the lock is
                         * dropped.
                         */
                        ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags,
                                KASSERT(m->pindex != 0,
                                    ("losing root page %p", ctx));
                                dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
                        dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
                        dmar_flush_pte_to_ram(ctx->dmar, ptep);
                        sf_buf_page(sfp)->wire_count += 1;
                        dmar_unmap_pgtbl(sfp);
                        /* Only executed once. */
        pte += ctx_pgtbl_pte_off(ctx, base, lvl);
ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    vm_page_t *ma, uint64_t pflags, int flags)
        dmar_gaddr_t pg_sz, base1, size1;
        vm_pindex_t pi, c, idx, run_sz;

        DMAR_CTX_ASSERT_PGLOCKED(ctx);

        flags |= DMAR_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;
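        /*
         * Walk the range one mapping at a time.  For each iteration,
         * pick the largest page size that can be used: a superpage is
         * chosen only if the level supports it, both the guest and the
         * host addresses are aligned to the superpage size, the
         * remaining size is large enough, and the host pages are
         * physically contiguous.
         */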
        for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
                for (lvl = 0, c = 0, superpage = false;; lvl++) {
                        pg_sz = ctx_page_size(ctx, lvl);
                        run_sz = pg_sz >> DMAR_PAGE_SHIFT;
                        if (lvl == ctx->pglvl - 1)
                        /*
                         * Check if the current base is suitable for
                         * the superpage mapping.  First, verify the
                         * level.
                         */
                        if (!ctx_is_sp_lvl(ctx, lvl))
                        /*
                         * Next, look at the size of the mapping and
                         * the alignment of both guest and host
                         * addresses.
                         */
                        if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
                            (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
                        /* All passed, check host pages contiguity. */
                        for (c = 1; c < run_sz; c++) {
                                if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
                                    VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
                KASSERT(size >= pg_sz,
                    ("mapping loop overflow %p %jx %jx %jx", ctx,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
                KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
                pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
                        KASSERT((flags & DMAR_PGF_WAITOK) == 0,
                            ("failed waitable pte alloc %p", ctx));
                                dmar_unmap_pgtbl(sf);
                        ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
                dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
                    (superpage ? DMAR_PTE_SP : 0));
                dmar_flush_pte_to_ram(ctx->dmar, pte);
                sf_buf_page(sf)->wire_count += 1;
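                /*
                 * The pte stored above wires its page table page, so
                 * that pages which still map something are never
                 * freed; the wiring is dropped again when the pte is
                 * cleared during unmap.
                 */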
        dmar_unmap_pgtbl(sf);

ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
    vm_page_t *ma, uint64_t pflags, int flags)
        struct dmar_unit *unit;

        KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
            ("modifying idmap pagetable ctx %p", ctx));
        KASSERT((base & DMAR_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT((size & DMAR_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT(base < (1ULL << ctx->agaw),
            ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size < (1ULL << ctx->agaw),
            ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
            ("neither read nor write %jx", (uintmax_t)pflags));
        KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
            ("invalid pte flags %jx", (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_SC) != 0,
            ("PTE_SNP for dmar without snoop control %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_TM) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_DI) != 0,
            ("PTE_TM for dmar without DIOTLB %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        DMAR_CTX_PGLOCK(ctx);
        error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
        DMAR_CTX_PGUNLOCK(ctx);
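        /*
         * If the DMAR reports Caching Mode (CM), it may cache
         * not-present and erroneous translations, so the IOTLB must be
         * invalidated even when entries are only added.  Otherwise a
         * write buffer flush is enough when the hardware requires it
         * (RWBF).
         */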
        if ((unit->hw_cap & DMAR_CAP_CM) != 0)
                ctx_flush_iotlb_sync(ctx, base, size);
        else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                /* See 11.1 Write Buffer Flushing. */
                dmar_flush_write_bufs(unit);

static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
    int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf);

ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags)
        pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
        ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true);
ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
    int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
        dmar_pte_clear(&pte->pte);
        dmar_flush_pte_to_ram(ctx->dmar, pte);
        m = sf_buf_page(*sf);
                dmar_unmap_pgtbl(*sf);
        if (m->wire_count != 0)
            ("lost reference (lvl) on root pg ctx %p base %jx lvl %d",
            ctx, (uintmax_t)base, lvl));
        KASSERT(m->pindex != 0,
            ("lost reference (idx) on root pg ctx %p base %jx lvl %d",
            ctx, (uintmax_t)base, lvl));
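        /*
         * The last pte in the page table page was cleared and the page
         * is no longer wired: free it, and recursively clear the pde
         * in the level above that pointed to it, which in turn may
         * free the upper-level page table page.
         */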
        dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
        ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags);

/*
 * Assumes that the unmap is never partial.
 */
ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
    dmar_gaddr_t size, int flags)

        DMAR_CTX_ASSERT_PGLOCKED(ctx);

        KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
            ("modifying idmap pagetable ctx %p", ctx));
        KASSERT((base & DMAR_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT((size & DMAR_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT(base < (1ULL << ctx->agaw),
            ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size < (1ULL << ctx->agaw),
            ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
            (uintmax_t)size, ctx->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        pg_sz = 0; /* silence gcc */
        flags |= DMAR_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;
        for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
                for (lvl = 0; lvl < ctx->pglvl; lvl++) {
                        if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl))
                        pg_sz = ctx_page_size(ctx, lvl);
                        pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags,
                            ("sleeping or page missed %p %jx %d 0x%x",
                            ctx, (uintmax_t)base, lvl, flags));
                        if ((pte->pte & DMAR_PTE_SP) != 0 ||
                            lvl == ctx->pglvl - 1) {
                                ctx_unmap_clear_pte(ctx, base, lvl, flags,
                KASSERT(size >= pg_sz,
                    ("unmapping loop overflow %p %jx %jx %jx", ctx,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
        dmar_unmap_pgtbl(sf);

        /*
         * See 11.1 Write Buffer Flushing for an explanation why RWBF
         * can be ignored here.
         */
ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
        DMAR_CTX_PGLOCK(ctx);
        error = ctx_unmap_buf_locked(ctx, base, size, flags);
        DMAR_CTX_PGUNLOCK(ctx);

ctx_alloc_pgtbl(struct dmar_ctx *ctx)
        KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx));

        ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL);
        DMAR_CTX_PGLOCK(ctx);
        m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK |
            DMAR_PGF_ZERO | DMAR_PGF_OBJL);
        /* No implicit free of the top level page table page. */
        DMAR_CTX_PGUNLOCK(ctx);

ctx_free_pgtbl(struct dmar_ctx *ctx)
        obj = ctx->pgtbl_obj;
                KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
                    (ctx->flags & DMAR_CTX_IDMAP) != 0,
                    ("lost pagetable object ctx %p", ctx));
        DMAR_CTX_ASSERT_PGLOCKED(ctx);
        ctx->pgtbl_obj = NULL;

        if ((ctx->flags & DMAR_CTX_IDMAP) != 0) {
                put_idmap_pgtbl(obj);
                ctx->flags &= ~DMAR_CTX_IDMAP;

        /* Obliterate wire_counts */
        VM_OBJECT_ASSERT_WLOCKED(obj);
        for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
        VM_OBJECT_WUNLOCK(obj);
        vm_object_deallocate(obj);

static inline uint64_t
ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
        dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
            DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
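        /*
         * Busy-wait for the hardware to clear the IVT bit, which
         * indicates that the requested IOTLB invalidation has
         * completed, and hand the raw register value back to the
         * caller for inspection of the IAIG field.
         */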
                iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
                if ((iotlbr & DMAR_IOTLB_IVT) == 0)

ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
        struct dmar_unit *unit;

        KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
        iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;

        if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
                iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
                    DMAR_IOTLB_DID(ctx->domain), iro);
                KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                    DMAR_IOTLB_IAIG_INVLD,
                    ("dmar%d: invalidation failed %jx", unit->unit,
                for (; size > 0; base += isize, size -= isize) {
                        am = calc_am(unit, base, size, &isize);
                        dmar_write8(unit, iro, base | am);
                        iotlbr = ctx_wait_iotlb_flush(unit,
                            DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),
                        KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_INVLD,
                            ("dmar%d: PSI invalidation failed "
                            "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
                            unit->unit, (uintmax_t)iotlbr,
                            (uintmax_t)base, (uintmax_t)size, am));
                        /*
                         * Any non-page granularity covers the whole
                         * guest address space for the domain.
                         */
                        if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_PAGE)