/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");

static void dmar_domain_unload_task(void *arg, int pending);
static void dmar_unref_domain_locked(struct dmar_unit *dmar,
    struct dmar_domain *domain);
static void dmar_domain_destroy(struct dmar_domain *domain);

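/*
 * Ensure that the page of context entries for the given bus exists in
 * ctx_obj and is linked from the root table, allocating and publishing
 * it when it is not yet present.
 */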
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * Allocated context page must be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * transactions are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_flush_root_to_ram(dmar, re);
	dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
}

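/*
 * Map the context table page covering the RID's bus and return a
 * pointer to the RID's context entry; *sfp receives the sf_buf for a
 * later dmar_unmap_pgtbl().
 */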
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	dmar_ctx_entry_t *ctxp;

	ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
	    PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ctx->rid & 0xff;
	return (ctxp);
}

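/*
 * Initialize the busdma tag embedded in the context.  The tag limits
 * all DMA transactions to the address space covered by the domain.
 */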
static void
ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
	ctx->ctx_tag.common.boundary = 0;
	ctx->ctx_tag.common.lowaddr = maxaddr;
	ctx->ctx_tag.common.highaddr = maxaddr;
	ctx->ctx_tag.common.maxsize = maxaddr;
	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->ctx_tag.common.maxsegsz = maxaddr;
	ctx->ctx_tag.ctx = ctx;
	ctx->ctx_tag.owner = dev;
}

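/*
 * Write the context entry: domain id and address width go into the
 * upper doubleword, the page table root and translation type into the
 * lower.  Pass-through translation is used for identity-mapped domains
 * when the unit supports it.
 */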
static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	vm_page_t ctx_root;

	domain = ctx->domain;
	unit = domain->dmar;
	KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->unit, pci_get_bus(ctx->ctx_tag.owner),
	    pci_get_slot(ctx->ctx_tag.owner),
	    pci_get_function(ctx->ctx_tag.owner),
	    ctxp->ctx1, ctxp->ctx2));
	/*
	 * For update due to move, the store is not atomic.  It is
	 * possible that the DMAR reads the upper doubleword while the
	 * lower doubleword is not yet updated.  The domain id is
	 * stored in the upper doubleword, while the table pointer is
	 * in the lower.
	 *
	 * There is no good solution, for the same reason it is wrong
	 * to clear the P bit in the ctx entry for update.
	 */
	dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
	    domain->awlvl);
	if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(domain->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0,
		    DMAR_PGF_NOALLOC);
		dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
	dmar_flush_ctx_to_ram(unit, ctxp);
}

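/*
 * Flush the context cache, and the IOTLB when required, after a
 * context entry was installed or changed.  Nothing is done when the
 * unit does not use Caching Mode, unless force is set.
 */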
static int
dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
{
	int error;

	/*
	 * If the dmar declares Caching Mode as Set, follow 11.5
	 * "Caching Mode Consideration" and do the (global)
	 * invalidation of the negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
		return (0);
	if (dmar->qi_enabled) {
		dmar_qi_invalidate_ctx_glob_locked(dmar);
		if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		return (0);
	}
	error = dmar_inv_ctx_glob(dmar);
	if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
		error = dmar_inv_iotlb_glob(dmar);
	return (error);
}

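/*
 * Map the RMRR regions which the BIOS reported for the device into the
 * domain, sanitizing the ranges supplied by buggy firmware first.
 */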
static int
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
    int slot, int func, int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
	    dev_path_len, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		if (bootverbose)
			printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
			    domain->dmar->unit, bus, slot, func,
			    (uintmax_t)start, (uintmax_t)end);
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes */
			if (bootverbose) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf("BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    domain->dmar->unit, (uintmax_t)start,
				    (uintmax_t)end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(domain, entry,
		    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
		    DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by the context rb
		 * tree.  Get rid of a failed entry, but do not stop
		 * the loop.  The rest of the parsed RMRR entries are
		 * loaded and removed on the context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(domain->dmar);
			domain->refs++; /* XXXKIB prevent free */
			domain->flags |= DMAR_DOMAIN_RMRR;
			DMAR_UNLOCK(domain->dmar);
		} else {
			if (error1 != 0) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf(
			    "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
				    domain->dmar->unit, (uintmax_t)start,
				    (uintmax_t)end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(domain, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

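/*
 * Allocate a domain and initialize its id, lists, locks, unload task
 * and address width, together with either the identity-mapping page
 * table or a freshly allocated one.
 */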
static struct dmar_domain *
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
	struct dmar_domain *domain;
	int error, id, mgaw;

	id = alloc_unr(dmar->domids);
	if (id == -1)
		return (NULL);
	domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
	domain->domain = id;
	LIST_INIT(&domain->contexts);
	RB_INIT(&domain->rb_root);
	TAILQ_INIT(&domain->unload_entries);
	TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
	mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
	domain->dmar = dmar;

	/*
	 * For now, use the maximal usable physical address of the
	 * installed memory to calculate the mgaw on id_mapped domain.
	 * It is useful for the identity mapping, and less so for the
	 * virtualized bus address space.
	 */
	domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
	mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped);
	error = domain_set_agaw(domain, mgaw);
	if (error != 0)
		goto fail;
	if (!id_mapped)
		/* Use all supported address space for remapping. */
		domain->end = 1ULL << (domain->agaw - 1);

	dmar_gas_init_domain(domain);

	if (id_mapped) {
		if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
			domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
			    domain->end);
		}
		domain->flags |= DMAR_DOMAIN_IDMAP;
	} else {
		error = domain_alloc_pgtbl(domain);
		if (error != 0)
			goto fail;
		/* Disable local apic region access */
		error = dmar_gas_reserve_region(domain, 0xfee00000,
		    0xfeefffff + 1);
		if (error != 0)
			goto fail;
	}
	return (domain);

fail:
	dmar_domain_destroy(domain);
	return (NULL);
}

static struct dmar_ctx *
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	ctx->domain = domain;
	ctx->rid = rid;
	ctx->refs = 1;
	return (ctx);
}

static void
dmar_ctx_link(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = ctx->domain;
	DMAR_ASSERT_LOCKED(domain->dmar);
	KASSERT(domain->refs >= domain->ctx_cnt,
	    ("dom %p ref underflow %d %d", domain, domain->refs,
	    domain->ctx_cnt));
	domain->refs++;
	domain->ctx_cnt++;
	LIST_INSERT_HEAD(&domain->contexts, ctx, link);
}

static void
dmar_ctx_unlink(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = ctx->domain;
	DMAR_ASSERT_LOCKED(domain->dmar);
	KASSERT(domain->refs > 0,
	    ("domain %p ctx dtr refs %d", domain, domain->refs));
	KASSERT(domain->ctx_cnt >= domain->refs,
	    ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
	    domain->refs, domain->ctx_cnt));
	domain->refs--;
	domain->ctx_cnt--;
	LIST_REMOVE(ctx, link);
}

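/*
 * Final teardown of a fully dereferenced domain; releases everything
 * allocated by dmar_domain_alloc().
 */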
static void
dmar_domain_destroy(struct dmar_domain *domain)
{

	KASSERT(TAILQ_EMPTY(&domain->unload_entries),
	    ("unfinished unloads %p", domain));
	KASSERT(LIST_EMPTY(&domain->contexts),
	    ("destroying dom %p with contexts", domain));
	KASSERT(domain->ctx_cnt == 0,
	    ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
	KASSERT(domain->refs == 0,
	    ("destroying dom %p with refs %d", domain, domain->refs));
	if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
		DMAR_DOMAIN_LOCK(domain);
		dmar_gas_fini_domain(domain);
		DMAR_DOMAIN_UNLOCK(domain);
	}
	if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
		if (domain->pgtbl_obj != NULL)
			DMAR_DOMAIN_PGLOCK(domain);
		domain_free_pgtbl(domain);
	}
	mtx_destroy(&domain->lock);
	free_unr(domain->dmar->domids, domain->domain);
	free(domain, M_DMAR_DOMAIN);
}

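/*
 * Find or create the context for the given RID.  The sleepable
 * allocations are done with the dmar lock dropped, so the context list
 * is rechecked afterwards to catch a parallel allocation of the same
 * RID.
 */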
static struct dmar_ctx *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{
	struct dmar_domain *domain, *domain1;
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error;
	bool enable;

	if (dev != NULL) {
		bus = pci_get_bus(dev);
		slot = pci_get_slot(dev);
		func = pci_get_function(dev);
	} else {
		bus = PCI_RID2BUS(rid);
		slot = PCI_RID2SLOT(rid);
		func = PCI_RID2FUNC(rid);
	}
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, rid);
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if sleeping is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
		domain1 = dmar_domain_alloc(dmar, id_mapped);
		if (domain1 == NULL) {
			TD_PINNED_ASSERT;
			return (NULL);
		}
		if (!id_mapped) {
			error = domain_init_rmrr(domain1, dev, bus,
			    slot, func, dev_domain, dev_busno, dev_path,
			    dev_path_len);
			if (error != 0) {
				dmar_domain_destroy(domain1);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctx1 = dmar_ctx_alloc(domain1, rid);
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts; another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, rid);
		if (ctx == NULL) {
			domain = domain1;
			ctx = ctx1;
			dmar_ctx_link(ctx);
			ctx->ctx_tag.owner = dev;
			ctx_tag_init(ctx, dev);

			/*
			 * This is the first activated context for the
			 * DMAR unit.  Enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->domains))
				enable = true;
			LIST_INSERT_HEAD(&dmar->domains, domain, link);
			ctx_id_entry_init(ctx, ctxp, false);
			if (dev != NULL) {
				device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
				    "agaw %d %s-mapped\n",
				    dmar->unit, dmar->segment, bus, slot,
				    func, rid, domain->domain, domain->mgaw,
				    domain->agaw, id_mapped ? "id" : "re");
			}
			dmar_unmap_pgtbl(sf);
		} else {
			dmar_unmap_pgtbl(sf);
			dmar_domain_destroy(domain1);
			/* Nothing needs to be done to destroy ctx1. */
			free(ctx1, M_DMAR_CTX);
			domain = ctx->domain;
			ctx->refs++; /* tag referenced us */
		}
	} else {
		domain = ctx->domain;
		if (ctx->ctx_tag.owner == NULL)
			ctx->ctx_tag.owner = dev;
		ctx->refs++; /* tag referenced us */
	}

	error = dmar_flush_for_ctx_entry(dmar, enable);
	if (error != 0) {
		dmar_free_ctx_locked(dmar, ctx);
		TD_PINNED_ASSERT;
		return (NULL);
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of GCMD_TE
	 * to avoid an unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error == 0) {
			if (bootverbose) {
				printf("dmar%d: enabled translation\n",
				    dmar->unit);
			}
		} else {
			printf("dmar%d: enabling translation failed, "
			    "error %d\n", dmar->unit, error);
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

struct dmar_ctx *
dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	int dev_domain, dev_path_len, dev_busno;

	dev_domain = pci_get_domain(dev);
	dev_path_len = dmar_dev_depth(dev);
	ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
	dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
	return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

struct dmar_ctx *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{

	return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

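/*
 * Move the context to the new domain, rewriting its context entry in
 * place; see the note about the non-atomic update in
 * ctx_id_entry_init().
 */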
int
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;
	struct dmar_domain *old_domain;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int error;

	dmar = domain->dmar;
	old_domain = ctx->domain;
	if (domain == old_domain)
		return (0);
	KASSERT(old_domain->dmar == dmar,
	    ("domain %p %u moving between dmars %u %u", domain,
	    domain->domain, old_domain->dmar->unit, domain->dmar->unit));
	TD_PREP_PINNED_ASSERT;

	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	dmar_ctx_unlink(ctx);
	ctx->domain = domain;
	dmar_ctx_link(ctx);
	ctx_id_entry_init(ctx, ctxp, true);
	dmar_unmap_pgtbl(sf);
	error = dmar_flush_for_ctx_entry(dmar, true);
	/* If the flush failed, rolling back would not work either. */
	printf("dmar%d rid %x domain %d->%d %s-mapped\n",
	    dmar->unit, ctx->rid, old_domain->domain, domain->domain,
	    (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
	dmar_unref_domain_locked(dmar, old_domain);
	TD_PINNED_ASSERT;
	return (error);
}

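/*
 * Drop a reference on the domain.  The last reference unlinks the
 * domain from the unit, drains the pending unloads and destroys it.
 * Called with the dmar lock held; returns with it dropped.
 */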
static void
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
{

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(domain->refs >= 1,
	    ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
	KASSERT(domain->refs > domain->ctx_cnt,
	    ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
	    domain->refs, domain->ctx_cnt));

	if (domain->refs > 1) {
		domain->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0,
	    ("lost ref on RMRR domain %p", domain));

	LIST_REMOVE(domain, link);
	DMAR_UNLOCK(dmar);

	taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
	dmar_domain_destroy(domain);
}

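/*
 * Release a reference on the context.  On the last reference the
 * context entry is cleared and the caches are flushed before the
 * context and its domain reference are freed.  Called with the dmar
 * lock held; returns with it dropped.
 */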
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;
	struct dmar_domain *domain;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not last, only the dereference should
	 * be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep; unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf);
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_flush_ctx_to_ram(dmar, ctxp);
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	dmar_unmap_pgtbl(sf);
	domain = ctx->domain;
	dmar_ctx_unlink(ctx);
	free(ctx, M_DMAR_CTX);
	dmar_unref_domain_locked(dmar, domain);
	TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->domain->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

/*
 * Returns with the domain locked.
 */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_domain *domain;
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(domain, &dmar->domains, link) {
		LIST_FOREACH(ctx, &domain->contexts, link) {
			if (ctx->rid == rid)
				return (ctx);
		}
	}
	return (NULL);
}

void
dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_domain *domain;

	domain = entry->domain;
	DMAR_DOMAIN_LOCK(domain);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(domain, entry);
	else
		dmar_gas_free_space(domain, entry);
	DMAR_DOMAIN_UNLOCK(domain);
	if (free)
		dmar_gas_free_entry(domain, entry);
	else
		entry->flags = 0;
}

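/*
 * Unload a single map entry: with queued invalidation the IOTLB flush
 * is posted asynchronously and the entry is put on the unit's flush
 * queue, otherwise the flush is done synchronously and the entry is
 * freed here.
 */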
void
dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->domain->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->domain, entry->start,
		    entry->end - entry->start, &entry->gseq, true);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry,
		    dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		domain_flush_iotlb_sync(entry->domain, entry->start,
		    entry->end - entry->start);
		dmar_domain_free_entry(entry, free);
	}
}

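/*
 * Decide whether the queued invalidation for this entry should emit a
 * wait descriptor: always for the last entry of the batch, and for
 * every dmar_batch_coalesce-th entry otherwise.
 */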
static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
    struct dmar_map_entry *entry)
{

	if (TAILQ_NEXT(entry, dmamap_link) == NULL)
		return (true);
	return (domain->batch_no++ % dmar_batch_coalesce == 0);
}

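/*
 * Unmap a queue of map entries.  Without queued invalidation each
 * entry is flushed and freed synchronously; with it, the entries are
 * handed over to the unit's TLB flush queue after the invalidations
 * are posted.
 */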
void
dmar_domain_unload(struct dmar_domain *domain,
    struct dmar_map_entries_tailq *entries, bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	int error;

	unit = domain->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", domain, entry));
		error = domain_unmap_buf(domain, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", domain, error));
		if (!unit->qi_enabled) {
			domain_flush_iotlb_sync(domain, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_domain_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		dmar_qi_invalidate_locked(domain, entry->start, entry->end -
		    entry->start, &entry->gseq,
		    dmar_domain_unload_emit_wait(domain, entry));
	}
	TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
	DMAR_UNLOCK(unit);
}

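/*
 * Taskqueue handler draining the domain's deferred unload queue,
 * repeating until no new entries appear.
 */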
static void
dmar_domain_unload_task(void *arg, int pending)
{
	struct dmar_domain *domain;
	struct dmar_map_entries_tailq entries;

	domain = arg;
	TAILQ_INIT(&entries);

	for (;;) {
		DMAR_DOMAIN_LOCK(domain);
		TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
		    dmamap_link);
		DMAR_DOMAIN_UNLOCK(domain);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_domain_unload(domain, &entries, true);
	}
}