/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/intel_dmar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");

static void dmar_unref_domain_locked(struct dmar_unit *dmar,
    struct dmar_domain *domain);
static void dmar_domain_destroy(struct dmar_domain *domain);

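/*
 * Ensure that the page of context entries for the given bus exists and
 * is referenced from the root-entry table, allocating and linking it
 * if necessary.
 */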
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
        struct sf_buf *sf;
        dmar_root_entry_t *re;
        vm_page_t ctxm;

        /*
         * Allocated context page must be linked.
         */
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC);
        if (ctxm != NULL)
                return;

        /*
         * Page not present, allocate and link.  Note that another
         * thread might execute this sequence in parallel.  This
         * should be safe, because the context entries written by both
         * threads are equal.
         */
        TD_PREP_PINNED_ASSERT;
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO |
            IOMMU_PGF_WAITOK);
        re = dmar_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf);
        re += bus;
        dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
            VM_PAGE_TO_PHYS(ctxm)));
        dmar_flush_root_to_ram(dmar, re);
        dmar_unmap_pgtbl(sf);
        TD_PINNED_ASSERT;
}

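/*
 * Map the page of context entries covering the requester ID of the
 * context and return a pointer to its entry.  The sf_buf used for the
 * mapping is returned through sfp for the caller to unmap later.
 */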
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
        struct dmar_unit *dmar;
        dmar_ctx_entry_t *ctxp;

        dmar = CTX2DMAR(ctx);

        ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
            PCI_RID2BUS(ctx->rid), IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp);
        ctxp += ctx->rid & 0xff;
        return (ctxp);
}

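/*
 * Initialize the busdma tag for the device context.  The address
 * limits are derived from the domain end address, capped at
 * BUS_SPACE_MAXADDR.
 */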
static void
device_tag_init(struct dmar_ctx *ctx, device_t dev)
{
        struct dmar_domain *domain;
        bus_addr_t maxaddr;

        domain = CTX2DOM(ctx);
        maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR);
        ctx->context.tag->common.ref_count = 1; /* Prevent free */
        ctx->context.tag->common.impl = &bus_dma_iommu_impl;
        ctx->context.tag->common.boundary = 0;
        ctx->context.tag->common.lowaddr = maxaddr;
        ctx->context.tag->common.highaddr = maxaddr;
        ctx->context.tag->common.maxsize = maxaddr;
        ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
        ctx->context.tag->common.maxsegsz = maxaddr;
        ctx->context.tag->ctx = CTX2IOCTX(ctx);
        ctx->context.tag->owner = dev;
}

static void
ctx_id_entry_init_one(dmar_ctx_entry_t *ctxp, struct dmar_domain *domain,
    vm_page_t ctx_root)
{
        /*
         * For update due to move, the store is not atomic.  It is
         * possible that the DMAR reads the upper doubleword while the
         * lower doubleword is not yet updated.  The domain id is
         * stored in the upper doubleword, the table pointer in the
         * lower.
         *
         * There is no good solution; for the same reason, it is wrong
         * to clear the P bit in the ctx entry for update.
         */
        dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
            domain->awlvl);
        if (ctx_root == NULL) {
                dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
        } else {
                dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
                    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
                    DMAR_CTX1_P);
        }
}

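/*
 * Initialize the context entry for the device, or, for a bus-wide
 * context, all 256 entries on the bus.  A NULL page-table root selects
 * the pass-through translation type.
 */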
static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
    int busno)
{
        struct dmar_unit *unit;
        struct dmar_domain *domain;
        vm_page_t ctx_root;
        int i;

        domain = CTX2DOM(ctx);
        unit = DOM2DMAR(domain);
        KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
            ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
            unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
            pci_get_function(ctx->context.tag->owner),
            ctxp->ctx1, ctxp->ctx2));

        if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 &&
            (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
                KASSERT(domain->pgtbl_obj == NULL,
                    ("ctx %p non-null pgtbl_obj", ctx));
                ctx_root = NULL;
        } else {
                ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0,
                    IOMMU_PGF_NOALLOC);
        }

        if (iommu_is_buswide_ctx(DMAR2IOMMU(unit), busno)) {
                MPASS(!move);
                for (i = 0; i <= PCI_BUSMAX; i++) {
                        ctx_id_entry_init_one(&ctxp[i], domain, ctx_root);
                }
        } else {
                ctx_id_entry_init_one(ctxp, domain, ctx_root);
        }
        dmar_flush_ctx_to_ram(unit, ctxp);
}

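/*
 * Flush the context cache, and the IOTLB when required, after a
 * context entry has been installed or changed.
 */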
static int
dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
{
        int error;

        /*
         * If the dmar declares Caching Mode as Set, follow 11.5
         * "Caching Mode Consideration" and do the (global)
         * invalidation of the negative TLB entries.
         */
        if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
                return (0);
        if (dmar->qi_enabled) {
                dmar_qi_invalidate_ctx_glob_locked(dmar);
                if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
                        dmar_qi_invalidate_iotlb_glob_locked(dmar);
                return (0);
        }
        error = dmar_inv_ctx_glob(dmar);
        if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
                error = dmar_inv_iotlb_glob(dmar);
        return (error);
}

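/*
 * Parse the RMRR entries which reference the device and pre-map the
 * reported regions into the domain, so that DMA targeting them keeps
 * working once translation is enabled.
 */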
static int
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
    int slot, int func, int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len)
{
        struct iommu_map_entries_tailq rmrr_entries;
        struct iommu_map_entry *entry, *entry1;
        vm_page_t *ma;
        iommu_gaddr_t start, end;
        vm_pindex_t size, i;
        int error, error1;

        error = 0;
        TAILQ_INIT(&rmrr_entries);
        dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
            dev_path_len, &rmrr_entries);
        TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
                /*
                 * The VT-d specification requires that the start of an
                 * RMRR entry is 4k-aligned.  Buggy BIOSes put
                 * anything into the start and end fields.  Truncate
                 * and round as necessary.
                 *
                 * We also allow overlapping RMRR entries, see
                 * iommu_gas_alloc_region().
                 */
                start = entry->start;
                end = entry->end;
                if (bootverbose)
                        printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
                            domain->iodom.iommu->unit, bus, slot, func,
                            (uintmax_t)start, (uintmax_t)end);
                entry->start = trunc_page(start);
                entry->end = round_page(end);
                if (entry->start == entry->end) {
                        /* Workaround for some AMI (?) BIOSes */
                        if (bootverbose) {
                                if (dev != NULL)
                                        device_printf(dev, "");
                                printf("pci%d:%d:%d ", bus, slot, func);
                                printf("BIOS bug: dmar%d RMRR "
                                    "region (%jx, %jx) corrected\n",
                                    domain->iodom.iommu->unit, start, end);
                        }
                        entry->end += DMAR_PAGE_SIZE * 0x20;
                }
                size = OFF_TO_IDX(entry->end - entry->start);
                ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
                for (i = 0; i < size; i++) {
                        ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
                            VM_MEMATTR_DEFAULT);
                }
                error1 = iommu_gas_map_region(DOM2IODOM(domain), entry,
                    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
                    IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
                /*
                 * Non-failed RMRR entries are owned by the context rb
                 * tree.  Get rid of the failed entry, but do not stop
                 * the loop.  The rest of the parsed RMRR entries are
                 * loaded and removed on the context destruction.
                 */
                if (error1 == 0 && entry->end != entry->start) {
                        IOMMU_LOCK(domain->iodom.iommu);
                        domain->refs++; /* XXXKIB prevent free */
                        domain->iodom.flags |= IOMMU_DOMAIN_RMRR;
                        IOMMU_UNLOCK(domain->iodom.iommu);
                } else {
                        if (error1 != 0) {
                                if (dev != NULL)
                                        device_printf(dev, "");
                                printf("pci%d:%d:%d ", bus, slot, func);
                                printf(
                            "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
                                    domain->iodom.iommu->unit, start, end,
                                    error1);
                                error = error1;
                        }
                        TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
                        iommu_gas_free_entry(DOM2IODOM(domain), entry);
                }
                for (i = 0; i < size; i++)
                        vm_page_putfake(ma[i]);
                free(ma, M_TEMP);
        }
        return (error);
}

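/*
 * Allocate and initialize a domain: assign a domain id, size the
 * address space, and set up either the identity mapping or an empty
 * page table.
 */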
static struct dmar_domain *
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
        struct iommu_domain *iodom;
        struct iommu_unit *unit;
        struct dmar_domain *domain;
        int error, id, mgaw;

        id = alloc_unr(dmar->domids);
        if (id == -1)
                return (NULL);
        domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
        iodom = DOM2IODOM(domain);
        unit = DMAR2IOMMU(dmar);
        domain->domain = id;
        LIST_INIT(&domain->contexts);
        iommu_domain_init(unit, iodom, &dmar_domain_map_ops);
        domain->dmar = dmar;

        /*
         * For now, use the maximal usable physical address of the
         * installed memory to calculate the mgaw on id_mapped domain.
         * It is useful for the identity mapping, and less so for the
         * virtualized bus address space.
         */
        domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
        mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped);
        error = domain_set_agaw(domain, mgaw);
        if (error != 0)
                goto fail;
        if (!id_mapped)
                /* Use all supported address space for remapping. */
                domain->iodom.end = 1ULL << (domain->agaw - 1);

        iommu_gas_init_domain(DOM2IODOM(domain));

        if (id_mapped) {
                if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
                        domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
                            domain->iodom.end);
                }
                domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
        } else {
                error = domain_alloc_pgtbl(domain);
                if (error != 0)
                        goto fail;
                /* Disable local apic region access */
                error = iommu_gas_reserve_region(iodom, 0xfee00000,
                    0xfeefffff + 1, &iodom->msi_entry);
                if (error != 0)
                        goto fail;
        }
        return (domain);

fail:
        dmar_domain_destroy(domain);
        return (NULL);
}

static struct dmar_ctx *
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
        struct dmar_ctx *ctx;

        ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
        ctx->context.domain = DOM2IODOM(domain);
        ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
            M_DMAR_CTX, M_WAITOK | M_ZERO);
        ctx->rid = rid;
        ctx->refs = 1;
        return (ctx);
}

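/*
 * Link the context to its domain, taking a domain reference on behalf
 * of the context.
 */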
static void
dmar_ctx_link(struct dmar_ctx *ctx)
{
        struct dmar_domain *domain;

        domain = CTX2DOM(ctx);
        IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
        KASSERT(domain->refs >= domain->ctx_cnt,
            ("dom %p ref underflow %d %d", domain, domain->refs,
            domain->ctx_cnt));
        domain->refs++;
        domain->ctx_cnt++;
        LIST_INSERT_HEAD(&domain->contexts, ctx, link);
}

static void
dmar_ctx_unlink(struct dmar_ctx *ctx)
{
        struct dmar_domain *domain;

        domain = CTX2DOM(ctx);
        IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
        KASSERT(domain->refs > 0,
            ("domain %p ctx dtr refs %d", domain, domain->refs));
        KASSERT(domain->ctx_cnt >= domain->refs,
            ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
            domain->refs, domain->ctx_cnt));
        domain->refs--;
        domain->ctx_cnt--;
        LIST_REMOVE(ctx, link);
}

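/*
 * Tear down a domain which has no contexts and no references left.
 */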
static void
dmar_domain_destroy(struct dmar_domain *domain)
{
        struct iommu_domain *iodom;
        struct dmar_unit *dmar;

        iodom = DOM2IODOM(domain);

        KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
            ("unfinished unloads %p", domain));
        KASSERT(LIST_EMPTY(&domain->contexts),
            ("destroying dom %p with contexts", domain));
        KASSERT(domain->ctx_cnt == 0,
            ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
        KASSERT(domain->refs == 0,
            ("destroying dom %p with refs %d", domain, domain->refs));
        if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
                DMAR_DOMAIN_LOCK(domain);
                iommu_gas_fini_domain(iodom);
                DMAR_DOMAIN_UNLOCK(domain);
        }
        if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
                if (domain->pgtbl_obj != NULL)
                        DMAR_DOMAIN_PGLOCK(domain);
                domain_free_pgtbl(domain);
        }
        iommu_domain_fini(iodom);
        dmar = DOM2DMAR(domain);
        free_unr(dmar->domids, domain->domain);
        free(domain, M_DMAR_DOMAIN);
}

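/*
 * Find or create the context for the given requester ID.  The
 * allocations which may sleep are performed with the DMAR lock
 * dropped, and the context list is rechecked afterwards, since another
 * thread may have created the needed context in parallel.
 */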
static struct dmar_ctx *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{
        struct dmar_domain *domain, *domain1;
        struct dmar_ctx *ctx, *ctx1;
        struct iommu_unit *unit;
        dmar_ctx_entry_t *ctxp;
        struct sf_buf *sf;
        int bus, slot, func, error;
        bool enable;

        if (dev != NULL) {
                bus = pci_get_bus(dev);
                slot = pci_get_slot(dev);
                func = pci_get_function(dev);
        } else {
                bus = PCI_RID2BUS(rid);
                slot = PCI_RID2SLOT(rid);
                func = PCI_RID2FUNC(rid);
        }
        enable = false;
        TD_PREP_PINNED_ASSERT;
        unit = DMAR2IOMMU(dmar);
        DMAR_LOCK(dmar);
        KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0),
            ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
            slot, func));
        ctx = dmar_find_ctx_locked(dmar, rid);
        error = 0;
        if (ctx == NULL) {
                /*
                 * Perform the allocations which require sleep or have
                 * a higher chance to succeed if the sleep is allowed.
                 */
                DMAR_UNLOCK(dmar);
                dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
                domain1 = dmar_domain_alloc(dmar, id_mapped);
                if (domain1 == NULL) {
                        TD_PINNED_ASSERT;
                        return (NULL);
                }
                if (!id_mapped) {
                        error = domain_init_rmrr(domain1, dev, bus,
                            slot, func, dev_domain, dev_busno, dev_path,
                            dev_path_len);
                        if (error != 0) {
                                dmar_domain_destroy(domain1);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                }
                ctx1 = dmar_ctx_alloc(domain1, rid);
                ctxp = dmar_map_ctx_entry(ctx1, &sf);
                DMAR_LOCK(dmar);

                /*
                 * Recheck the contexts, another thread might have
                 * already allocated the needed one.
                 */
                ctx = dmar_find_ctx_locked(dmar, rid);
                if (ctx == NULL) {
                        domain = domain1;
                        ctx = ctx1;
                        dmar_ctx_link(ctx);
                        ctx->context.tag->owner = dev;
                        device_tag_init(ctx, dev);

                        /*
                         * This is the first activated context for the
                         * DMAR unit.  Enable the translation after
                         * everything is set up.
                         */
                        if (LIST_EMPTY(&dmar->domains))
                                enable = true;
                        LIST_INSERT_HEAD(&dmar->domains, domain, link);
                        ctx_id_entry_init(ctx, ctxp, false, bus);
                        if (dev != NULL) {
                                device_printf(dev,
                                    "dmar%d pci%d:%d:%d:%d rid %x domain %d "
                                    "mgaw %d agaw %d %s-mapped\n",
                                    dmar->iommu.unit, dmar->segment, bus, slot,
                                    func, rid, domain->domain, domain->mgaw,
                                    domain->agaw, id_mapped ? "id" : "re");
                        }
                        dmar_unmap_pgtbl(sf);
                } else {
                        dmar_unmap_pgtbl(sf);
                        dmar_domain_destroy(domain1);
                        /* Nothing needs to be done to destroy ctx1. */
                        free(ctx1, M_DMAR_CTX);
                        domain = CTX2DOM(ctx);
                        ctx->refs++; /* tag referenced us */
                }
        } else {
                domain = CTX2DOM(ctx);
                if (ctx->context.tag->owner == NULL)
                        ctx->context.tag->owner = dev;
                ctx->refs++; /* tag referenced us */
        }

        error = dmar_flush_for_ctx_entry(dmar, enable);
        if (error != 0) {
                dmar_free_ctx_locked(dmar, ctx);
                TD_PINNED_ASSERT;
                return (NULL);
        }

        /*
         * The dmar lock was potentially dropped between the check for
         * the empty context list and now.  Recheck the state of GCMD_TE
         * to avoid an unneeded command.
         */
        if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
                error = dmar_enable_translation(dmar);
                if (error == 0) {
                        if (bootverbose) {
                                printf("dmar%d: enabled translation\n",
                                    dmar->iommu.unit);
                        }
                } else {
                        printf("dmar%d: enabling translation failed, "
                            "error %d\n", dmar->iommu.unit, error);
                        dmar_free_ctx_locked(dmar, ctx);
                        TD_PINNED_ASSERT;
                        return (NULL);
                }
        }
        DMAR_UNLOCK(dmar);
        TD_PINNED_ASSERT;
        return (ctx);
}

struct dmar_ctx *
dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
        int dev_domain, dev_path_len, dev_busno;

        dev_domain = pci_get_domain(dev);
        dev_path_len = dmar_dev_depth(dev);
        ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
        dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
        return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
            dev_path, dev_path_len, id_mapped, rmrr_init));
}

struct dmar_ctx *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{

        return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
            dev_path, dev_path_len, id_mapped, rmrr_init));
}

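/*
 * Move the context to the new domain, rewriting the context entry in
 * place and dropping the reference on the old domain.
 */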
int
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
{
        struct dmar_unit *dmar;
        struct dmar_domain *old_domain;
        dmar_ctx_entry_t *ctxp;
        struct sf_buf *sf;
        int error;

        dmar = domain->dmar;
        old_domain = CTX2DOM(ctx);
        if (domain == old_domain)
                return (0);
        KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
            ("domain %p %u moving between dmars %u %u", domain,
            domain->domain, old_domain->iodom.iommu->unit,
            domain->iodom.iommu->unit));
        TD_PREP_PINNED_ASSERT;

        ctxp = dmar_map_ctx_entry(ctx, &sf);
        DMAR_LOCK(dmar);
        dmar_ctx_unlink(ctx);
        ctx->context.domain = &domain->iodom;
        dmar_ctx_link(ctx);
        ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
        dmar_unmap_pgtbl(sf);
        error = dmar_flush_for_ctx_entry(dmar, true);
        /* If the flush failed, rolling back would not work either. */
        printf("dmar%d rid %x domain %d->%d %s-mapped\n",
            dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
            (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re");
        dmar_unref_domain_locked(dmar, old_domain);
        TD_PINNED_ASSERT;
        return (error);
}

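/*
 * Drop a reference on the domain; the last reference destroys it.
 * Called with the DMAR lock held, which is dropped before return.
 */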
static void
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
{

        DMAR_ASSERT_LOCKED(dmar);
        KASSERT(domain->refs >= 1,
            ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
            domain->refs));
        KASSERT(domain->refs > domain->ctx_cnt,
            ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
            domain->refs, domain->ctx_cnt));

        if (domain->refs > 1) {
                domain->refs--;
                DMAR_UNLOCK(dmar);
                return;
        }

        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0,
            ("lost ref on RMRR domain %p", domain));

        LIST_REMOVE(domain, link);
        DMAR_UNLOCK(dmar);

        taskqueue_drain(dmar->iommu.delayed_taskqueue,
            &domain->iodom.unload_task);
        dmar_domain_destroy(domain);
}

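/*
 * Release a reference on the context.  When the last reference goes
 * away, clear the context entry, flush the caches and free both the
 * context and, possibly, its domain.
 */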
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
        struct sf_buf *sf;
        dmar_ctx_entry_t *ctxp;
        struct dmar_domain *domain;

        DMAR_ASSERT_LOCKED(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * If our reference is not last, only the dereference should
         * be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                return;
        }

        KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Otherwise, the context entry must be cleared before the
         * page table is destroyed.  The mapping of the context
         * entries page could require sleep, unlock the dmar.
         */
        DMAR_UNLOCK(dmar);
        TD_PREP_PINNED_ASSERT;
        ctxp = dmar_map_ctx_entry(ctx, &sf);
        DMAR_LOCK(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * Another thread might have referenced the context, in which
         * case again only the dereference should be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                dmar_unmap_pgtbl(sf);
                TD_PINNED_ASSERT;
                return;
        }

        KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Clear the context pointer and flush the caches.
         * XXXKIB: cannot do this if any RMRR entries are still present.
         */
        dmar_pte_clear(&ctxp->ctx1);
        ctxp->ctx2 = 0;
        dmar_flush_ctx_to_ram(dmar, ctxp);
        dmar_inv_ctx_glob(dmar);
        if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
                if (dmar->qi_enabled)
                        dmar_qi_invalidate_iotlb_glob_locked(dmar);
                else
                        dmar_inv_iotlb_glob(dmar);
        }
        dmar_unmap_pgtbl(sf);
        domain = CTX2DOM(ctx);
        dmar_ctx_unlink(ctx);
        free(ctx->context.tag, M_DMAR_CTX);
        free(ctx, M_DMAR_CTX);
        dmar_unref_domain_locked(dmar, domain);
        TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
        struct dmar_unit *dmar;

        dmar = CTX2DMAR(ctx);
        DMAR_LOCK(dmar);
        dmar_free_ctx_locked(dmar, ctx);
}

/*
 * Returns with the domain locked.
 */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
        struct dmar_domain *domain;
        struct dmar_ctx *ctx;

        DMAR_ASSERT_LOCKED(dmar);

        LIST_FOREACH(domain, &dmar->domains, link) {
                LIST_FOREACH(ctx, &domain->contexts, link) {
                        if (ctx->rid == rid)
                                return (ctx);
                }
        }
        return (NULL);
}

void
dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
        struct iommu_domain *domain;

        domain = entry->domain;
        IOMMU_DOMAIN_LOCK(domain);
        if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
                iommu_gas_free_region(domain, entry);
        else
                iommu_gas_free_space(domain, entry);
        IOMMU_DOMAIN_UNLOCK(domain);
        if (free)
                iommu_gas_free_entry(domain, entry);
        else
                entry->flags = 0;
}

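/*
 * Invalidate the IOTLB range for a single map entry.  With queued
 * invalidation the entry is put on the tlb_flush list and freed after
 * the flush completes; otherwise the flush is synchronous and the
 * entry is freed immediately.
 */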
void
dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{
        struct dmar_domain *domain;
        struct dmar_unit *unit;

        domain = IODOM2DOM(entry->domain);
        unit = DOM2DMAR(domain);
        if (unit->qi_enabled) {
                DMAR_LOCK(unit);
                dmar_qi_invalidate_locked(IODOM2DOM(entry->domain),
                    entry->start, entry->end - entry->start, &entry->gseq,
                    true);
                if (!free)
                        entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
                TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry,
                    dmamap_link);
                DMAR_UNLOCK(unit);
        } else {
                domain_flush_iotlb_sync(IODOM2DOM(entry->domain),
                    entry->start, entry->end - entry->start);
                dmar_domain_free_entry(entry, free);
        }
}

static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
    struct iommu_map_entry *entry)
{

        if (TAILQ_NEXT(entry, dmamap_link) == NULL)
                return (true);
        return (domain->batch_no++ % dmar_batch_coalesce == 0);
}

void
dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
        struct dmar_unit *unit;
        struct iommu_domain *iodom;
        struct iommu_map_entry *entry, *entry1;
        int error;

        iodom = DOM2IODOM(domain);
        unit = DOM2DMAR(domain);

        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", domain, entry));
                error = iodom->ops->unmap(iodom, entry->start, entry->end -
                    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", domain, error));
                if (!unit->qi_enabled) {
                        domain_flush_iotlb_sync(domain, entry->start,
                            entry->end - entry->start);
                        TAILQ_REMOVE(entries, entry, dmamap_link);
                        dmar_domain_free_entry(entry, true);
                }
        }
        if (TAILQ_EMPTY(entries))
                return;

        KASSERT(unit->qi_enabled, ("loaded entry left"));
        DMAR_LOCK(unit);
        TAILQ_FOREACH(entry, entries, dmamap_link) {
                dmar_qi_invalidate_locked(domain, entry->start, entry->end -
                    entry->start, &entry->gseq,
                    dmar_domain_unload_emit_wait(domain, entry));
        }
        TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
        DMAR_UNLOCK(unit);
}

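/*
 * Glue between the generic iommu busdma interface and the
 * DMAR-specific implementation above.
 */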
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
        struct dmar_unit *dmar;
        struct dmar_ctx *ret;

        dmar = IOMMU2DMAR(iommu);
        ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
        return (CTX2IOCTX(ret));
}

void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
{
        struct dmar_unit *dmar;
        struct dmar_ctx *ctx;

        dmar = IOMMU2DMAR(iommu);
        ctx = IOCTX2CTX(context);
        dmar_free_ctx_locked(dmar, ctx);
}

void
iommu_free_ctx(struct iommu_ctx *context)
{
        struct dmar_ctx *ctx;

        ctx = IOCTX2CTX(context);
        dmar_free_ctx(ctx);
}

void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{

        dmar_domain_unload_entry(entry, free);
}

void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
        struct dmar_domain *domain;

        domain = IODOM2DOM(iodom);
        dmar_domain_unload(domain, entries, cansleep);
}