/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);
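
/*
 * Ensure that the context table page for the given bus exists and is
 * referenced from the root entry.  Page 0 of ctx_obj holds the root
 * table, indexed by bus number; page 1 + bus holds the context table
 * for that bus.
 */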
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * The allocated context page must be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * threads are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	TD_PINNED_ASSERT;
}
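
/*
 * Map the context entry for the given context and return a pointer to
 * it.  A context table page holds 256 entries, one per (slot, func)
 * pair on the bus; the index below is the standard PCI devfn
 * encoding, slot << 3 | func.
 */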
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	dmar_ctx_entry_t *ctxp;

	ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + ctx->bus,
	    DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ((ctx->slot & 0x1f) << 3) + (ctx->func & 0x7);
	return (ctxp);
}
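
/*
 * Initialize the busdma tag used for DMA requests routed through this
 * context.  The limits are derived from the size of the context bus
 * address space, recorded in ctx->end.
 */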
static void
ctx_tag_init(struct dmar_ctx *ctx)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
	ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
	ctx->ctx_tag.common.lowaddr = maxaddr;
	ctx->ctx_tag.common.highaddr = maxaddr;
	ctx->ctx_tag.common.maxsize = maxaddr;
	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->ctx_tag.common.maxsegsz = maxaddr;
	ctx->ctx_tag.ctx = ctx;
	/* XXXKIB initialize tag further */
}
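
/*
 * Fill an empty context entry: set the domain id and address width,
 * then either point the entry at the page table root, or mark it
 * pass-through when the identity mapping is implemented by the
 * hardware pass-through translation type (ECAP.PT).
 */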
static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
	struct dmar_unit *unit;
	vm_page_t ctx_root;

	unit = ctx->dmar;
	KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->unit, ctx->bus, ctx->slot, ctx->func, ctxp->ctx1,
	    ctxp->ctx2));
	ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
	ctxp->ctx2 |= ctx->awlvl;
	if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(ctx->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
}
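
/*
 * Map the RMRR regions which BIOS reported for the device into the
 * context, with read and write permissions.  The VT-d specification
 * requires the OS to keep RMRR regions mapped for as long as the
 * platform may issue DMA to them.
 */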
static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes */
			if (bootverbose) {
				device_printf(dev, "BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    ctx->dmar->unit, start, end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
		    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by context rb
		 * tree.  Get rid of the failed entry, but do not stop
		 * the loop.  The rest of the parsed RMRR entries are
		 * loaded and removed on the context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(ctx->dmar);
			ctx->flags |= DMAR_CTX_RMRR;
			DMAR_UNLOCK(ctx->dmar);
		} else {
			if (error1 != 0) {
				device_printf(dev,
			    "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
				    ctx->dmar->unit, start, end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(ctx, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, int bus, int slot, int func)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	RB_INIT(&ctx->rb_root);
	TAILQ_INIT(&ctx->unload_entries);
	TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
	mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
	ctx->dmar = dmar;
	ctx->bus = bus;
	ctx->slot = slot;
	ctx->func = func;
	return (ctx);
}

static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

	if (gas_inited) {
		DMAR_CTX_LOCK(ctx);
		dmar_gas_fini_ctx(ctx);
		DMAR_CTX_UNLOCK(ctx);
	}
	if (pgtbl_inited) {
		if (ctx->pgtbl_obj != NULL)
			DMAR_CTX_PGLOCK(ctx);
		ctx_free_pgtbl(ctx);
	}
	mtx_destroy(&ctx->lock);
	free(ctx, M_DMAR_CTX);
}
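
/*
 * Find the context for the given PCI requester, or create a new one
 * if none exists yet.  The returned context is referenced; may sleep.
 * Returns NULL on failure.
 */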
struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func,
    bool id_mapped, bool rmrr_init)
{
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int error, mgaw;
	bool enable;

	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * higher chance to succeed if the sleep is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, bus);
		ctx1 = dmar_get_ctx_alloc(dmar, bus, slot, func);

		if (id_mapped) {
			/*
			 * For now, use the maximal usable physical
			 * address of the installed memory to
			 * calculate the mgaw.  It is useful for the
			 * identity mapping, and less so for the
			 * virtualized bus address space.
			 */
			ctx1->end = ptoa(Maxmem);
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		} else {
			ctx1->end = BUS_SPACE_MAXADDR;
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Use all supported address space for remapping. */
			ctx1->end = 1ULL << (ctx1->agaw - 1);
		}
		dmar_gas_init_ctx(ctx1);
		if (id_mapped) {
			if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
				ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
				    ctx1->end);
			}
			ctx1->flags |= DMAR_CTX_IDMAP;
		} else {
			error = ctx_alloc_pgtbl(ctx1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Disable local apic region access */
			error = dmar_gas_reserve_region(ctx1, 0xfee00000,
			    0xfeefffff + 1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			error = ctx_init_rmrr(ctx1, dev);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
		if (ctx == NULL) {
			ctx = ctx1;
			ctx->ctx_tag.owner = dev;
			ctx->domain = alloc_unrl(dmar->domids);
			if (ctx->domain == -1) {
				DMAR_UNLOCK(dmar);
				dmar_unmap_pgtbl(sf, true);
				dmar_ctx_dtr(ctx, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			ctx_tag_init(ctx);

			/*
			 * This is the first activated context for the
			 * DMAR unit.  Enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->contexts))
				enable = true;
			LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
			ctx_id_entry_init(ctx, ctxp);
			device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d domain %d mgaw %d "
			    "agaw %d %s-mapped\n",
			    dmar->unit, dmar->segment, bus, slot,
			    func, ctx->domain, ctx->mgaw, ctx->agaw,
			    id_mapped ? "id" : "re");
		} else {
			dmar_ctx_dtr(ctx1, true, true);
		}
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	}
	ctx->refs++;
	if ((ctx->flags & DMAR_CTX_RMRR) != 0)
		ctx->refs++; /* XXXKIB */

	/*
	 * If the dmar declares Caching Mode as set, follow 11.5 "Caching
	 * Mode Consideration" and do the (global) invalidation of the
	 * negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
		if (dmar->qi_enabled) {
			dmar_qi_invalidate_ctx_glob_locked(dmar);
			if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				dmar_qi_invalidate_iotlb_glob_locked(dmar);
		} else {
			error = dmar_inv_ctx_glob(dmar);
			if (error == 0 &&
			    (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				error = dmar_inv_iotlb_glob(dmar);
			if (error != 0) {
				dmar_free_ctx_locked(dmar, ctx);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of GCMD_TE
	 * to avoid unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error != 0) {
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}
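
/*
 * Drop a reference on the context.  On the last reference, clear the
 * context entry, flush the caches and the IOTLB, and destroy the
 * context.  The dmar lock must be held on entry; it is dropped before
 * return.
 */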
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not the last one, only the dereference
	 * should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep, unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	LIST_REMOVE(ctx, link);
	DMAR_UNLOCK(dmar);

	/*
	 * The rest of the destruction is invisible to other users of
	 * the dmar unit.
	 */
	taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
	KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
	    ("unfinished unloads %p", ctx));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	free_unr(dmar->domids, ctx->domain);
	dmar_ctx_dtr(ctx, true, true);
	TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func)
{
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(ctx, &dmar->contexts, link) {
		if (ctx->bus == bus && ctx->slot == slot && ctx->func == func)
			return (ctx);
	}
	return (NULL);
}

void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_ctx *ctx;

	ctx = entry->ctx;
	DMAR_CTX_LOCK(ctx);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(ctx, entry);
	else
		dmar_gas_free_space(ctx, entry);
	DMAR_CTX_UNLOCK(ctx);
	if (free)
		dmar_gas_free_entry(ctx, entry);
	else
		entry->flags = 0;
}
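
/*
 * Unload a single map entry.  With queued invalidation, the IOTLB
 * flush is issued asynchronously and the entry is queued on
 * tlb_flush_entries to be freed after the flush completes; otherwise
 * the flush is performed synchronously and the entry is freed here.
 */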
void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->ctx->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->ctx, entry->start,
		    entry->end - entry->start, &entry->gseq);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
		    entry->start);
		dmar_ctx_free_entry(entry, free);
	}
}
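
/*
 * Unload a list of map entries.  Without queued invalidation, each
 * entry is flushed synchronously and freed in the first pass.  With
 * queued invalidation, the invalidation requests are batched; only
 * the last request records the generation sequence number, which is
 * then assigned to every entry moved to tlb_flush_entries.
 */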
void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	struct dmar_qi_genseq gseq;
	int error;

	unit = ctx->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", ctx, entry));
		error = ctx_unmap_buf(ctx, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", ctx, error));
		if (!unit->qi_enabled) {
			ctx_flush_iotlb_sync(ctx, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_ctx_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		entry->gseq.gen = 0;
		entry->gseq.seq = 0;
		dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
		    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
		    &gseq : NULL);
	}
	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		entry->gseq = gseq;
		TAILQ_REMOVE(entries, entry, dmamap_link);
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
	}
	DMAR_UNLOCK(unit);
}
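
/*
 * Taskqueue handler for the delayed unload: repeatedly swap out and
 * unload the context unload_entries queue until it is observed empty.
 */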
static void
dmar_ctx_unload_task(void *arg, int pending)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entries_tailq entries;

	ctx = arg;
	TAILQ_INIT(&entries);
	for (;;) {
		DMAR_CTX_LOCK(ctx);
		TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
		    dmamap_link);
		DMAR_CTX_UNLOCK(ctx);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_ctx_unload(ctx, &entries, true);
	}
}