/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H

#include <dev/iommu/iommu.h>

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain and the
 * list of delayed unload entries.
 *
 * Page table pages and their contents are protected by the lock of
 * the vm object pgtbl_obj, which contains the page table pages.
 *
 * An illustrative locking sketch follows the structure definition
 * below.
 */
struct dmar_domain {
	struct iommu_domain iodom;
	int domain;			/* (c) DID, written in context entry */
	int mgaw;			/* (c) Real max address width */
	int agaw;			/* (c) Adjusted guest address width */
	int pglvl;			/* (c) The pagelevel */
	int awlvl;			/* (c) The pagelevel as the bitmask,
					   to set in context entry */
	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	struct dmar_unit *dmar;		/* (c) */
	LIST_ENTRY(dmar_domain) link;	/* (u) Member in the dmar list */
	LIST_HEAD(, dmar_ctx) contexts;	/* (u) */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
};
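
/*
 * Illustrative sketch of the locking discipline described above: a
 * hypothetical example, kept out of compilation with #if 0, not part
 * of the driver.  Walking the (u) contexts list takes the owning
 * dmar unit lock; the (d) address map is covered by the domain lock.
 */
#if 0
static void
example_locking(struct dmar_domain *domain)
{
	struct dmar_ctx *ctx;
	u_int n;

	n = 0;
	DMAR_LOCK(domain->dmar);	/* protects (u) members */
	LIST_FOREACH(ctx, &domain->contexts, link)
		n++;
	DMAR_UNLOCK(domain->dmar);

	DMAR_DOMAIN_LOCK(domain);	/* protects (d) members */
	/* ... manipulate the domain address map here ... */
	DMAR_DOMAIN_UNLOCK(domain);
}
#endif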

struct dmar_ctx {
	struct iommu_ctx context;
	uint64_t last_fault_rec[2];	/* Last fault reported */
	LIST_ENTRY(dmar_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
};

#define	DMAR_DOMAIN_PGLOCK(dom)		VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	DMAR_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)

#define	DMAR2IOMMU(dmar)	&((dmar)->iommu)
#define	IOMMU2DMAR(dmar) \
	__containerof((dmar), struct dmar_unit, iommu)

#define	DOM2IODOM(domain)	&((domain)->iodom)
#define	IODOM2DOM(domain) \
	__containerof((domain), struct dmar_domain, iodom)

#define	CTX2IOCTX(ctx)		&((ctx)->context)
#define	IOCTX2CTX(ctx) \
	__containerof((ctx), struct dmar_ctx, context)

#define	CTX2DOM(ctx)		IODOM2DOM((ctx)->context.domain)
#define	CTX2DMAR(ctx)		(CTX2DOM(ctx)->dmar)
#define	DOM2DMAR(domain)	((domain)->dmar)
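
/*
 * Illustrative sketch (hypothetical, kept out of compilation): the
 * converters above are thin address-of/__containerof wrappers, so a
 * conversion to the generic iommu view and back yields the original
 * pointer.
 */
#if 0
static void
example_roundtrip(struct dmar_unit *unit)
{
	struct iommu_unit *iommu;

	iommu = DMAR2IOMMU(unit);
	KASSERT(IOMMU2DMAR(iommu) == unit, ("round trip failed"));
}
#endif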

struct dmar_msi_data {
	struct resource *irq_res;
	int (*handler)(void *);
	void (*enable_intr)(struct dmar_unit *);
	void (*disable_intr)(struct dmar_unit *);
};

#define	DMAR_INTR_FAULT		0
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2

struct dmar_unit {
	struct iommu_unit iommu;
	struct resource *regs;
	struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

	/* Hardware registers cache */
	uint64_t hw_ecap;
	uint32_t hw_gcmd;

	/* Data for being a dmar */
	LIST_HEAD(, dmar_domain) domains;
	struct unrhdr *domids;

	/* Fault handler data */
	struct mtx fault_lock;
	struct task fault_task;
	struct taskqueue *fault_taskqueue;

	/* Queued invalidation */
	vm_offset_t inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;
	volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
					       descr completion */
	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/* Delayed freeing of map entries queue processing */
	struct iommu_map_entries_tailq tlb_flush_entries;
	struct taskqueue *qi_taskqueue;
};
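
/*
 * Illustrative sketch (hypothetical, kept out of compilation) of how
 * the wait-descriptor sequence fields above can be consumed: a
 * completion check compares a recorded (generation, sequence) pair
 * against the sequence number the hardware wrote, so 32-bit sequence
 * wraparound is handled via inv_waitd_gen.  The gen/seq field names
 * of struct iommu_qi_genseq are assumptions here.
 */
#if 0
static bool
example_qi_seq_processed(const struct dmar_unit *unit,
    const struct iommu_qi_genseq *pseq)
{

	return (pseq->gen < unit->inv_waitd_gen ||
	    (pseq->gen == unit->inv_waitd_gen &&
	    pseq->seq <= unit->inv_waitd_seq_hw));
}
#endif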

#define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->iommu.lock)
#define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->iommu.lock)
#define	DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define	DMAR_FAULT_LOCK(dmar)	mtx_lock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_UNLOCK(dmar)	mtx_unlock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define	DMAR_X2APIC(dmar) \
	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

/* Barrier ids */
#define	DMAR_BARRIER_RMRR	0
#define	DMAR_BARRIER_USEQ	1

struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
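
/*
 * Illustrative sketch (hypothetical, kept out of compilation):
 * dmar_map_pgtbl hands back a mapping of a page-table page through an
 * sf_buf, which the caller must release with dmar_unmap_pgtbl.  The
 * function and variable names below are made up for the example.
 */
#if 0
static void
example_edit_pgtbl_page(struct dmar_domain *domain, vm_pindex_t idx, int flags)
{
	struct sf_buf *sf;
	uint64_t *ptep;

	ptep = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, &sf);
	/* ... read or update entries of the mapped page via ptep ... */
	dmar_unmap_pgtbl(sf);
}
#endif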

int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

extern iommu_haddr_t dmar_high;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

	return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
	uint32_t high, low;

	low = bus_read_4(unit->regs, reg);
	high = bus_read_4(unit->regs, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

	KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
	    (unit->hw_gcmd & DMAR_GCMD_TE),
	    ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
	    unit->hw_gcmd, val));
	bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

	KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
	uint32_t high, low;

	low = val;
	high = val >> 32;
	bus_write_4(unit->regs, reg, low);
	bus_write_4(unit->regs, reg + 4, high);
#else
	bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store.  The
 * locked cmpxchg8b is probably available on any machine having DMAR
 * support, but the interrupt translation table may be mapped uncached.
 */
static void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;
	uint32_t hi, lo;

	hi = val >> 32;
	lo = val;

	/* Write the high word first, then the word with the P bit. */
	p = (volatile uint32_t *)dst;
	*(p + 1) = hi;
	*p = lo;
#else
	*dst = val;
#endif
}

static void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
	    dst, (uintmax_t)*dst, (uintmax_t)val));
	dmar_pte_store1(dst, val);
}

static void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;

	/* Disable the entry by clearing the word with the P bit first. */
	p = (volatile uint32_t *)dst;
	*p = 0;
#endif
	dmar_pte_store1(dst, val);
}

static void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
	volatile uint32_t *p;

	/* Clear the word with the P bit first, then the high word. */
	p = (volatile uint32_t *)dst;
	*p = 0;
	*(p + 1) = 0;
#else
	*dst = 0;
#endif
}

extern struct timespec dmar_hw_timeout;

#define	DMAR_WAIT_UNTIL(cond)					\
{								\
	struct timespec last, curr;				\
	bool forever;						\
								\
	if (dmar_hw_timeout.tv_sec == 0 &&			\
	    dmar_hw_timeout.tv_nsec == 0) {			\
		forever = true;					\
	} else {						\
		forever = false;				\
		nanouptime(&curr);				\
		timespecadd(&curr, &dmar_hw_timeout, &last);	\
	}							\
	for (;;) {						\
		if (cond) {					\
			error = 0;				\
			break;					\
		}						\
		error = ETIMEDOUT;				\
		nanouptime(&curr);				\
		if (!forever && timespeccmp(&last, &curr, <)) {	\
			break;					\
		}						\
		cpu_spinwait();					\
	}							\
}
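
/*
 * Illustrative sketch (hypothetical, kept out of compilation):
 * DMAR_WAIT_UNTIL expects a local 'error' variable in the enclosing
 * scope, which it sets to 0 on success or ETIMEDOUT once
 * dmar_hw_timeout expires.  DMAR_GSTS_REG and DMAR_GSTS_TES are the
 * global status register and translation-enable bit from the
 * register definitions header.
 */
#if 0
static int
example_wait_for_te(struct dmar_unit *unit)
{
	int error;

	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) &
	    DMAR_GSTS_TES) != 0));
	return (error);
}
#endif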

#ifdef INVARIANTS
#define	TD_PREP_PINNED_ASSERT						\
	int old_td_pinned;						\
	old_td_pinned = curthread->td_pinned
#define	TD_PINNED_ASSERT						\
	KASSERT(curthread->td_pinned == old_td_pinned,			\
	    ("pin count leak: %d %d %s:%d", curthread->td_pinned,	\
	    old_td_pinned, __FILE__, __LINE__))
#else
#define	TD_PREP_PINNED_ASSERT
#define	TD_PINNED_ASSERT
#endif

#endif /* __X86_IOMMU_INTEL_DMAR_H */