/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H

#include <dev/iommu/iommu.h>

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the owning dmar unit lock, not by the domain lock.
 * Most importantly, the dmar lock protects the contexts list.
 *
 * The domain lock protects the address map for the domain, and the
 * list of delayed unload entries.
 *
 * Page table pages and their contents are protected by the lock of
 * the vm object pgtbl_obj, which holds the page table pages.
 */
struct dmar_domain {
        struct iommu_domain iodom;
        int domain;                     /* (c) DID, written in context entry */
        int mgaw;                       /* (c) Real max address width */
        int agaw;                       /* (c) Adjusted guest address width */
        int pglvl;                      /* (c) The pagelevel */
        int awlvl;                      /* (c) The pagelevel as the bitmask,
                                           to set in context entry */
        u_int ctx_cnt;                  /* (u) Number of contexts owned */
        u_int refs;                     /* (u) Refs, including ctx */
        struct dmar_unit *dmar;         /* (c) */
        LIST_ENTRY(dmar_domain) link;   /* (u) Member in the dmar list */
        LIST_HEAD(, dmar_ctx) contexts; /* (u) */
        vm_object_t pgtbl_obj;          /* (c) Page table pages */
};

struct dmar_ctx {
        struct iommu_ctx context;
        uint16_t rid;                   /* (c) pci RID */
        uint64_t last_fault_rec[2];     /* Last fault reported */
        LIST_ENTRY(dmar_ctx) link;      /* (u) Member in the domain list */
        u_int refs;                     /* (u) References from tags */
};

#define DMAR_DOMAIN_PGLOCK(dom)         VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGTRYLOCK(dom)      VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGUNLOCK(dom)       VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
        VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define DMAR_DOMAIN_LOCK(dom)   mtx_lock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
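
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): per the locking notes above, page table pages live in
 * pgtbl_obj and are protected by that object's lock, so a page lookup
 * must be bracketed by the PGLOCK macros.  Assumes <vm/vm_page.h> is
 * available for vm_page_lookup(); the helper name is hypothetical.
 */
static inline vm_page_t
dmar_domain_pgtbl_page_sketch(struct dmar_domain *domain, vm_pindex_t idx)
{
        vm_page_t m;

        DMAR_DOMAIN_PGLOCK(domain);
        m = vm_page_lookup(domain->pgtbl_obj, idx);
        DMAR_DOMAIN_PGUNLOCK(domain);
        return (m);
}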

struct dmar_msi_data {
        int irq;
        int irq_rid;
        struct resource *irq_res;
        void *intr_handle;
        int (*handler)(void *);
        int msi_data_reg;
        int msi_addr_reg;
        int msi_uaddr_reg;
        void (*enable_intr)(struct dmar_unit *);
        void (*disable_intr)(struct dmar_unit *);
        const char *name;
};

#define DMAR_INTR_FAULT         0
#define DMAR_INTR_QI            1
#define DMAR_INTR_TOTAL         2

struct dmar_unit {
        struct iommu_unit iommu;
        device_t dev;
        uint16_t segment;
        uint64_t base;

        /* Resources */
        int reg_rid;
        struct resource *regs;

        struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

        /* Hardware registers cache */
        uint32_t hw_ver;
        uint64_t hw_cap;
        uint64_t hw_ecap;
        uint32_t hw_gcmd;

        /* Data for being a dmar */
        LIST_HEAD(, dmar_domain) domains;
        struct unrhdr *domids;
        vm_object_t ctx_obj;
        u_int barrier_flags;

        /* Fault handler data */
        struct mtx fault_lock;
        uint64_t *fault_log;
        int fault_log_head;
        int fault_log_tail;
        int fault_log_size;
        struct task fault_task;
        struct taskqueue *fault_taskqueue;

        /* QI */
        int qi_enabled;
        vm_offset_t inv_queue;
        vm_size_t inv_queue_size;
        uint32_t inv_queue_avail;
        uint32_t inv_queue_tail;
        volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
                                               descr completion */
        uint64_t inv_waitd_seq_hw_phys;
        uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
        u_int inv_waitd_gen;    /* seq number generation AKA seq overflows */
        u_int inv_seq_waiters;  /* count of waiters for seq */
        u_int inv_queue_full;   /* informational counter */

        /* IR */
        int ir_enabled;
        vm_paddr_t irt_phys;
        dmar_irte_t *irt;
        u_int irte_cnt;
        vmem_t *irtids;

        /* Delayed freeing of map entries queue processing */
        struct iommu_map_entries_tailq tlb_flush_entries;
        struct task qi_task;
        struct taskqueue *qi_taskqueue;
};

#define DMAR_LOCK(dmar)         mtx_lock(&(dmar)->iommu.lock)
#define DMAR_UNLOCK(dmar)       mtx_unlock(&(dmar)->iommu.lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define DMAR_FAULT_LOCK(dmar)   mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define DMAR_IS_COHERENT(dmar)  (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define DMAR_HAS_QI(dmar)       (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define DMAR_X2APIC(dmar) \
        (x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)
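
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the feature macros above are how attach-time code gates
 * optional functionality.  For instance, x2APIC-format interrupt
 * remapping requires extended interrupt mode (EIM), and IEC
 * invalidations are issued through the invalidation queue, so a check
 * like the hypothetical helper below is plausible.
 */
static inline bool
dmar_can_use_x2apic_ir_sketch(const struct dmar_unit *unit)
{

        return (DMAR_HAS_QI(unit) && DMAR_X2APIC(unit));
}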

/* Barrier ids */
#define DMAR_BARRIER_RMRR       0
#define DMAR_BARRIER_USEQ       1

struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
int domain_map_buf(struct iommu_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);

extern iommu_haddr_t dmar_high;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

        return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
        uint32_t high, low;

        low = bus_read_4(unit->regs, reg);
        high = bus_read_4(unit->regs, reg + 4);
        return (low | ((uint64_t)high << 32));
#else
        return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

        KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
            (unit->hw_gcmd & DMAR_GCMD_TE),
            ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
            unit->hw_gcmd, val));
        bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

        KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
        uint32_t high, low;

        low = val;
        high = val >> 32;
        bus_write_4(unit->regs, reg, low);
        bus_write_4(unit->regs, reg + 4, high);
#else
        bus_write_8(unit->regs, reg, val);
#endif
}
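
/*
 * Usage sketch (an editorial addition, not part of the original
 * header): global command register updates go through dmar_write4()
 * so that its KASSERT catches an accidental clearing of the
 * translation-enable bit; callers keep the cached hw_gcmd in sync
 * while holding the unit lock.  The helper name is hypothetical;
 * DMAR_GCMD_REG is the register offset already referenced above.
 */
static inline void
dmar_gcmd_set_sketch(struct dmar_unit *unit, uint32_t bit)
{

        DMAR_ASSERT_LOCKED(unit);
        unit->hw_gcmd |= bit;
        dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
}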

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store1.  The
 * locked cmpxchg8b is probably available on any machine having DMAR
 * support, but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
        volatile uint32_t *p;
        uint32_t hi, lo;

        hi = val >> 32;
        lo = val;
        p = (volatile uint32_t *)dst;
        *(p + 1) = hi;
        *p = lo;
#else
        *dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

        KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
            dst, (uintmax_t)*dst, (uintmax_t)val));
        dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
        volatile uint32_t *p;

        /* Disable the entry by clearing the word with the P bit. */
        p = (volatile uint32_t *)dst;
        *p = 0;
#endif
        dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
        volatile uint32_t *p;

        /* Clear the P bit first, then the high word. */
        p = (volatile uint32_t *)dst;
        *p = 0;
        *(p + 1) = 0;
#else
        *dst = 0;
#endif
}
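
/*
 * Illustrative pairing (an editorial addition, not part of the
 * original header): a pte store is normally followed by
 * dmar_flush_pte_to_ram(), declared above, so that units without
 * coherent page walks (DMAR_ECAP_C clear) observe the update; the
 * flush degrades to a no-op on coherent hardware.  The helper name
 * is hypothetical; __DEVOLATILE() comes from <sys/cdefs.h>.
 */
static inline void
dmar_pte_store_and_flush_sketch(struct dmar_unit *unit,
    volatile uint64_t *dst, uint64_t val)
{

        dmar_pte_store(dst, val);
        dmar_flush_pte_to_ram(unit, __DEVOLATILE(dmar_pte_t *, dst));
}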

extern struct timespec dmar_hw_timeout;

#define DMAR_WAIT_UNTIL(cond)                                   \
{                                                               \
        struct timespec last, curr;                             \
        bool forever;                                           \
                                                                \
        if (dmar_hw_timeout.tv_sec == 0 &&                      \
            dmar_hw_timeout.tv_nsec == 0) {                     \
                forever = true;                                 \
        } else {                                                \
                forever = false;                                \
                nanouptime(&curr);                              \
                timespecadd(&curr, &dmar_hw_timeout, &last);    \
        }                                                       \
        for (;;) {                                              \
                if (cond) {                                     \
                        error = 0;                              \
                        break;                                  \
                }                                               \
                nanouptime(&curr);                              \
                if (!forever && timespeccmp(&last, &curr, <)) { \
                        error = ETIMEDOUT;                      \
                        break;                                  \
                }                                               \
                cpu_spinwait();                                 \
        }                                                       \
}
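
/*
 * Usage sketch (an editorial addition, not part of the original
 * header): DMAR_WAIT_UNTIL() expects an "int error" variable in the
 * enclosing scope and spins on the condition until it holds or
 * dmar_hw_timeout expires.  A wait for the hardware to acknowledge
 * enabling translation could look like the helper below; the helper
 * name is hypothetical, and DMAR_GSTS_REG/DMAR_GSTS_TES are assumed
 * to come from <x86/iommu/intel_reg.h>.
 */
static inline int
dmar_wait_tes_sketch(struct dmar_unit *unit)
{
        int error;

        DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) &
            DMAR_GSTS_TES) != 0));
        return (error);
}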

#ifdef INVARIANTS
#define TD_PREP_PINNED_ASSERT                                           \
        int old_td_pinned;                                              \
        old_td_pinned = curthread->td_pinned
#define TD_PINNED_ASSERT                                                \
        KASSERT(curthread->td_pinned == old_td_pinned,                  \
            ("pin count leak: %d %d %s:%d", curthread->td_pinned,       \
            old_td_pinned, __FILE__, __LINE__))
#else
#define TD_PREP_PINNED_ASSERT
#define TD_PINNED_ASSERT
#endif

#endif /* __X86_IOMMU_INTEL_DMAR_H */