/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
33 #ifndef _DEV_IOMMU_IOMMU_H_
34 #define _DEV_IOMMU_IOMMU_H_
36 #include <dev/iommu/iommu_types.h>
38 struct bus_dma_tag_common;
39 struct iommu_map_entry;
40 TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
42 RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
43 RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
44 iommu_gas_cmp_entries);
46 struct iommu_qi_genseq {
51 struct iommu_map_entry {
54 iommu_gaddr_t first; /* Least start in subtree */
55 iommu_gaddr_t last; /* Greatest end in subtree */
56 iommu_gaddr_t free_down; /* Max free space below the
57 current R/B tree node */
59 TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
60 RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
61 TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
62 dmamap_load failure */
63 struct iommu_domain *domain;
64 struct iommu_qi_genseq gseq;
74 /* Busdma delayed map load */
75 struct task dmamap_load_task;
76 TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
77 struct taskqueue *delayed_taskqueue;
	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
85 uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
88 struct iommu_domain_map_ops {
89 int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
90 iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
91 int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
92 iommu_gaddr_t size, int flags);
/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */
102 struct iommu_domain {
103 struct iommu_unit *iommu; /* (c) */
104 const struct iommu_domain_map_ops *ops;
105 struct mtx lock; /* (c) */
106 struct task unload_task; /* (c) */
107 u_int entries_cnt; /* (d) */
108 struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
110 struct iommu_gas_entries_tree rb_root; /* (d) */
111 iommu_gaddr_t end; /* (c) Highest address + 1 in
113 struct iommu_map_entry *first_place, *last_place; /* (d) */
114 struct iommu_map_entry *msi_entry; /* (d) Arch-specific */
115 iommu_gaddr_t msi_base; /* (d) Arch-specific */
116 vm_paddr_t msi_phys; /* (d) Arch-specific */
117 u_int flags; /* (u) */
121 struct iommu_domain *domain; /* (c) */
122 struct bus_dma_tag_iommu *tag; /* (c) Root tag */
123 u_long loads; /* atomic updates, for stat only */
124 u_long unloads; /* same */
125 u_int flags; /* (u) */
126 uint16_t rid; /* (c) pci RID */
/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

/* struct iommu_domain flags */
#define	IOMMU_DOMAIN_GAS_INITED	0x0001
#define	IOMMU_DOMAIN_PGTBL_INITED 0x0002
#define	IOMMU_DOMAIN_IDMAP	0x0010	/* Domain uses identity
					   page table */
#define	IOMMU_DOMAIN_RMRR	0x0020	/* Domain contains RMRR entry,
					   cannot be turned off */
/* Lock helpers for the unit-level and domain-level mutexes. */
#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)
151 void iommu_free_ctx(struct iommu_ctx *ctx);
152 void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
153 struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
154 uint16_t rid, bool id_mapped, bool rmrr_init);
155 struct iommu_unit *iommu_find(device_t dev, bool verbose);
156 void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
157 void iommu_domain_unload(struct iommu_domain *domain,
158 struct iommu_map_entries_tailq *entries, bool cansleep);
160 struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
161 device_t dev, bool rmrr);
162 device_t iommu_get_requester(device_t dev, uint16_t *rid);
163 int iommu_init_busdma(struct iommu_unit *unit);
164 void iommu_fini_busdma(struct iommu_unit *unit);
165 struct iommu_map_entry *iommu_map_alloc_entry(struct iommu_domain *iodom,
167 void iommu_map_free_entry(struct iommu_domain *, struct iommu_map_entry *);
168 int iommu_map(struct iommu_domain *iodom,
169 const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
170 u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
171 int iommu_map_region(struct iommu_domain *domain,
172 struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
174 void iommu_gas_init_domain(struct iommu_domain *domain);
175 void iommu_gas_fini_domain(struct iommu_domain *domain);
176 struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
178 void iommu_gas_free_entry(struct iommu_domain *domain,
179 struct iommu_map_entry *entry);
180 void iommu_gas_free_space(struct iommu_domain *domain,
181 struct iommu_map_entry *entry);
182 int iommu_gas_map(struct iommu_domain *domain,
183 const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
184 u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
185 void iommu_gas_free_region(struct iommu_domain *domain,
186 struct iommu_map_entry *entry);
187 int iommu_gas_map_region(struct iommu_domain *domain,
188 struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
189 int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
190 iommu_gaddr_t end, struct iommu_map_entry **entry0);
191 int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
192 iommu_gaddr_t start, iommu_gaddr_t end);
194 void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
195 bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
196 void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
197 const struct iommu_domain_map_ops *ops);
198 void iommu_domain_fini(struct iommu_domain *domain);
200 bool bus_dma_iommu_set_buswide(device_t dev);
201 int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
202 vm_paddr_t start, vm_size_t length, int flags);
204 bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
205 struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
206 struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);
208 SYSCTL_DECL(_hw_iommu);
210 #endif /* !_DEV_IOMMU_IOMMU_H_ */