2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory (Department of Computer Science and
8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9 * DARPA SSITH research programme.
11 * Portions of this work was supported by Innovate UK project 105694,
12 * "Digital Security by Design (DSbD) Technology Platform Prototype".
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include "opt_platform.h"
38 #include <sys/cdefs.h>
39 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/memdesc.h>
45 #include <sys/taskqueue.h>
47 #include <sys/mutex.h>
49 #include <sys/sysctl.h>
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <machine/bus.h>
55 #include <dev/iommu/busdma_iommu.h>
56 #include <machine/vmparam.h>
59 #include <dev/fdt/fdt_common.h>
60 #include <dev/ofw/ofw_bus.h>
61 #include <dev/ofw/ofw_bus_subr.h>
/* Malloc type for IOMMU framework allocations (registry entries, dma tags). */
67 static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

/* Lock/unlock/assert helpers for the global iommu_list registry below. */
69 #define IOMMU_LIST_LOCK() sx_xlock(&iommu_sx)
70 #define IOMMU_LIST_UNLOCK() sx_xunlock(&iommu_sx)
71 #define IOMMU_LIST_ASSERT_LOCKED() sx_assert(&iommu_sx, SA_XLOCKED)

/* Debug printf: compiled out (expands to nothing in this build). */
73 #define dprintf(fmt, ...)

/* Shared/exclusive lock protecting iommu_list. */
75 static struct sx iommu_sx;

/*
 * Registry node linking one iommu_unit into iommu_list.
 * NOTE(review): the "struct iommu_entry {" opener and closing brace are
 * elided in this extract -- confirm against the full file.
 */
78 struct iommu_unit *iommu;
79 LIST_ENTRY(iommu_entry) next;

/* Global list of all registered IOMMU units. */
81 static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
/*
 * Unmap the guest-address range [base, base + size) from the domain by
 * calling the hardware driver's IOMMU_UNMAP method.
 * NOTE(review): several lines (braces, iommu assignment, return of error)
 * are elided in this extract -- confirm against the full file.
 */
84 iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
85     iommu_gaddr_t size, int flags)
87 	struct iommu_unit *iommu;
92 	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);
/*
 * Map the pages in 'ma' into the domain at guest address 'base'.  The
 * IOMMU_MAP_ENTRY_READ/WRITE bits of 'eflags' are translated into a
 * VM_PROT_* protection mask before calling the driver's IOMMU_MAP method.
 * NOTE(review): declarations of 'prot'/'va'/'error', the prot
 * initialization, and the return are elided in this extract -- 'va' is
 * presumably set from 'base'; confirm against the full file.
 */
98 iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
99     iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
101 	struct iommu_unit *iommu;
106 	dprintf("%s: base %lx, size %lx\n", __func__, base, size);
	/* Translate map-entry flags into a VM protection mask. */
109 	if (eflags & IOMMU_MAP_ENTRY_READ)
110 		prot |= VM_PROT_READ;
111 	if (eflags & IOMMU_MAP_ENTRY_WRITE)
112 		prot |= VM_PROT_WRITE;
116 	iommu = iodom->iommu;
118 	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);
/*
 * Map/unmap operations table handed to the generic domain layer
 * (iommu_domain_init below); routes GAS map/unmap requests to the
 * buffer helpers above.
 */
123 static const struct iommu_domain_map_ops domain_map_ops = {
124 .map = iommu_domain_map_buf,
125 .unmap = iommu_domain_unmap_buf,
/*
 * Allocate a translation domain from the hardware driver and initialize
 * the generic layers: domain init with domain_map_ops, back-pointer to
 * the owning unit, and the guest address space (GAS) allocator.
 * NOTE(review): the NULL check after IOMMU_DOMAIN_ALLOC and the final
 * "return (iodom)" are elided in this extract -- confirm against the
 * full file.
 */
128 static struct iommu_domain *
129 iommu_domain_alloc(struct iommu_unit *iommu)
131 	struct iommu_domain *iodom;
133 	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	/* The driver must have set the domain's address-space limit. */
137 	KASSERT(iodom->end != 0, ("domain end is not set"));
139 	iommu_domain_init(iommu, iodom, &domain_map_ops);
140 	iodom->iommu = iommu;
141 	iommu_gas_init_domain(iodom);
/*
 * Tear down a domain: release the GAS address-space state (only if it
 * was initialized, under the domain lock), finalize the generic domain,
 * then hand the domain back to the hardware driver.
 * NOTE(review): braces and the return (presumably an error code from
 * IOMMU_DOMAIN_FREE; see caller iommu_free_ctx) are elided in this
 * extract -- confirm against the full file.
 */
147 iommu_domain_free(struct iommu_domain *iodom)
149 	struct iommu_unit *iommu;
151 	iommu = iodom->iommu;
	/* Only undo GAS setup if iommu_gas_init_domain() completed. */
155 	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
156 		IOMMU_DOMAIN_LOCK(iodom);
157 		iommu_gas_fini_domain(iodom);
158 		IOMMU_DOMAIN_UNLOCK(iodom);
161 	iommu_domain_fini(iodom);
163 	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
/*
 * Initialize the busdma tag embedded in a context so that DMA
 * allocations are bounded by the domain's translated address space:
 * every address/size limit is clamped to MIN(domain end, bus maximum).
 * NOTE(review): the 'maxaddr' declaration and surrounding braces are
 * elided in this extract -- confirm against the full file.
 */
170 iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
	/* Clamp to whichever is smaller: domain limit or bus limit. */
174 	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);
176 	t->common.ref_count = 0;
177 	t->common.impl = &bus_dma_iommu_impl;
178 	t->common.alignment = 1;
179 	t->common.boundary = 0;
180 	t->common.lowaddr = maxaddr;
181 	t->common.highaddr = maxaddr;
182 	t->common.maxsize = maxaddr;
183 	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
184 	t->common.maxsegsz = maxaddr;
/*
 * Allocate a device context within 'iodom' for 'requester' via the
 * hardware driver, and link it to its domain.  'disabled' is passed
 * through to the driver (presumably creates the context with
 * translation bypassed -- confirm with the driver method).
 * NOTE(review): the NULL check after IOMMU_CTX_ALLOC and the return of
 * 'ioctx' are elided in this extract.
 */
188 iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
190 	struct iommu_unit *iommu;
191 	struct iommu_ctx *ioctx;
193 	iommu = iodom->iommu;
195 	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
199 	ioctx->domain = iodom;
/*
 * Second-stage context setup: let the driver program the context
 * (IOMMU_CTX_INIT), then allocate and fill the busdma tag used for DMA
 * on behalf of 'requester'.
 * NOTE(review): error check after IOMMU_CTX_INIT, the "tag->ctx = ioctx"
 * assignment implied by line 223, and the return are elided in this
 * extract -- confirm against the full file.
 */
205 iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
207 	struct bus_dma_tag_iommu *tag;
208 	struct iommu_domain *iodom;
209 	struct iommu_unit *iommu;
212 	iodom = ioctx->domain;
213 	iommu = iodom->iommu;
215 	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	/* M_WAITOK: may sleep; never returns NULL. */
219 	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
220 	    M_IOMMU, M_WAITOK | M_ZERO);
221 	tag->owner = requester;
223 	tag->ctx->domain = iodom;
225 	iommu_tag_init(iodom, tag);
/*
 * Find the registered iommu_unit whose driver device is 'dev'.
 * NOTE(review): the list lock/unlock around the walk, the return of the
 * matched unit, and the not-found return (presumably NULL) are elided
 * in this extract -- confirm against the full file.
 */
230 static struct iommu_unit *
231 iommu_lookup(device_t dev)
233 	struct iommu_entry *entry;
234 	struct iommu_unit *iommu;
237 	LIST_FOREACH(entry, &iommu_list, next) {
238 		iommu = entry->iommu;
239 		if (iommu->dev == dev) {
/*
 * OFW (device-tree) path for obtaining an IOMMU context for 'dev':
 * parse the "iommus" / "#iommu-cells" properties for the given
 * 'channel' index, resolve the IOMMU driver device from the xref,
 * allocate a fresh domain and context (one domain per context in this
 * configuration), hand the cell data to the driver via
 * IOMMU_OFW_MD_DATA, and finish with iommu_ctx_init().
 * NOTE(review): many lines are elided in this extract (declarations of
 * error/niommus/ncells/cells/iommu_dev/iommu, NULL/error checks, the
 * free of 'cells', and returns) -- confirm against the full file.
 */
251 iommu_get_ctx_ofw(device_t dev, int channel)
253 	struct iommu_domain *iodom;
254 	struct iommu_unit *iommu;
255 	struct iommu_ctx *ioctx;
256 	phandle_t node, parent;
263 	node = ofw_bus_get_node(dev);
266 		    "%s called on not ofw based device.\n", __func__);
	/* How many IOMMU specifiers does this device reference? */
270 	error = ofw_bus_parse_xref_list_get_length(node,
271 	    "iommus", "#iommu-cells", &niommus);
273 		device_printf(dev, "%s can't get iommu list.\n", __func__);
278 		device_printf(dev, "%s iommu list is empty.\n", __func__);
	/* Fetch the specifier cells for the requested channel. */
282 	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
283 	    channel, &parent, &ncells, &cells);
285 		device_printf(dev, "%s can't get iommu device xref.\n",
	/* Resolve the xref back to the IOMMU's device_t. */
290 	iommu_dev = OF_device_from_xref(parent);
291 	if (iommu_dev == NULL) {
292 		device_printf(dev, "%s can't get iommu device.\n", __func__);
296 	iommu = iommu_lookup(iommu_dev);
298 		device_printf(dev, "%s can't lookup iommu.\n", __func__);
303 	 * In our current configuration we have a domain per each ctx,
304 	 * so allocate a domain first.
306 	iodom = iommu_domain_alloc(iommu);
308 		device_printf(dev, "%s can't allocate domain.\n", __func__);
312 	ioctx = iommu_ctx_alloc(dev, iodom, false);
	/* Context allocation failed: undo the domain allocation. */
314 		iommu_domain_free(iodom);
318 	ioctx->domain = iodom;
	/* Pass the device-tree specifier cells to the driver. */
320 	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
322 		device_printf(dev, "%s can't set MD data\n", __func__);
326 	error = iommu_ctx_init(dev, ioctx);
	/* Init failed: unwind both the context and its domain. */
328 		IOMMU_CTX_FREE(iommu->dev, ioctx);
329 		iommu_domain_free(iodom);
/*
 * Obtain (or create) a context for 'requester' on a known IOMMU unit:
 * return the existing context if the driver already has one
 * (IOMMU_CTX_LOOKUP), otherwise allocate a fresh domain plus context
 * (one domain per context) and initialize it, unwinding on failure.
 * NOTE(review): the early-return on a successful lookup, NULL checks,
 * the 'error' declaration, use of 'rid'/'rmrr', and the final return
 * are elided in this extract -- confirm against the full file.
 */
338 iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
339     uint16_t rid, bool disabled, bool rmrr)
341 	struct iommu_domain *iodom;
342 	struct iommu_ctx *ioctx;
	/* Reuse an existing context for this requester if one exists. */
346 	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
354 	 * In our current configuration we have a domain per each ctx.
355 	 * So allocate a domain first.
357 	iodom = iommu_domain_alloc(iommu);
361 	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	/* Context allocation failed: undo the domain allocation. */
363 		iommu_domain_free(iodom);
367 	error = iommu_ctx_init(requester, ioctx);
	/* Init failed: unwind both the context and its domain. */
369 		IOMMU_CTX_FREE(iommu->dev, ioctx);
370 		iommu_domain_free(iodom);
/*
 * Release a context with the unit lock held; the driver frees its
 * per-context state via IOMMU_CTX_FREE.
 * NOTE(review): the use/free of 'tag' (ioctx->tag, presumably) and any
 * unlock are elided in this extract -- confirm against the full file.
 */
378 iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
380 	struct bus_dma_tag_iommu *tag;
382 	IOMMU_ASSERT_LOCKED(iommu);
386 	IOMMU_CTX_FREE(iommu->dev, ioctx);
/*
 * Unlocked wrapper: free the context (under the unit lock, via
 * iommu_free_ctx_locked) and then its dedicated domain, since this
 * framework keeps one domain per context.
 * NOTE(review): the IOMMU_LOCK/UNLOCK pair around the locked call and
 * the error-check branch are elided in this extract -- confirm against
 * the full file.
 */
392 iommu_free_ctx(struct iommu_ctx *ioctx)
394 	struct iommu_unit *iommu;
395 	struct iommu_domain *iodom;
398 	iodom = ioctx->domain;
399 	iommu = iodom->iommu;
402 	iommu_free_ctx_locked(iommu, ioctx);
405 	/* Since we have a domain per each ctx, remove the domain too. */
406 	error = iommu_domain_free(iodom);
408 		device_printf(iommu->dev, "Could not free a domain\n");
/*
 * Return a map entry's address range to the domain's GAS allocator and
 * free the entry itself.
 * NOTE(review): the 'free' parameter presumably gates the
 * iommu_gas_free_entry() call (elided conditional/else in this
 * extract) -- confirm against the full file.
 */
412 iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
414 	iommu_gas_free_space(entry);
417 	iommu_gas_free_entry(entry);
/*
 * Unload a list of mapped entries from the domain: for each entry,
 * verify it is mapped, unmap its range via the domain's ops (sleeping
 * allowed only if 'cansleep'), remove it from the queue, and free it.
 * Panics if anything remains on the queue afterwards.
 * NOTE(review): braces and the early return when the queue is empty are
 * elided in this extract -- confirm against the full file.
 */
423 iommu_domain_unload(struct iommu_domain *iodom,
424     struct iommu_map_entries_tailq *entries, bool cansleep)
426 	struct iommu_map_entry *entry, *entry1;
	/* __diagused: only consumed by the KASSERT below. */
427 	int error __diagused;
429 	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
430 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
431 		    ("not mapped entry %p %p", iodom, entry));
432 		error = iodom->ops->unmap(iodom, entry->start, entry->end -
433 		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
434 		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
435 		TAILQ_REMOVE(entries, entry, dmamap_link);
436 		iommu_domain_free_entry(entry, true);
	/* Everything must have been consumed by the loop above. */
439 	if (TAILQ_EMPTY(entries))
442 	panic("entries map is not empty");
/*
 * Register a new IOMMU unit with the framework: initialize its mutex,
 * add it to the global iommu_list (M_WAITOK alloc: may sleep, never
 * NULL), and set up its busdma backend.
 * NOTE(review): the IOMMU_LIST_LOCK/UNLOCK pair around the list insert
 * and the return value are elided in this extract -- confirm against
 * the full file.
 */
446 iommu_register(struct iommu_unit *iommu)
448 	struct iommu_entry *entry;
450 	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);
452 	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
453 	entry->iommu = iommu;
456 	LIST_INSERT_HEAD(&iommu_list, entry, next);
459 	iommu_init_busdma(iommu);
/*
 * Remove an IOMMU unit from the framework: unlink and free its
 * iommu_list entry, tear down its busdma state, and destroy its mutex.
 * NOTE(review): the IOMMU_LIST_LOCK/UNLOCK pair around the walk and the
 * return value are elided in this extract -- confirm against the full
 * file.
 */
465 iommu_unregister(struct iommu_unit *iommu)
467 	struct iommu_entry *entry, *tmp;
	/* SAFE variant: entries may be removed while iterating. */
470 	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
471 		if (entry->iommu == iommu) {
472 			LIST_REMOVE(entry, next);
473 			free(entry, M_IOMMU);
478 	iommu_fini_busdma(iommu);
480 	mtx_destroy(&iommu->lock);
/*
 * Ask each registered IOMMU unit (via its IOMMU_FIND method) whether it
 * translates for 'dev'; return the first unit that claims it.
 * NOTE(review): the list lock/unlock, the success check on 'error', the
 * not-found return (presumably NULL), and any use of 'verbose' are
 * elided in this extract -- confirm against the full file.
 */
486 iommu_find(device_t dev, bool verbose)
488 	struct iommu_entry *entry;
489 	struct iommu_unit *iommu;
493 	LIST_FOREACH(entry, &iommu_list, next) {
494 		iommu = entry->iommu;
495 		error = IOMMU_FIND(iommu->dev, dev);
498 			return (entry->iommu);
/*
 * Unload a single map entry.  In this implementation the visible work
 * is just releasing the entry ('cansleep' is explicitly __unused).
 * NOTE(review): braces and any guard between the dprintf and the free
 * are elided in this extract -- confirm against the full file.
 */
507 iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
508     bool cansleep __unused)
511 	dprintf("%s\n", __func__);
513 	iommu_domain_free_entry(entry, free);
	/* One-time framework init: set up the registry lock. */
520 	sx_init(&iommu_sx, "IOMMU list");
/*
 * Run iommu_init() early in driver bring-up so units can register.
 * NOTE(review): the iommu_init() function header is elided in this
 * extract; only its body line and the SYSINIT hook are visible.
 */
523 SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);