/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */
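/*
 * Illustrative sketch (the addresses here are made up for this comment):
 * if a device registered the range 0xc0000000-0xc0000fff, a guest access
 * to 0xc0000010 is looked up with a degenerate key whose base and end are
 * both 0xc0000010.  The comparator below treats any overlap as equality,
 * so RB_FIND() returns the covering range.
 */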
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>

#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"
struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;	/* inclusive start of range */
	uint64_t		mr_end;		/* inclusive end of range */
};
struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}
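/*
 * Because overlapping ranges compare as "equal", RB_INSERT() returns the
 * already-present conflicting entry (which mmio_rb_add() uses to reject
 * overlaps) and RB_FIND() with a single-address key returns the range
 * containing that address (which mmio_rb_lookup() relies on).
 */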
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);
	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		       "claims region already claimed for '%s'\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end,
		       new->mr_param.name, overlap->mr_param.name);
#endif
		return (EEXIST);
	}

	return (0);
}
#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);
static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}
static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}
static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}
struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};
static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}
int
emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}
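/*
 * emulate_mem() above is typically invoked from bhyve's instruction-
 * emulation VM-exit path, with the decoded instruction ('vie') and guest
 * paging state reported by vmm(4) for the faulting access.
 */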
struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}
int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp\n",
		     __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* a range with this base already exists */
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err != 0)
			free(mrp);
	}

	return (err);
}
int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}
int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
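/*
 * Usage sketch for the registration API (illustrative only; 'dummy_handler'
 * and 'sc' are hypothetical and not defined anywhere in this file):
 *
 *	struct mem_range mr = {
 *		.name    = "dummy-bar0",
 *		.base    = 0xc0000000UL,
 *		.size    = 0x1000,
 *		.flags   = MEM_F_READ | MEM_F_WRITE,
 *		.handler = dummy_handler,
 *		.arg1    = sc,
 *	};
 *	int error = register_mem(&mr);
 *
 * A range added with register_mem_fallback() is only consulted when no
 * range in the primary tree covers the address, and unregister_mem()
 * removes a previously registered, non-immutable primary range with the
 * same base, size and name.
 */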
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry != NULL)
		free(entry);

	return (err);
}
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
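/*
 * init_mem() must run (once, with the guest's vCPU count) before any range
 * is registered or accessed, since it allocates the per-vCPU hint array
 * and initializes 'mmio_rwlock'.
 */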