/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */
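
/*
 * Two trees are maintained: 'mmio_rb_root' holds explicitly registered
 * ranges, while 'mmio_rb_fallback' holds catch-all ranges that are only
 * consulted when no match is found in the primary tree (see
 * emulate_mem() below).
 */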

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
        RB_ENTRY(mmio_rb_range)  mr_link;       /* RB tree links */
        struct mem_range         mr_param;
        uint64_t                 mr_base;
        uint64_t                 mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range     *mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;
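
/*
 * The hint is only examined and updated with 'mmio_rwlock' held, and
 * unregister_mem() below clears any hint that refers to a range being
 * removed, so a cached pointer never outlives its range.
 */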

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
        if (a->mr_end < b->mr_base)
                return (-1);
        else if (a->mr_base > b->mr_end)
                return (1);
        return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
        struct mmio_rb_range find, *res;

        find.mr_base = find.mr_end = addr;

        res = RB_FIND(mmio_rb_tree, rbt, &find);

        if (res != NULL) {
                *entry = res;
                return (0);
        }

        return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
        struct mmio_rb_range *overlap;

        overlap = RB_INSERT(mmio_rb_tree, rbt, new);

        if (overlap != NULL) {
#ifdef RB_DEBUG
                printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
                       new->mr_base, new->mr_end,
                       overlap->mr_base, overlap->mr_end);
#endif
                return (EEXIST);
        }

        return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
        struct mmio_rb_range *np;

        pthread_rwlock_rdlock(&mmio_rwlock);
        RB_FOREACH(np, mmio_rb_tree, rbt) {
                printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
                       np->mr_param.name);
        }
        pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
        int error;
        struct mem_range *mr = arg;

        error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
                               rval, mr->arg1, mr->arg2);
        return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
        int error;
        struct mem_range *mr = arg;

        error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
                               &wval, mr->arg1, mr->arg2);
        return (error);
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
        struct mmio_rb_range *entry;
        int err, immutable;

        pthread_rwlock_rdlock(&mmio_rwlock);

        /*
         * First check the per-vCPU cache
         */
        if (mmio_hint[vcpu] &&
            paddr >= mmio_hint[vcpu]->mr_base &&
            paddr <= mmio_hint[vcpu]->mr_end) {
                entry = mmio_hint[vcpu];
        } else
                entry = NULL;

        if (entry == NULL) {
                if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
                        /* Update the per-vCPU cache */
                        mmio_hint[vcpu] = entry;
                } else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
                        pthread_rwlock_unlock(&mmio_rwlock);
                        return (ESRCH);
                }
        }

        assert(entry != NULL);

        /*
         * An 'immutable' memory range is guaranteed to be never removed
         * so there is no need to hold 'mmio_rwlock' while calling the
         * handler.
         *
         * XXX writes to the PCIR_COMMAND register can cause register_mem()
         * to be called. If the guest is using PCI extended config space
         * to modify the PCIR_COMMAND register then register_mem() can
         * deadlock on 'mmio_rwlock'. However by registering the extended
         * config space window as 'immutable' the deadlock can be avoided.
         */
        immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
        if (immutable)
                pthread_rwlock_unlock(&mmio_rwlock);

        err = vmm_emulate_instruction(ctx, vcpu, paddr, vie, paging,
                                      mem_read, mem_write, &entry->mr_param);

        if (!immutable)
                pthread_rwlock_unlock(&mmio_rwlock);

        return (err);
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
        struct mmio_rb_range *entry, *mrp;
        int err;

        err = 0;

        mrp = malloc(sizeof(struct mmio_rb_range));

        if (mrp != NULL) {
                mrp->mr_param = *memp;
                mrp->mr_base = memp->base;
                mrp->mr_end = memp->base + memp->size - 1;
                pthread_rwlock_wrlock(&mmio_rwlock);
                if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
                        err = mmio_rb_add(rbt, mrp);
                else
                        err = EEXIST;   /* already registered; don't leak mrp */
                pthread_rwlock_unlock(&mmio_rwlock);
                if (err)
                        free(mrp);
        } else
                err = ENOMEM;

        return (err);
}

int
register_mem(struct mem_range *memp)
{

        return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

        return (register_mem_int(&mmio_rb_fallback, memp));
}
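
/*
 * Example usage (names, addresses and the handler are hypothetical): a
 * device model would typically fill in a 'struct mem_range' and register
 * it once, e.g.
 *
 *      struct mem_range mr = {
 *              .name    = "example-mmio",
 *              .flags   = MEM_F_RW,
 *              .handler = example_mem_handler,
 *              .arg1    = sc,
 *              .arg2    = 0,
 *              .base    = 0xc0000000UL,
 *              .size    = 0x1000,
 *      };
 *      error = register_mem(&mr);
 *
 * mem_read()/mem_write() then invoke the handler whenever emulate_mem()
 * resolves a guest access that falls inside the range.
 */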

int
unregister_mem(struct mem_range *memp)
{
        struct mem_range *mr;
        struct mmio_rb_range *entry = NULL;
        int err, i;

        pthread_rwlock_wrlock(&mmio_rwlock);
        err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
        if (err == 0) {
                mr = &entry->mr_param;
                assert(mr->name == memp->name);
                assert(mr->base == memp->base && mr->size == memp->size);
                assert((mr->flags & MEM_F_IMMUTABLE) == 0);
                RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

                /* flush per-vCPU cache */
                for (i = 0; i < VM_MAXCPU; i++) {
                        if (mmio_hint[i] == entry)
                                mmio_hint[i] = NULL;
                }
        }
        pthread_rwlock_unlock(&mmio_rwlock);

        if (entry != NULL)
                free(entry);

        return (err);
}

void
init_mem(void)
{

        RB_INIT(&mmio_rb_root);
        RB_INIT(&mmio_rb_fallback);
        pthread_rwlock_init(&mmio_rwlock, NULL);
}
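
/*
 * Typical lifecycle (sketch): init_mem() runs once at startup, device
 * models then call register_mem()/register_mem_fallback() for their
 * MMIO windows, emulate_mem() is invoked from the VM-exit handling path
 * when the guest touches one of those guest-physical addresses, and
 * unregister_mem() removes a range when it is torn down or moved.
 */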