/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Memory ranges are represented with an RB tree.  On insertion, the range
 * is checked for overlaps.  On lookup, the key is a degenerate range whose
 * base and limit are both the target address, so RB_FIND matches the
 * registered range that contains that address.
 */
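/*
 * For example, with a registered range [0xe0000000, 0xe0000fff], a lookup
 * of address 0xe0000800 builds the single-address key
 *
 *	find.mr_base = find.mr_end = 0xe0000800;
 *
 * which is neither entirely below nor entirely above that node, so the
 * comparator below reports "equal" and RB_FIND returns the containing
 * range.  (The addresses here are made up purely for illustration.)
 */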
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>

#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"
struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
/*
 * Per-vCPU cache.  Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);

	/* Overlapping (or contained) ranges compare equal. */
	return (0);
}
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);
	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end);
		return (EEXIST);
	}

	return (0);
}
/* Debugging helper; compiled out since nothing in this file calls it. */
#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}
static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}
int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie)
{
	struct mmio_rb_range *entry;
	int err;

	pthread_rwlock_rdlock(&mmio_rwlock);

	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			/* Not found in either tree: no handler for this address. */
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);
	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie,
	    mem_read, mem_write, &entry->mr_param);
	pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	err = 0;
	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}
int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
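/*
 * Illustrative sketch only (not part of this file): a device model would
 * typically fill in a 'struct mem_range' from mem.h and register it along
 * these lines.  The handler name, softc pointer and addresses below are
 * made-up placeholders.
 *
 *	struct mem_range mr;
 *
 *	memset(&mr, 0, sizeof(mr));
 *	mr.name = "mydev-mmio";
 *	mr.base = 0xd0000000;
 *	mr.size = 0x1000;
 *	mr.flags = MEM_F_READ | MEM_F_WRITE;
 *	mr.handler = mydev_mmio_handler;
 *	mr.arg1 = sc;
 *	error = register_mem(&mr);
 */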
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* Flush any per-vCPU cache entries pointing at this range. */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	if (entry != NULL)
		free(entry);

	return (err);
}
void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
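/*
 * For orientation, a sketch of how a caller might drive this interface;
 * the vmexit structure and field names below are assumptions about the
 * caller, not declarations from this file:
 *
 *	init_mem();
 *	...
 *	case VM_EXITCODE_INST_EMUL:
 *		err = emulate_mem(ctx, vcpu, vmexit->u.inst_emul.gpa,
 *		    &vmexit->u.inst_emul.vie);
 */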