1 /******************************************************************************
4 * Two sets of functionality:
5 * 1. Granting foreign access to our memory reservation.
6 * 2. Accessing others' memory reservations via grant references.
7 * (i.e., mechanisms for both sender and recipient of grant references)
9 * Copyright (c) 2005, Christopher Clark
10 * Copyright (c) 2004, K A Fraser
13 #include <sys/cdefs.h>
14 __FBSDID("$FreeBSD$");
16 #include "opt_global.h"
19 #include <sys/param.h>
20 #include <sys/systm.h>
23 #include <sys/module.h>
24 #include <sys/kernel.h>
26 #include <sys/malloc.h>
29 #include <machine/xen/xen-os.h>
30 #include <xen/hypervisor.h>
31 #include <machine/xen/synch_bitops.h>
33 #include <xen/hypervisor.h>
34 #include <xen/gnttab.h>
37 #include <vm/vm_kern.h>
38 #include <vm/vm_extern.h>
41 #define cmpxchg(a, b, c) atomic_cmpset_int((volatile u_int *)(a),(b),(c))
43 /* External tools reserve first few grant table entries. */
44 #define NR_RESERVED_ENTRIES 8
45 #define GNTTAB_LIST_END 0xffffffff
46 #define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
/*
 * Grant-table free-list bookkeeping.  gnttab_list is a two-level table:
 * an array of pointers to page-sized arrays of grant_ref_t, indexed via
 * the gnttab_entry() macro below.  Free entries form a singly linked
 * list threaded through those slots, terminated by GNTTAB_LIST_END.
 * All of this state is protected by gnttab_list_lock.
 */
48 static grant_ref_t **gnttab_list;
/* Number of grant-table frames currently in use. */
49 static unsigned int nr_grant_frames;
/* Hypervisor-reported frame limit, captured once at boot. */
50 static unsigned int boot_max_nr_grant_frames;
/* Count of entries on the free list. */
51 static int gnttab_free_count;
/* Head of the free list (index into gnttab_entry()). */
52 static grant_ref_t gnttab_free_head;
53 static struct mtx gnttab_list_lock;
/* The grant-entry table shared with the hypervisor. */
55 static grant_entry_t *shared;
/* Callers waiting for enough free entries; see gnttab_request_free_callback(). */
57 static struct gnttab_free_callback *gnttab_free_callback_list = NULL;
59 static int gnttab_expand(unsigned int req_entries);
/* Grant references per page of free-list bookkeeping. */
61 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
/* Map a flat entry index onto the two-level gnttab_list structure. */
62 #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
/*
 * Detach 'count' grant references from the free list, expanding the
 * grant table via gnttab_expand() when too few are available.  The head
 * of the detached chain is handed back through *entries.
 * NOTE(review): this dump elides several lines of the routine (local
 * declarations, the early-error return, the loop that walks 'count'
 * links, and the final return) — confirm against the full source.
 */
65 get_free_entries(int count, int *entries)
/* Free-list manipulation must happen under gnttab_list_lock. */
70 mtx_lock(&gnttab_list_lock);
/* Not enough free entries: try to grow; bail out if that fails. */
71 if ((gnttab_free_count < count) &&
72 ((error = gnttab_expand(count - gnttab_free_count)) != 0)) {
73 mtx_unlock(&gnttab_list_lock);
/* Take 'count' entries starting at the current free head. */
76 ref = head = gnttab_free_head;
77 gnttab_free_count -= count;
/* Advance along the chain (loop header elided in this view). */
79 head = gnttab_entry(head);
/* New free head is the entry after the last one taken ... */
80 gnttab_free_head = gnttab_entry(head);
/* ... and the taken chain is terminated. */
81 gnttab_entry(head) = GNTTAB_LIST_END;
82 mtx_unlock(&gnttab_list_lock);
/*
 * Run queued free-space callbacks.  The list is detached first; any
 * callback whose request still cannot be satisfied is pushed back onto
 * gnttab_free_callback_list.  Caller is expected to hold
 * gnttab_list_lock (all call sites visible in this file do).
 */
89 do_free_callbacks(void)
91 struct gnttab_free_callback *callback, *next;
/* Detach the whole pending list before walking it. */
93 callback = gnttab_free_callback_list;
94 gnttab_free_callback_list = NULL;
96 while (callback != NULL) {
/* Save the link: fn() may re-queue this callback structure. */
97 next = callback->next;
98 if (gnttab_free_count >= callback->count) {
/* Enough entries now free — fire the callback. */
99 callback->next = NULL;
100 callback->fn(callback->arg);
/* Still unsatisfied — push back onto the pending list. */
102 callback->next = gnttab_free_callback_list;
103 gnttab_free_callback_list = callback;
/*
 * Cheap test-and-dispatch: only walk the callback list when it is
 * non-empty.  NOTE(review): the call this 'if' guards is elided here —
 * presumably do_free_callbacks(); confirm against the full source.
 */
110 check_free_callbacks(void)
112 if (unlikely(gnttab_free_callback_list != NULL))
/*
 * Return a single grant reference to the head of the free list and give
 * waiting callbacks a chance to run.
 */
117 put_free_entry(grant_ref_t ref)
120 mtx_lock(&gnttab_list_lock);
/* Push 'ref' onto the front of the free list. */
121 gnttab_entry(ref) = gnttab_free_head;
122 gnttab_free_head = ref;
/* The count bump (elided here) precedes this; notify waiters. */
124 check_free_callbacks();
125 mtx_unlock(&gnttab_list_lock);
129 * Public grant-issuing interface functions
/*
 * Allocate a grant reference and use it to grant domain 'domid' access
 * to machine frame 'frame', read-only when 'readonly' is set.
 * NOTE(review): the error check after get_free_entries() and the
 * function's return are elided in this view.
 */
133 gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly,
138 error = get_free_entries(1, &ref);
/* Fill in the shared entry; flags must be written last (barrier elided). */
143 shared[ref].frame = frame;
144 shared[ref].domid = domid;
146 shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
/*
 * As gnttab_grant_foreign_access(), but the caller supplies an
 * already-allocated grant reference 'ref'.
 */
155 gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
156 unsigned long frame, int readonly)
/* Frame and domid must be visible before flags activates the entry. */
159 shared[ref].frame = frame;
160 shared[ref].domid = domid;
162 shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
/*
 * Report whether the remote domain is still using grant 'ref': nonzero
 * if either the reading or writing status bit is set in the entry.
 */
166 gnttab_query_foreign_access(grant_ref_t ref)
170 nflags = shared[ref].flags;
172 return (nflags & (GTF_reading|GTF_writing));
/*
 * Revoke a foreign-access grant by atomically clearing its flags.
 * Fails (warns) if the remote end still holds the frame mapped; the
 * cmpxchg loop retries while the flags word changes underneath us.
 * NOTE(review): the do/while framing and return statements are elided
 * in this view.
 */
176 gnttab_end_foreign_access_ref(grant_ref_t ref)
178 uint16_t flags, nflags;
180 nflags = shared[ref].flags;
/* Remote domain still mapped: cannot safely revoke. */
182 if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
183 printf("WARNING: g.e. still in use!\n");
/* Retry until the clear-to-zero lands on an unchanged flags word. */
186 } while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
/*
 * Revoke a grant and release its backing page.  If revocation succeeds
 * the ref (and optionally the page) is freed; otherwise both are
 * deliberately leaked — see the XXX note below.
 */
193 gnttab_end_foreign_access(grant_ref_t ref, void *page)
195 if (gnttab_end_foreign_access_ref(ref)) {
/* Success path: release the page back to the kernel. */
198 free(page, M_DEVBUF);
202 /* XXX This needs to be fixed so that the ref and page are
203 placed on a list to be freed up later. */
204 printf("WARNING: leaking g.e. and page still in use!\n");
/*
 * Allocate a grant reference and offer frame 'pfn' for transfer from
 * domain 'domid'.  NOTE(review): error handling after
 * get_free_entries() and the return are elided in this view.
 */
209 gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn,
214 error = get_free_entries(1, &ref);
218 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
/*
 * Arm an existing grant reference 'ref' to accept a page transfer from
 * domain 'domid' into pfn.  Flags are written last so the entry only
 * becomes live once fully initialized.
 */
225 gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
228 shared[ref].frame = pfn;
229 shared[ref].domid = domid;
231 shared[ref].flags = GTF_accept_transfer;
/*
 * Complete (or abort) a page transfer on 'ref'.  Returns the machine
 * frame transferred, or 0 when the transfer never started and the
 * grant was reclaimed.  NOTE(review): the return statements and a cpu
 * relax/barrier inside the wait loops are elided in this view.
 */
235 gnttab_end_foreign_transfer_ref(grant_ref_t ref)
241 * If a transfer is not even yet started, try to reclaim the grant
242 * reference and return failure (== 0).
244 while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
/* Clear the flags atomically; success means we reclaimed the ref. */
245 if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
250 /* If a transfer is in progress then wait until it is completed. */
251 while (!(flags & GTF_transfer_completed)) {
252 flags = shared[ref].flags;
256 /* Read the frame number /after/ reading completion status. */
258 frame = shared[ref].frame;
259 KASSERT(frame != 0, ("grant table inconsistent"));
/*
 * Finish a transfer and release the reference.  NOTE(review): the
 * put_free_entry(ref) call and return of 'frame' are elided here.
 */
265 gnttab_end_foreign_transfer(grant_ref_t ref)
267 unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
/* Release a single reference back to the free list (body elided). */
274 gnttab_free_grant_reference(grant_ref_t ref)
/*
 * Return an entire caller-held chain of grant references (starting at
 * 'head', terminated by GNTTAB_LIST_END) to the global free list.
 * NOTE(review): the declarations of 'ref'/'count' and the count
 * accounting inside the walk loop are elided in this view.
 */
281 gnttab_free_grant_references(grant_ref_t head)
/* Empty chain: nothing to do. */
286 if (head == GNTTAB_LIST_END)
289 mtx_lock(&gnttab_list_lock);
/* Walk to the tail of the caller's chain. */
291 while (gnttab_entry(ref) != GNTTAB_LIST_END) {
292 ref = gnttab_entry(ref);
/* Splice the chain onto the front of the free list. */
295 gnttab_entry(ref) = gnttab_free_head;
296 gnttab_free_head = head;
297 gnttab_free_count += count;
/* New space may satisfy queued callbacks. */
298 check_free_callbacks();
299 mtx_unlock(&gnttab_list_lock);
/*
 * Reserve a private chain of 'count' references for the caller; the
 * chain head is returned via *head.  NOTE(review): error propagation
 * and the *head assignment are elided in this view.
 */
303 gnttab_alloc_grant_references(uint16_t count, grant_ref_t *head)
307 error = get_free_entries(count, &ref);
/* True when the caller's private reference chain is exhausted. */
316 gnttab_empty_grant_references(const grant_ref_t *private_head)
319 return (*private_head == GNTTAB_LIST_END);
/*
 * Pop one reference off the caller's private chain.  NOTE(review): the
 * failure return (chain empty) and the return of 'g' are elided here.
 */
323 gnttab_claim_grant_reference(grant_ref_t *private_head)
325 grant_ref_t g = *private_head;
327 if (unlikely(g == GNTTAB_LIST_END))
/* Advance the private head past the claimed entry. */
329 *private_head = gnttab_entry(g);
/*
 * Push 'release' back onto the caller's private chain (inverse of
 * gnttab_claim_grant_reference()).  No locking: the chain is private.
 */
334 gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release)
337 gnttab_entry(release) = *private_head;
338 *private_head = release;
/*
 * Register 'callback' to be invoked (with 'arg') once at least 'count'
 * grant references are free.  NOTE(review): the duplicate-registration
 * check and the fn/arg field assignments are elided in this view.
 */
342 gnttab_request_free_callback(struct gnttab_free_callback *callback,
343 void (*fn)(void *), void *arg, uint16_t count)
346 mtx_lock(&gnttab_list_lock);
/* Queue at the head of the pending list. */
351 callback->count = count;
352 callback->next = gnttab_free_callback_list;
353 gnttab_free_callback_list = callback;
/* The request may already be satisfiable — check immediately. */
354 check_free_callbacks();
356 mtx_unlock(&gnttab_list_lock);
/*
 * Remove 'callback' from the pending-callback list if it is queued.
 * Walks the list with a pointer-to-pointer so unlinking needs no
 * special case for the head element.
 */
361 gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
363 struct gnttab_free_callback **pcb;
365 mtx_lock(&gnttab_list_lock);
366 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
367 if (*pcb == callback) {
/* Unlink and stop (break elided in this view). */
368 *pcb = callback->next;
372 mtx_unlock(&gnttab_list_lock);
/*
 * Grow the free-list bookkeeping by 'more_frames' grant frames: allocate
 * a page of grant_ref_t per new frame, thread the new entries into a
 * chain, and splice that chain onto the free list.  On allocation
 * failure the partially allocated pages are unwound (tail of function).
 * NOTE(review): the NULL-check/goto after malloc and the return
 * statements are elided in this view.
 */
377 grow_gnttab_list(unsigned int more_frames)
379 unsigned int new_nr_grant_frames, extra_entries, i;
381 new_nr_grant_frames = nr_grant_frames + more_frames;
382 extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
/* One bookkeeping page per additional grant frame. */
384 for (i = nr_grant_frames; i < new_nr_grant_frames; i++)
386 gnttab_list[i] = (grant_ref_t *)
387 malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
/* Chain the new entries: each points at its successor. */
393 for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
394 i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
395 gnttab_entry(i) = i + 1;
/* Last new entry links to the old free head ... */
397 gnttab_entry(i) = gnttab_free_head;
/* ... and the first new entry becomes the new head. */
398 gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
399 gnttab_free_count += extra_entries;
401 nr_grant_frames = new_nr_grant_frames;
/* Fresh capacity may satisfy queued callbacks. */
403 check_free_callbacks();
/* Error path: free the bookkeeping pages allocated above. */
408 for ( ; i >= nr_grant_frames; i--)
409 free(gnttab_list[i], M_DEVBUF);
/*
 * Ask the hypervisor how many grant-table frames this domain may use.
 * Falls back to 4 (the legacy fixed size) when the query hypercall is
 * unsupported or reports an error.
 */
414 __max_nr_grant_frames(void)
416 struct gnttab_query_size query;
419 query.dom = DOMID_SELF;
421 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
422 if ((rc < 0) || (query.status != GNTST_okay))
423 return (4); /* Legacy max supported number of frames */
425 return (query.max_nr_frames);
/*
 * Effective frame limit: the hypervisor's current answer clamped to the
 * value captured at boot (we sized boot-time structures from it).
 * NOTE(review): the return of xen_max for the other case is elided.
 */
429 unsigned int max_nr_grant_frames(void)
431 unsigned int xen_max = __max_nr_grant_frames();
433 if (xen_max > boot_max_nr_grant_frames)
434 return (boot_max_nr_grant_frames);
440 * XXX needed for backend support
/*
 * Page-table walk callbacks (backend support).  map_pte_fn installs a
 * machine-frame mapping taken from the head of *frames; unmap_pte_fn
 * clears the PTE.  NOTE(review): the frame-pointer advance and return
 * statements are elided in this view.
 */
444 map_pte_fn(pte_t *pte, struct page *pmd_page,
445 unsigned long addr, void *data)
447 unsigned long **frames = (unsigned long **)data;
449 set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
455 unmap_pte_fn(pte_t *pte, struct page *pmd_page,
456 unsigned long addr, void *data)
/* Tear down: write a zero PTE. */
459 set_pte_at(&init_mm, addr, pte, __pte(0));
/*
 * (PV variant) Map grant-table frames [start_idx, end_idx] from the
 * hypervisor into kernel VA.  Asks the hypervisor for the machine frame
 * numbers with GNTTABOP_setup_table, lazily allocates the VA window on
 * first use, then points each page of 'shared' at its machine frame.
 * NOTE(review): several error-path and return lines are elided in this
 * view (e.g. the malloc NULL check and the handling when rc != 0 frees
 * 'frames' and returns early — hence the two free(frames) calls).
 */
467 gnttab_map(unsigned int start_idx, unsigned int end_idx)
469 struct gnttab_setup_table setup;
472 unsigned int nr_gframes = end_idx + 1;
475 frames = malloc(nr_gframes * sizeof(unsigned long), M_DEVBUF, M_NOWAIT);
479 setup.dom = DOMID_SELF;
480 setup.nr_frames = nr_gframes;
481 set_xen_guest_handle(setup.frame_list, frames);
483 rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
/* Early-exit path: hypercall did not populate 'frames'. */
485 free(frames, M_DEVBUF);
488 KASSERT(!(rc || setup.status),
489 ("unexpected result from grant_table_op"));
/* First call: reserve VA for the largest possible table. */
491 if (shared == NULL) {
494 area = kmem_alloc_nofault(kernel_map,
495 PAGE_SIZE * max_nr_grant_frames());
496 KASSERT(area, ("can't allocate VM space for grant table"));
497 shared = (grant_entry_t *)area;
/* Wire each VA page to its hypervisor-provided machine frame. */
500 for (i = 0; i < nr_gframes; i++)
501 PT_SET_MA(((caddr_t)shared) + i*PAGE_SIZE,
502 ((vm_paddr_t)frames[i]) << PAGE_SHIFT | PG_RW | PG_V);
504 free(frames, M_DEVBUF);
/*
 * NOTE(review): the enclosing function signatures are elided in this
 * view; these lines appear to be the bodies of the PV resume/suspend
 * hooks — confirm against the full source.  Resume refuses to proceed
 * if the hypervisor now allows fewer frames than we already use, then
 * remaps the whole table; suspend unmaps every table page.
 */
513 if (max_nr_grant_frames() < nr_grant_frames)
515 return (gnttab_map(0, nr_grant_frames - 1));
523 for (i = 0; i < nr_grant_frames; i++)
524 pmap_kremove((vm_offset_t) shared + i * PAGE_SIZE);
/* HVM (PCI platform device) variant follows. */
531 #include <dev/xen/xenpci/xenpcivar.h>
/* Physical base of the table frames reserved from the xenpci BAR. */
533 static vm_paddr_t resume_frames;
/*
 * (HVM variant) Map grant-table frames [start_idx, end_idx]: ask the
 * hypervisor to place each table frame at a guest pfn carved out of
 * resume_frames, then kenter the corresponding VA pages.
 * NOTE(review): xatp.idx assignment inside the loop and the trailing
 * return are elided in this view.
 */
535 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
537 struct xen_add_to_physmap xatp;
538 unsigned int i = end_idx;
541 * Loop backwards, so that the first hypercall has the largest index,
542 * ensuring that the table will grow only once.
545 xatp.domid = DOMID_SELF;
547 xatp.space = XENMAPSPACE_grant_table;
548 xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
549 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
550 panic("HYPERVISOR_memory_op failed to map gnttab");
551 } while (i-- > start_idx);
/* First call: reserve VA for the largest possible table. */
553 if (shared == NULL) {
556 area = kmem_alloc_nofault(kernel_map,
557 PAGE_SIZE * max_nr_grant_frames());
558 KASSERT(area, ("can't allocate VM space for grant table"));
559 shared = (grant_entry_t *)area;
/* Enter the physical frames into the kernel VA window. */
562 for (i = start_idx; i <= end_idx; i++) {
563 pmap_kenter((vm_offset_t) shared + i * PAGE_SIZE,
564 resume_frames + i * PAGE_SIZE);
/*
 * NOTE(review): the function signature is elided in this view; this is
 * presumably the HVM gnttab_resume() body — confirm against the full
 * source.  Lazily reserves physical space for the table from the
 * xenpci device on first resume, then (re)maps all frames.
 */
574 unsigned int max_nr_gframes, nr_gframes;
576 nr_gframes = nr_grant_frames;
577 max_nr_gframes = max_nr_grant_frames();
/* Hypervisor shrank our allowance below what we already use: fail. */
578 if (max_nr_gframes < nr_gframes)
/* One-time reservation of physical space for the whole table. */
581 if (!resume_frames) {
582 error = xenpci_alloc_space(PAGE_SIZE * max_nr_gframes,
585 printf("error mapping gnttab share frames\n");
590 return (gnttab_map(0, nr_gframes - 1));
/*
 * Grow the grant table by enough frames to hold 'req_entries' more
 * entries: map the new frames, then extend the free-list bookkeeping.
 * Caller holds gnttab_list_lock (see get_free_entries()).
 * NOTE(review): the ENOSPC-style failure return and the final return
 * of 'error' are elided in this view.
 */
596 gnttab_expand(unsigned int req_entries)
599 unsigned int cur, extra;
601 cur = nr_grant_frames;
/* Round the request up to whole grant frames. */
602 extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
603 GREFS_PER_GRANT_FRAME);
/* Would exceed the hypervisor-imposed frame limit. */
604 if (cur + extra > max_nr_grant_frames())
607 error = gnttab_map(cur, cur + extra - 1);
/* Only extend bookkeeping once the frames are actually mapped. */
609 error = grow_gnttab_list(extra);
/*
 * NOTE(review): the gnttab_init() signature is elided in this view;
 * these lines are its body.  Boot-time initialization: size the
 * free-list bookkeeping from the hypervisor's frame limit, allocate
 * the initial pages, thread entries NR_RESERVED_ENTRIES.. onto the
 * free list (the first few are reserved for external tools), and map
 * the table.  The tail is the allocation-failure unwind path.
 */
618 unsigned int max_nr_glist_frames;
619 unsigned int nr_init_grefs;
/* Not under Xen: nothing to do. */
621 if (!is_running_on_xen())
625 boot_max_nr_grant_frames = __max_nr_grant_frames();
627 /* Determine the maximum number of frames required for the
628 * grant reference free list on the current hypervisor.
630 max_nr_glist_frames = (boot_max_nr_grant_frames *
631 GREFS_PER_GRANT_FRAME /
632 (PAGE_SIZE / sizeof(grant_ref_t)));
634 gnttab_list = malloc(max_nr_glist_frames * sizeof(grant_ref_t *),
637 if (gnttab_list == NULL)
/* One bookkeeping page per initial grant frame. */
640 for (i = 0; i < nr_grant_frames; i++) {
641 gnttab_list[i] = (grant_ref_t *)
642 malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
643 if (gnttab_list[i] == NULL)
650 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
/* Thread the non-reserved entries into the free list. */
652 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
653 gnttab_entry(i) = i + 1;
655 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
656 gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
657 gnttab_free_head = NR_RESERVED_ENTRIES;
660 printf("Grant table initialized\n");
/* Failure unwind: release everything allocated so far. */
665 for (i--; i >= 0; i--)
666 free(gnttab_list[i], M_DEVBUF);
667 free(gnttab_list, M_DEVBUF);
/* Initialize the free-list mutex early in boot via SYSINIT machinery. */
672 MTX_SYSINIT(gnttab, &gnttab_list_lock, "GNTTAB LOCK", MTX_DEF);
/* Driver init hook kept for reference; gnttab_init is invoked elsewhere. */
673 //SYSINIT(gnttab, SI_SUB_PSEUDO, SI_ORDER_FIRST, gnttab_init, NULL);