2 * Copyright (c) 2018 VMware, Inc.
4 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
7 /* VMCI QueuePair API implementation. */
10 __FBSDID("$FreeBSD$");
13 #include "vmci_driver.h"
14 #include "vmci_event.h"
15 #include "vmci_kernel_api.h"
16 #include "vmci_kernel_defs.h"
17 #include "vmci_queue_pair.h"
19 #define LGPFX "vmci_queue_pair: "
21 struct queue_pair_entry {
22 vmci_list_item(queue_pair_entry) list_item;
23 struct vmci_handle handle;
26 uint64_t produce_size;
27 uint64_t consume_size;
31 struct qp_guest_endpoint {
32 struct queue_pair_entry qp;
36 bool hibernate_failure;
37 struct ppn_set ppn_set;
40 struct queue_pair_list {
41 vmci_list(queue_pair_entry) head;
42 volatile int hibernate;
/*
 * Total number of pages backing a queue pair entry: the data pages of both
 * queues plus one header page per queue.  NOTE: _QPE is evaluated twice, so
 * pass a plain lvalue with no side effects.
 */
#define QPE_NUM_PAGES(_QPE) \
	((uint32_t)(CEILING(_QPE.produce_size, PAGE_SIZE) + \
	CEILING(_QPE.consume_size, PAGE_SIZE) + 2))
50 static struct queue_pair_list qp_guest_endpoints;
52 static struct queue_pair_entry *queue_pair_list_find_entry(
53 struct queue_pair_list *qp_list, struct vmci_handle handle);
54 static void queue_pair_list_add_entry(struct queue_pair_list *qp_list,
55 struct queue_pair_entry *entry);
56 static void queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
57 struct queue_pair_entry *entry);
58 static struct queue_pair_entry *queue_pair_list_get_head(
59 struct queue_pair_list *qp_list);
60 static int queue_pair_notify_peer_local(bool attach,
61 struct vmci_handle handle);
62 static struct qp_guest_endpoint *qp_guest_endpoint_create(
63 struct vmci_handle handle, vmci_id peer, uint32_t flags,
64 uint64_t produce_size, uint64_t consume_size,
65 void *produce_q, void *consume_q);
66 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry);
67 static int vmci_queue_pair_alloc_hypercall(
68 const struct qp_guest_endpoint *entry);
69 static int vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
70 struct vmci_queue **produce_q, uint64_t produce_size,
71 struct vmci_queue **consume_q, uint64_t consume_size,
72 vmci_id peer, uint32_t flags,
73 vmci_privilege_flags priv_flags);
74 static int vmci_queue_pair_detach_guest_work(struct vmci_handle handle);
75 static int vmci_queue_pair_detach_hypercall(struct vmci_handle handle);
78 *------------------------------------------------------------------------------
80 * vmci_queue_pair_alloc --
82 * Allocates a VMCI QueuePair. Only checks validity of input arguments. The
83 * real work is done in the host or guest specific function.
86 * VMCI_SUCCESS on success, appropriate error code otherwise.
91 *------------------------------------------------------------------------------
95 vmci_queue_pair_alloc(struct vmci_handle *handle, struct vmci_queue **produce_q,
96 uint64_t produce_size, struct vmci_queue **consume_q, uint64_t consume_size,
97 vmci_id peer, uint32_t flags, vmci_privilege_flags priv_flags)
100 if (!handle || !produce_q || !consume_q ||
101 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
102 return (VMCI_ERROR_INVALID_ARGS);
104 return (vmci_queue_pair_alloc_guest_work(handle, produce_q,
105 produce_size, consume_q, consume_size, peer, flags, priv_flags));
109 *------------------------------------------------------------------------------
111 * vmci_queue_pair_detach --
113 * Detaches from a VMCI QueuePair. Only checks validity of input argument.
114 * Real work is done in the host or guest specific function.
117 * Success or failure.
122 *------------------------------------------------------------------------------
126 vmci_queue_pair_detach(struct vmci_handle handle)
129 if (VMCI_HANDLE_INVALID(handle))
130 return (VMCI_ERROR_INVALID_ARGS);
132 return (vmci_queue_pair_detach_guest_work(handle));
136 *------------------------------------------------------------------------------
138 * queue_pair_list_init --
140 * Initializes the list of QueuePairs.
143 * Success or failure.
148 *------------------------------------------------------------------------------
152 queue_pair_list_init(struct queue_pair_list *qp_list)
156 vmci_list_init(&qp_list->head);
157 atomic_store_int(&qp_list->hibernate, 0);
158 ret = vmci_mutex_init(&qp_list->mutex, "VMCI QP List lock");
163 *------------------------------------------------------------------------------
165 * queue_pair_list_destroy --
167 * Destroy the list's mutex.
175 *------------------------------------------------------------------------------
179 queue_pair_list_destroy(struct queue_pair_list *qp_list)
182 vmci_mutex_destroy(&qp_list->mutex);
183 vmci_list_init(&qp_list->head);
187 *------------------------------------------------------------------------------
189 * queue_pair_list_find_entry --
191 * Finds the entry in the list corresponding to a given handle. Assumes that
192 * the list is locked.
200 *------------------------------------------------------------------------------
203 static struct queue_pair_entry *
204 queue_pair_list_find_entry(struct queue_pair_list *qp_list,
205 struct vmci_handle handle)
207 struct queue_pair_entry *next;
209 if (VMCI_HANDLE_INVALID(handle))
212 vmci_list_scan(next, &qp_list->head, list_item) {
213 if (VMCI_HANDLE_EQUAL(next->handle, handle))
221 *------------------------------------------------------------------------------
223 * queue_pair_list_add_entry --
225 * Adds the given entry to the list. Assumes that the list is locked.
233 *------------------------------------------------------------------------------
237 queue_pair_list_add_entry(struct queue_pair_list *qp_list,
238 struct queue_pair_entry *entry)
242 vmci_list_insert(&qp_list->head, entry, list_item);
246 *------------------------------------------------------------------------------
248 * queue_pair_list_remove_entry --
250 * Removes the given entry from the list. Assumes that the list is locked.
258 *------------------------------------------------------------------------------
262 queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
263 struct queue_pair_entry *entry)
267 vmci_list_remove(entry, list_item);
271 *------------------------------------------------------------------------------
273 * queue_pair_list_get_head --
275 * Returns the entry from the head of the list. Assumes that the list is
284 *------------------------------------------------------------------------------
287 static struct queue_pair_entry *
288 queue_pair_list_get_head(struct queue_pair_list *qp_list)
291 return (vmci_list_first(&qp_list->head));
295 *------------------------------------------------------------------------------
297 * vmci_qp_guest_endpoints_init --
299 * Initalizes data structure state keeping track of queue pair guest
303 * VMCI_SUCCESS on success and appropriate failure code otherwise.
308 *------------------------------------------------------------------------------
312 vmci_qp_guest_endpoints_init(void)
315 return (queue_pair_list_init(&qp_guest_endpoints));
319 *------------------------------------------------------------------------------
321 * vmci_qp_guest_endpoints_exit --
323 * Destroys all guest queue pair endpoints. If active guest queue pairs
324 * still exist, hypercalls to attempt detach from these queue pairs will be
325 * made. Any failure to detach is silently ignored.
333 *------------------------------------------------------------------------------
337 vmci_qp_guest_endpoints_exit(void)
339 struct qp_guest_endpoint *entry;
341 if (!vmci_mutex_initialized(&qp_guest_endpoints.mutex))
344 vmci_mutex_acquire(&qp_guest_endpoints.mutex);
347 (struct qp_guest_endpoint *)queue_pair_list_get_head(
348 &qp_guest_endpoints)) != NULL) {
350 * Don't make a hypercall for local QueuePairs.
352 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL))
353 vmci_queue_pair_detach_hypercall(entry->qp.handle);
355 * We cannot fail the exit, so let's reset ref_count.
357 entry->qp.ref_count = 0;
358 queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);
359 qp_guest_endpoint_destroy(entry);
362 atomic_store_int(&qp_guest_endpoints.hibernate, 0);
363 vmci_mutex_release(&qp_guest_endpoints.mutex);
364 queue_pair_list_destroy(&qp_guest_endpoints);
368 *------------------------------------------------------------------------------
370 * vmci_qp_guest_endpoints_sync --
372 * Use this as a synchronization point when setting globals, for example,
373 * during device shutdown.
381 *------------------------------------------------------------------------------
385 vmci_qp_guest_endpoints_sync(void)
388 vmci_mutex_acquire(&qp_guest_endpoints.mutex);
389 vmci_mutex_release(&qp_guest_endpoints.mutex);
393 *------------------------------------------------------------------------------
395 * qp_guest_endpoint_create --
397 * Allocates and initializes a qp_guest_endpoint structure. Allocates a
398 * QueuePair rid (and handle) iff the given entry has an invalid handle.
399 * 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved handles. Assumes
400 * that the QP list mutex is held by the caller.
403 * Pointer to structure intialized.
408 *------------------------------------------------------------------------------
411 struct qp_guest_endpoint *
412 qp_guest_endpoint_create(struct vmci_handle handle, vmci_id peer,
413 uint32_t flags, uint64_t produce_size, uint64_t consume_size,
414 void *produce_q, void *consume_q)
416 struct qp_guest_endpoint *entry;
417 static vmci_id queue_pair_rid;
418 const uint64_t num_ppns = CEILING(produce_size, PAGE_SIZE) +
419 CEILING(consume_size, PAGE_SIZE) +
420 2; /* One page each for the queue headers. */
422 queue_pair_rid = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
424 ASSERT((produce_size || consume_size) && produce_q && consume_q);
426 if (VMCI_HANDLE_INVALID(handle)) {
427 vmci_id context_id = vmci_get_context_id();
428 vmci_id old_rid = queue_pair_rid;
431 * Generate a unique QueuePair rid. Keep on trying until we
432 * wrap around in the RID space.
434 ASSERT(old_rid > VMCI_RESERVED_RESOURCE_ID_MAX);
436 handle = VMCI_MAKE_HANDLE(context_id, queue_pair_rid);
438 (struct qp_guest_endpoint *)
439 queue_pair_list_find_entry(&qp_guest_endpoints,
442 if (UNLIKELY(!queue_pair_rid)) {
444 * Skip the reserved rids.
447 VMCI_RESERVED_RESOURCE_ID_MAX + 1;
449 } while (entry && queue_pair_rid != old_rid);
451 if (UNLIKELY(entry != NULL)) {
452 ASSERT(queue_pair_rid == old_rid);
454 * We wrapped around --- no rids were free.
460 ASSERT(!VMCI_HANDLE_INVALID(handle) &&
461 queue_pair_list_find_entry(&qp_guest_endpoints, handle) == NULL);
462 entry = vmci_alloc_kernel_mem(sizeof(*entry), VMCI_MEMORY_NORMAL);
464 entry->qp.handle = handle;
465 entry->qp.peer = peer;
466 entry->qp.flags = flags;
467 entry->qp.produce_size = produce_size;
468 entry->qp.consume_size = consume_size;
469 entry->qp.ref_count = 0;
470 entry->num_ppns = num_ppns;
471 memset(&entry->ppn_set, 0, sizeof(entry->ppn_set));
472 entry->produce_q = produce_q;
473 entry->consume_q = consume_q;
479 *------------------------------------------------------------------------------
481 * qp_guest_endpoint_destroy --
483 * Frees a qp_guest_endpoint structure.
491 *------------------------------------------------------------------------------
495 qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
499 ASSERT(entry->qp.ref_count == 0);
501 vmci_free_ppn_set(&entry->ppn_set);
502 vmci_free_queue(entry->produce_q, entry->qp.produce_size);
503 vmci_free_queue(entry->consume_q, entry->qp.consume_size);
504 vmci_free_kernel_mem(entry, sizeof(*entry));
508 *------------------------------------------------------------------------------
510 * vmci_queue_pair_alloc_hypercall --
512 * Helper to make a QueuePairAlloc hypercall when the driver is
513 * supporting a guest device.
516 * Result of the hypercall.
519 * Memory is allocated & freed.
521 *------------------------------------------------------------------------------
524 vmci_queue_pair_alloc_hypercall(const struct qp_guest_endpoint *entry)
526 struct vmci_queue_pair_alloc_msg *alloc_msg;
530 if (!entry || entry->num_ppns <= 2)
531 return (VMCI_ERROR_INVALID_ARGS);
533 ASSERT(!(entry->qp.flags & VMCI_QPFLAG_LOCAL));
535 msg_size = sizeof(*alloc_msg) + (size_t)entry->num_ppns * sizeof(PPN);
536 alloc_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);
538 return (VMCI_ERROR_NO_MEM);
540 alloc_msg->hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
541 VMCI_QUEUEPAIR_ALLOC);
542 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
543 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
544 alloc_msg->handle = entry->qp.handle;
545 alloc_msg->peer = entry->qp.peer;
546 alloc_msg->flags = entry->qp.flags;
547 alloc_msg->produce_size = entry->qp.produce_size;
548 alloc_msg->consume_size = entry->qp.consume_size;
549 alloc_msg->num_ppns = entry->num_ppns;
550 result = vmci_populate_ppn_list((uint8_t *)alloc_msg +
551 sizeof(*alloc_msg), &entry->ppn_set);
552 if (result == VMCI_SUCCESS)
553 result = vmci_send_datagram((struct vmci_datagram *)alloc_msg);
554 vmci_free_kernel_mem(alloc_msg, msg_size);
560 *------------------------------------------------------------------------------
562 * vmci_queue_pair_alloc_guest_work --
564 * This functions handles the actual allocation of a VMCI queue pair guest
565 * endpoint. Allocates physical pages for the queue pair. It makes OS
566 * dependent calls through generic wrappers.
569 * Success or failure.
572 * Memory is allocated.
574 *------------------------------------------------------------------------------
578 vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
579 struct vmci_queue **produce_q, uint64_t produce_size,
580 struct vmci_queue **consume_q, uint64_t consume_size, vmci_id peer,
581 uint32_t flags, vmci_privilege_flags priv_flags)
583 struct qp_guest_endpoint *queue_pair_entry = NULL;
584 void *my_consume_q = NULL;
585 void *my_produce_q = NULL;
586 const uint64_t num_consume_pages = CEILING(consume_size, PAGE_SIZE) + 1;
587 const uint64_t num_produce_pages = CEILING(produce_size, PAGE_SIZE) + 1;
590 ASSERT(handle && produce_q && consume_q &&
591 (produce_size || consume_size));
593 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
594 return (VMCI_ERROR_NO_ACCESS);
596 vmci_mutex_acquire(&qp_guest_endpoints.mutex);
598 if ((atomic_load_int(&qp_guest_endpoints.hibernate) == 1) &&
599 !(flags & VMCI_QPFLAG_LOCAL)) {
601 * While guest OS is in hibernate state, creating non-local
602 * queue pairs is not allowed after the point where the VMCI
603 * guest driver converted the existing queue pairs to local
607 result = VMCI_ERROR_UNAVAILABLE;
611 if ((queue_pair_entry =
612 (struct qp_guest_endpoint *)queue_pair_list_find_entry(
613 &qp_guest_endpoints, *handle)) != NULL) {
614 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
615 /* Local attach case. */
616 if (queue_pair_entry->qp.ref_count > 1) {
617 VMCI_LOG_DEBUG(LGPFX"Error attempting to "
618 "attach more than once.\n");
619 result = VMCI_ERROR_UNAVAILABLE;
620 goto error_keep_entry;
623 if (queue_pair_entry->qp.produce_size != consume_size ||
624 queue_pair_entry->qp.consume_size != produce_size ||
625 queue_pair_entry->qp.flags !=
626 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
627 VMCI_LOG_DEBUG(LGPFX"Error mismatched "
628 "queue pair in local attach.\n");
629 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
630 goto error_keep_entry;
634 * Do a local attach. We swap the consume and produce
635 * queues for the attacher and deliver an attach event.
637 result = queue_pair_notify_peer_local(true, *handle);
638 if (result < VMCI_SUCCESS)
639 goto error_keep_entry;
640 my_produce_q = queue_pair_entry->consume_q;
641 my_consume_q = queue_pair_entry->produce_q;
644 result = VMCI_ERROR_ALREADY_EXISTS;
645 goto error_keep_entry;
648 my_produce_q = vmci_alloc_queue(produce_size, flags);
650 VMCI_LOG_WARNING(LGPFX"Error allocating pages for produce "
652 result = VMCI_ERROR_NO_MEM;
656 my_consume_q = vmci_alloc_queue(consume_size, flags);
658 VMCI_LOG_WARNING(LGPFX"Error allocating pages for consume "
660 result = VMCI_ERROR_NO_MEM;
664 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
665 produce_size, consume_size, my_produce_q, my_consume_q);
666 if (!queue_pair_entry) {
667 VMCI_LOG_WARNING(LGPFX"Error allocating memory in %s.\n",
669 result = VMCI_ERROR_NO_MEM;
673 result = vmci_alloc_ppn_set(my_produce_q, num_produce_pages,
674 my_consume_q, num_consume_pages, &queue_pair_entry->ppn_set);
675 if (result < VMCI_SUCCESS) {
676 VMCI_LOG_WARNING(LGPFX"vmci_alloc_ppn_set failed.\n");
681 * It's only necessary to notify the host if this queue pair will be
682 * attached to from another context.
684 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
685 /* Local create case. */
686 vmci_id context_id = vmci_get_context_id();
689 * Enforce similar checks on local queue pairs as we do for
690 * regular ones. The handle's context must match the creator
691 * or attacher context id (here they are both the current
692 * context id) and the attach-only flag cannot exist during
693 * create. We also ensure specified peer is this context or
696 if (queue_pair_entry->qp.handle.context != context_id ||
697 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
698 queue_pair_entry->qp.peer != context_id)) {
699 result = VMCI_ERROR_NO_ACCESS;
703 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
704 result = VMCI_ERROR_NOT_FOUND;
708 result = vmci_queue_pair_alloc_hypercall(queue_pair_entry);
709 if (result < VMCI_SUCCESS) {
711 LGPFX"vmci_queue_pair_alloc_hypercall result = "
717 queue_pair_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
720 queue_pair_entry->qp.ref_count++;
721 *handle = queue_pair_entry->qp.handle;
722 *produce_q = (struct vmci_queue *)my_produce_q;
723 *consume_q = (struct vmci_queue *)my_consume_q;
726 * We should initialize the queue pair header pages on a local queue
727 * pair create. For non-local queue pairs, the hypervisor initializes
728 * the header pages in the create step.
730 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
731 queue_pair_entry->qp.ref_count == 1) {
732 vmci_queue_header_init((*produce_q)->q_header, *handle);
733 vmci_queue_header_init((*consume_q)->q_header, *handle);
736 vmci_mutex_release(&qp_guest_endpoints.mutex);
738 return (VMCI_SUCCESS);
741 vmci_mutex_release(&qp_guest_endpoints.mutex);
742 if (queue_pair_entry) {
743 /* The queues will be freed inside the destroy routine. */
744 qp_guest_endpoint_destroy(queue_pair_entry);
747 vmci_free_queue(my_produce_q, produce_size);
749 vmci_free_queue(my_consume_q, consume_size);
754 /* This path should only be used when an existing entry was found. */
755 ASSERT(queue_pair_entry->qp.ref_count > 0);
756 vmci_mutex_release(&qp_guest_endpoints.mutex);
761 *------------------------------------------------------------------------------
763 * vmci_queue_pair_detach_hypercall --
765 * Helper to make a QueuePairDetach hypercall when the driver is supporting
769 * Result of the hypercall.
774 *------------------------------------------------------------------------------
778 vmci_queue_pair_detach_hypercall(struct vmci_handle handle)
780 struct vmci_queue_pair_detach_msg detach_msg;
782 detach_msg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
783 VMCI_QUEUEPAIR_DETACH);
784 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
785 detach_msg.hdr.payload_size = sizeof(handle);
786 detach_msg.handle = handle;
788 return (vmci_send_datagram((struct vmci_datagram *)&detach_msg));
792 *------------------------------------------------------------------------------
794 * vmci_queue_pair_detach_guest_work --
796 * Helper for VMCI QueuePair detach interface. Frees the physical pages for
800 * Success or failure.
803 * Memory may be freed.
805 *------------------------------------------------------------------------------
809 vmci_queue_pair_detach_guest_work(struct vmci_handle handle)
811 struct qp_guest_endpoint *entry;
815 ASSERT(!VMCI_HANDLE_INVALID(handle));
817 vmci_mutex_acquire(&qp_guest_endpoints.mutex);
819 entry = (struct qp_guest_endpoint *)queue_pair_list_find_entry(
820 &qp_guest_endpoints, handle);
822 vmci_mutex_release(&qp_guest_endpoints.mutex);
823 return (VMCI_ERROR_NOT_FOUND);
826 ASSERT(entry->qp.ref_count >= 1);
828 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
829 result = VMCI_SUCCESS;
831 if (entry->qp.ref_count > 1) {
832 result = queue_pair_notify_peer_local(false, handle);
835 * We can fail to notify a local queuepair because we
836 * can't allocate. We still want to release the entry
837 * if that happens, so don't bail out yet.
841 result = vmci_queue_pair_detach_hypercall(handle);
842 if (entry->hibernate_failure) {
843 if (result == VMCI_ERROR_NOT_FOUND) {
845 * If a queue pair detach failed when entering
846 * hibernation, the guest driver and the device
847 * may disagree on its existence when coming
848 * out of hibernation. The guest driver will
849 * regard it as a non-local queue pair, but
850 * the device state is gone, since the device
851 * has been powered off. In this case, we
852 * treat the queue pair as a local queue pair
856 ASSERT(entry->qp.ref_count == 1);
857 result = VMCI_SUCCESS;
860 if (result < VMCI_SUCCESS) {
862 * We failed to notify a non-local queuepair. That other
863 * queuepair might still be accessing the shared
864 * memory, so don't release the entry yet. It will get
865 * cleaned up by vmci_queue_pair_Exit() if necessary
866 * (assuming we are going away, otherwise why did this
870 vmci_mutex_release(&qp_guest_endpoints.mutex);
876 * If we get here then we either failed to notify a local queuepair, or
877 * we succeeded in all cases. Release the entry if required.
880 entry->qp.ref_count--;
881 if (entry->qp.ref_count == 0)
882 queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);
884 /* If we didn't remove the entry, this could change once we unlock. */
885 ref_count = entry ? entry->qp.ref_count :
887 * Value does not matter, silence the
891 vmci_mutex_release(&qp_guest_endpoints.mutex);
894 qp_guest_endpoint_destroy(entry);
899 *------------------------------------------------------------------------------
901 * queue_pair_notify_peer_local --
903 * Dispatches a queue pair event message directly into the local event
907 * VMCI_SUCCESS on success, error code otherwise
912 *------------------------------------------------------------------------------
916 queue_pair_notify_peer_local(bool attach, struct vmci_handle handle)
918 struct vmci_event_msg *e_msg;
919 struct vmci_event_payload_qp *e_payload;
920 /* buf is only 48 bytes. */
922 context_id = vmci_get_context_id();
923 char buf[sizeof(*e_msg) + sizeof(*e_payload)];
925 e_msg = (struct vmci_event_msg *)buf;
926 e_payload = vmci_event_msg_payload(e_msg);
928 e_msg->hdr.dst = VMCI_MAKE_HANDLE(context_id, VMCI_EVENT_HANDLER);
929 e_msg->hdr.src = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
930 VMCI_CONTEXT_RESOURCE_ID);
931 e_msg->hdr.payload_size = sizeof(*e_msg) + sizeof(*e_payload) -
933 e_msg->event_data.event = attach ? VMCI_EVENT_QP_PEER_ATTACH :
934 VMCI_EVENT_QP_PEER_DETACH;
935 e_payload->peer_id = context_id;
936 e_payload->handle = handle;
938 return (vmci_event_dispatch((struct vmci_datagram *)e_msg));