/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements VMCI Event code. */
10 __FBSDID("$FreeBSD$");
13 #include "vmci_driver.h"
14 #include "vmci_event.h"
15 #include "vmci_kernel_api.h"
16 #include "vmci_kernel_defs.h"
17 #include "vmci_kernel_if.h"
19 #define LGPFX "vmci_event: "
20 #define EVENT_MAGIC 0xEABE0000
22 struct vmci_subscription {
26 vmci_event destroy_event;
27 vmci_event_type event;
28 vmci_event_cb callback;
30 vmci_list_item(vmci_subscription) subscriber_list_item;
33 static struct vmci_subscription *vmci_event_find(vmci_id sub_id);
34 static int vmci_event_deliver(struct vmci_event_msg *event_msg);
35 static int vmci_event_register_subscription(struct vmci_subscription *sub,
36 vmci_event_type event, uint32_t flags,
37 vmci_event_cb callback, void *callback_data);
38 static struct vmci_subscription *vmci_event_unregister_subscription(
41 static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
42 static vmci_lock subscriber_lock;
44 struct vmci_delayed_event_info {
45 struct vmci_subscription *sub;
46 uint8_t event_payload[sizeof(struct vmci_event_data_max)];
49 struct vmci_event_ref {
50 struct vmci_subscription *sub;
51 vmci_list_item(vmci_event_ref) list_item;
55 *------------------------------------------------------------------------------
62 * VMCI_SUCCESS on success, appropriate error code otherwise.
67 *------------------------------------------------------------------------------
75 for (i = 0; i < VMCI_EVENT_MAX; i++)
76 vmci_list_init(&subscriber_array[i]);
78 return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
82 *------------------------------------------------------------------------------
94 *------------------------------------------------------------------------------
100 struct vmci_subscription *iter, *iter_2;
103 /* We free all memory at exit. */
104 for (e = 0; e < VMCI_EVENT_MAX; e++) {
105 vmci_list_scan_safe(iter, &subscriber_array[e],
106 subscriber_list_item, iter_2) {
109 * We should never get here because all events should
110 * have been unregistered before we try to unload the
111 * driver module. Also, delayed callbacks could still
112 * be firing so this cleanup would not be safe. Still
113 * it is better to free the memory than not ... so we
114 * leave this code in just in case....
118 vmci_free_kernel_mem(iter, sizeof(*iter));
121 vmci_cleanup_lock(&subscriber_lock);
125 *------------------------------------------------------------------------------
129 * Use this as a synchronization point when setting globals, for example,
130 * during device shutdown.
138 *------------------------------------------------------------------------------
142 vmci_event_sync(void)
145 vmci_grab_lock_bh(&subscriber_lock);
146 vmci_release_lock_bh(&subscriber_lock);
150 *------------------------------------------------------------------------------
152 * vmci_event_check_host_capabilities --
154 * Verify that the host supports the hypercalls we need. If it does not,
155 * try to find fallback hypercalls and use those instead.
158 * true if required hypercalls (or fallback hypercalls) are
159 * supported by the host, false otherwise.
164 *------------------------------------------------------------------------------
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are
 *     supported by the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_event_check_host_capabilities(void)
{

	/* vmci_event does not require any hypercalls. */
	return (true);
}
176 *------------------------------------------------------------------------------
180 * Gets a reference to the given struct vmci_subscription.
188 *------------------------------------------------------------------------------
192 vmci_event_get(struct vmci_subscription *entry)
201 *------------------------------------------------------------------------------
203 * vmci_event_release --
205 * Releases the given struct vmci_subscription.
211 * Fires the destroy event if the reference count has gone to zero.
213 *------------------------------------------------------------------------------
217 vmci_event_release(struct vmci_subscription *entry)
221 ASSERT(entry->ref_count > 0);
224 if (entry->ref_count == 0)
225 vmci_signal_event(&entry->destroy_event);
229 *------------------------------------------------------------------------------
231 * event_release_cb --
233 * Callback to release the event entry reference. It is called by the
234 * vmci_wait_on_event function before it blocks.
242 *------------------------------------------------------------------------------
246 event_release_cb(void *client_data)
248 struct vmci_subscription *sub = (struct vmci_subscription *)client_data;
252 vmci_grab_lock_bh(&subscriber_lock);
253 vmci_event_release(sub);
254 vmci_release_lock_bh(&subscriber_lock);
260 *------------------------------------------------------------------------------
264 * Find entry. Assumes lock is held.
267 * Entry if found, NULL if not.
270 * Increments the struct vmci_subscription refcount if an entry is found.
272 *------------------------------------------------------------------------------
275 static struct vmci_subscription *
276 vmci_event_find(vmci_id sub_id)
278 struct vmci_subscription *iter;
281 for (e = 0; e < VMCI_EVENT_MAX; e++) {
282 vmci_list_scan(iter, &subscriber_array[e],
283 subscriber_list_item) {
284 if (iter->id == sub_id) {
285 vmci_event_get(iter);
294 *------------------------------------------------------------------------------
296 * vmci_event_delayed_dispatch_cb --
298 * Calls the specified callback in a delayed context.
306 *------------------------------------------------------------------------------
310 vmci_event_delayed_dispatch_cb(void *data)
312 struct vmci_delayed_event_info *event_info;
313 struct vmci_subscription *sub;
314 struct vmci_event_data *ed;
316 event_info = (struct vmci_delayed_event_info *)data;
319 ASSERT(event_info->sub);
321 sub = event_info->sub;
322 ed = (struct vmci_event_data *)event_info->event_payload;
324 sub->callback(sub->id, ed, sub->callback_data);
326 vmci_grab_lock_bh(&subscriber_lock);
327 vmci_event_release(sub);
328 vmci_release_lock_bh(&subscriber_lock);
330 vmci_free_kernel_mem(event_info, sizeof(*event_info));
334 *------------------------------------------------------------------------------
336 * vmci_event_deliver --
338 * Actually delivers the events to the subscribers.
344 * The callback function for each subscriber is invoked.
346 *------------------------------------------------------------------------------
350 vmci_event_deliver(struct vmci_event_msg *event_msg)
352 struct vmci_subscription *iter;
353 int err = VMCI_SUCCESS;
355 vmci_list(vmci_event_ref) no_delay_list;
356 vmci_list_init(&no_delay_list);
360 vmci_grab_lock_bh(&subscriber_lock);
361 vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
362 subscriber_list_item) {
363 if (iter->run_delayed) {
364 struct vmci_delayed_event_info *event_info;
366 vmci_alloc_kernel_mem(sizeof(*event_info),
367 VMCI_MEMORY_ATOMIC)) == NULL) {
368 err = VMCI_ERROR_NO_MEM;
372 vmci_event_get(iter);
374 memset(event_info, 0, sizeof(*event_info));
375 memcpy(event_info->event_payload,
376 VMCI_DG_PAYLOAD(event_msg),
377 (size_t)event_msg->hdr.payload_size);
378 event_info->sub = iter;
380 vmci_schedule_delayed_work(
381 vmci_event_delayed_dispatch_cb, event_info);
382 if (err != VMCI_SUCCESS) {
383 vmci_event_release(iter);
384 vmci_free_kernel_mem(
385 event_info, sizeof(*event_info));
390 struct vmci_event_ref *event_ref;
393 * We construct a local list of subscribers and release
394 * subscriber_lock before invoking the callbacks. This
395 * is similar to delayed callbacks, but callbacks are
396 * invoked right away here.
398 if ((event_ref = vmci_alloc_kernel_mem(
399 sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
400 err = VMCI_ERROR_NO_MEM;
404 vmci_event_get(iter);
405 event_ref->sub = iter;
406 vmci_list_insert(&no_delay_list, event_ref, list_item);
411 vmci_release_lock_bh(&subscriber_lock);
413 if (!vmci_list_empty(&no_delay_list)) {
414 struct vmci_event_data *ed;
415 struct vmci_event_ref *iter;
416 struct vmci_event_ref *iter_2;
418 vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
419 struct vmci_subscription *cur;
420 uint8_t event_payload[sizeof(
421 struct vmci_event_data_max)];
426 * We set event data before each callback to ensure
429 memset(event_payload, 0, sizeof(event_payload));
430 memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
431 (size_t)event_msg->hdr.payload_size);
432 ed = (struct vmci_event_data *)event_payload;
433 cur->callback(cur->id, ed, cur->callback_data);
435 vmci_grab_lock_bh(&subscriber_lock);
436 vmci_event_release(cur);
437 vmci_release_lock_bh(&subscriber_lock);
438 vmci_free_kernel_mem(iter, sizeof(*iter));
446 *------------------------------------------------------------------------------
448 * vmci_event_dispatch --
450 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
451 * subscribers for given event.
454 * VMCI_SUCCESS on success, error code otherwise.
459 *------------------------------------------------------------------------------
463 vmci_event_dispatch(struct vmci_datagram *msg)
465 struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
468 msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
469 msg->dst.resource == VMCI_EVENT_HANDLER);
471 if (msg->payload_size < sizeof(vmci_event_type) ||
472 msg->payload_size > sizeof(struct vmci_event_data_max))
473 return (VMCI_ERROR_INVALID_ARGS);
475 if (!VMCI_EVENT_VALID(event_msg->event_data.event))
476 return (VMCI_ERROR_EVENT_UNKNOWN);
478 vmci_event_deliver(event_msg);
480 return (VMCI_SUCCESS);
484 *------------------------------------------------------------------------------
486 * vmci_event_register_subscription --
488 * Initialize and add subscription to subscriber list.
491 * VMCI_SUCCESS on success, error code otherwise.
496 *------------------------------------------------------------------------------
500 vmci_event_register_subscription(struct vmci_subscription *sub,
501 vmci_event_type event, uint32_t flags, vmci_event_cb callback,
504 #define VMCI_EVENT_MAX_ATTEMPTS 10
505 static vmci_id subscription_id = 0;
507 uint32_t attempts = 0;
512 if (!VMCI_EVENT_VALID(event) || callback == NULL) {
513 VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
514 " (type=%d) (callback=%p) (data=%p).\n",
515 event, callback, callback_data);
516 return (VMCI_ERROR_INVALID_ARGS);
519 if (!vmci_can_schedule_delayed_work()) {
521 * If the platform doesn't support delayed work callbacks then
522 * don't allow registration for them.
524 if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
525 return (VMCI_ERROR_INVALID_ARGS);
526 sub->run_delayed = false;
529 * The platform supports delayed work callbacks. Honor the
532 sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?
538 sub->callback = callback;
539 sub->callback_data = callback_data;
541 vmci_grab_lock_bh(&subscriber_lock);
543 for (success = false, attempts = 0;
544 success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
546 struct vmci_subscription *existing_sub = NULL;
549 * We try to get an id a couple of time before claiming we are
552 sub->id = ++subscription_id;
554 /* Test for duplicate id. */
555 existing_sub = vmci_event_find(sub->id);
556 if (existing_sub == NULL) {
557 /* We succeeded if we didn't find a duplicate. */
560 vmci_event_release(existing_sub);
564 vmci_create_event(&sub->destroy_event);
565 vmci_list_insert(&subscriber_array[event], sub,
566 subscriber_list_item);
567 result = VMCI_SUCCESS;
569 result = VMCI_ERROR_NO_RESOURCES;
571 vmci_release_lock_bh(&subscriber_lock);
573 #undef VMCI_EVENT_MAX_ATTEMPTS
577 *------------------------------------------------------------------------------
579 * vmci_event_unregister_subscription --
581 * Remove subscription from subscriber list.
584 * struct vmci_subscription when found, NULL otherwise.
589 *------------------------------------------------------------------------------
592 static struct vmci_subscription *
593 vmci_event_unregister_subscription(vmci_id sub_id)
595 struct vmci_subscription *s;
597 if (!vmci_initialized_lock(&subscriber_lock))
600 vmci_grab_lock_bh(&subscriber_lock);
601 s = vmci_event_find(sub_id);
603 vmci_event_release(s);
604 vmci_list_remove(s, subscriber_list_item);
606 vmci_release_lock_bh(&subscriber_lock);
609 vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
610 vmci_destroy_event(&s->destroy_event);
617 *------------------------------------------------------------------------------
619 * vmci_event_subscribe --
621 * Subscribe to given event. The callback specified can be fired in
622 * different contexts depending on what flag is specified while registering.
623 * If flags contains VMCI_FLAG_EVENT_NONE then the callback is fired with
624 * the subscriber lock held (and BH context on the guest). If flags contain
625 * VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no locks held
626 * in thread context. This is useful because other vmci_event functions can
627 * be called, but it also increases the chances that an event will be
631 * VMCI_SUCCESS on success, error code otherwise.
636 *------------------------------------------------------------------------------
640 vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
641 void *callback_data, vmci_id *subscription_id)
644 uint32_t flags = VMCI_FLAG_EVENT_NONE;
645 struct vmci_subscription *s = NULL;
647 if (subscription_id == NULL) {
648 VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
649 return (VMCI_ERROR_INVALID_ARGS);
652 s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
654 return (VMCI_ERROR_NO_MEM);
656 retval = vmci_event_register_subscription(s, event, flags,
657 callback, callback_data);
658 if (retval < VMCI_SUCCESS) {
659 vmci_free_kernel_mem(s, sizeof(*s));
663 *subscription_id = s->id;
668 *------------------------------------------------------------------------------
670 * vmci_event_unsubscribe --
672 * Unsubscribe to given event. Removes it from list and frees it.
673 * Will return callback_data if requested by caller.
676 * VMCI_SUCCESS on success, error code otherwise.
681 *------------------------------------------------------------------------------
685 vmci_event_unsubscribe(vmci_id sub_id)
687 struct vmci_subscription *s;
690 * Return subscription. At this point we know noone else is accessing
691 * the subscription so we can free it.
693 s = vmci_event_unregister_subscription(sub_id);
695 return (VMCI_ERROR_NOT_FOUND);
696 vmci_free_kernel_mem(s, sizeof(*s));
698 return (VMCI_SUCCESS);