2 * Copyright (c) 2018 VMware, Inc.
4 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
7 /* This file implements VMCI Event code. */
10 __FBSDID("$FreeBSD$");
13 #include "vmci_driver.h"
14 #include "vmci_event.h"
15 #include "vmci_kernel_api.h"
16 #include "vmci_kernel_defs.h"
17 #include "vmci_kernel_if.h"
19 #define LGPFX "vmci_event: "
20 #define EVENT_MAGIC 0xEABE0000
/*
 * A single event subscription.  Entries are reference counted; the
 * destroy_event below is signaled when the last reference is released
 * (see vmci_event_release), which lets unregistration wait out any
 * in-flight callbacks.
 */
struct vmci_subscription {
	/* Signaled when the reference count reaches zero. */
	vmci_event destroy_event;
	/* Event type this entry is subscribed to (index into subscriber_array). */
	vmci_event_type event;
	/* Subscriber callback, invoked as callback(id, event data, callback_data). */
	vmci_event_cb callback;
	/* Linkage in the per-event-type subscriber list. */
	vmci_list_item(vmci_subscription) subscriber_list_item;
/* Internal helpers (definitions below). */
static struct vmci_subscription *vmci_event_find(vmci_id sub_id);
static int vmci_event_deliver(struct vmci_event_msg *event_msg);
static int vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags,
    vmci_event_cb callback, void *callback_data);
static struct vmci_subscription *vmci_event_unregister_subscription(

/* One subscriber list per event type, all protected by subscriber_lock. */
static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
static vmci_lock subscriber_lock;
/*
 * Context handed to vmci_event_delayed_dispatch_cb: the target subscription
 * and a private copy of the event payload, so delivery can happen after the
 * originating datagram is gone.
 */
struct vmci_delayed_event_info {
	struct vmci_subscription *sub;
	/* Copy of the event data; sized for the largest possible payload. */
	uint8_t event_payload[sizeof(struct vmci_event_data_max)];
/*
 * Node in the local "no delay" list built by vmci_event_deliver: holds a
 * referenced subscription so its callback can run with subscriber_lock
 * dropped.
 */
struct vmci_event_ref {
	struct vmci_subscription *sub;
	vmci_list_item(vmci_event_ref) list_item;
55 *------------------------------------------------------------------------------
62 * VMCI_SUCCESS on success, appropriate error code otherwise.
67 *------------------------------------------------------------------------------
	/* Start every per-event-type subscriber list out empty. */
	for (i = 0; i < VMCI_EVENT_MAX; i++)
		vmci_list_init(&subscriber_array[i]);

	/* VMCI_SUCCESS, or an error code if lock creation fails. */
	return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
82 *------------------------------------------------------------------------------
94 *------------------------------------------------------------------------------
	struct vmci_subscription *iter, *iter_2;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan_safe(iter, &subscriber_array[e],
		    subscriber_list_item, iter_2) {
			/*
			 * We should never get here because all events should
			 * have been unregistered before we try to unload the
			 * driver module. Also, delayed callbacks could still
			 * be firing so this cleanup would not be safe. Still
			 * it is better to free the memory than not ... so we
			 * leave this code in just in case....
			 */
			vmci_free_kernel_mem(iter, sizeof(*iter));
	vmci_cleanup_lock(&subscriber_lock);
124 *------------------------------------------------------------------------------
128 * Use this as a synchronization point when setting globals, for example,
129 * during device shutdown.
137 *------------------------------------------------------------------------------
141 vmci_event_sync(void)
144 vmci_grab_lock_bh(&subscriber_lock);
145 vmci_release_lock_bh(&subscriber_lock);
149 *------------------------------------------------------------------------------
151 * vmci_event_check_host_capabilities --
153 * Verify that the host supports the hypercalls we need. If it does not,
154 * try to find fallback hypercalls and use those instead.
157 * true if required hypercalls (or fallback hypercalls) are
158 * supported by the host, false otherwise.
163 *------------------------------------------------------------------------------
167 vmci_event_check_host_capabilities(void)
170 /* vmci_event does not require any hypercalls. */
175 *------------------------------------------------------------------------------
179 * Gets a reference to the given struct vmci_subscription.
187 *------------------------------------------------------------------------------
191 vmci_event_get(struct vmci_subscription *entry)
200 *------------------------------------------------------------------------------
202 * vmci_event_release --
204 * Releases the given struct vmci_subscription.
210 * Fires the destroy event if the reference count has gone to zero.
212 *------------------------------------------------------------------------------
216 vmci_event_release(struct vmci_subscription *entry)
220 ASSERT(entry->ref_count > 0);
223 if (entry->ref_count == 0)
224 vmci_signal_event(&entry->destroy_event);
228 *------------------------------------------------------------------------------
230 * event_release_cb --
232 * Callback to release the event entry reference. It is called by the
233 * vmci_wait_on_event function before it blocks.
241 *------------------------------------------------------------------------------
245 event_release_cb(void *client_data)
247 struct vmci_subscription *sub = (struct vmci_subscription *)client_data;
251 vmci_grab_lock_bh(&subscriber_lock);
252 vmci_event_release(sub);
253 vmci_release_lock_bh(&subscriber_lock);
259 *------------------------------------------------------------------------------
263 * Find entry. Assumes lock is held.
266 * Entry if found, NULL if not.
269 * Increments the struct vmci_subscription refcount if an entry is found.
271 *------------------------------------------------------------------------------
274 static struct vmci_subscription *
275 vmci_event_find(vmci_id sub_id)
277 struct vmci_subscription *iter;
280 for (e = 0; e < VMCI_EVENT_MAX; e++) {
281 vmci_list_scan(iter, &subscriber_array[e],
282 subscriber_list_item) {
283 if (iter->id == sub_id) {
284 vmci_event_get(iter);
293 *------------------------------------------------------------------------------
295 * vmci_event_delayed_dispatch_cb --
297 * Calls the specified callback in a delayed context.
305 *------------------------------------------------------------------------------
309 vmci_event_delayed_dispatch_cb(void *data)
311 struct vmci_delayed_event_info *event_info;
312 struct vmci_subscription *sub;
313 struct vmci_event_data *ed;
315 event_info = (struct vmci_delayed_event_info *)data;
318 ASSERT(event_info->sub);
320 sub = event_info->sub;
321 ed = (struct vmci_event_data *)event_info->event_payload;
323 sub->callback(sub->id, ed, sub->callback_data);
325 vmci_grab_lock_bh(&subscriber_lock);
326 vmci_event_release(sub);
327 vmci_release_lock_bh(&subscriber_lock);
329 vmci_free_kernel_mem(event_info, sizeof(*event_info));
333 *------------------------------------------------------------------------------
335 * vmci_event_deliver --
337 * Actually delivers the events to the subscribers.
343 * The callback function for each subscriber is invoked.
345 *------------------------------------------------------------------------------
vmci_event_deliver(struct vmci_event_msg *event_msg)
	struct vmci_subscription *iter;
	int err = VMCI_SUCCESS;

	/* Local list of subscribers whose callbacks run without the lock. */
	vmci_list(vmci_event_ref) no_delay_list;
	vmci_list_init(&no_delay_list);

	/* Walk every subscriber registered for this event type. */
	vmci_grab_lock_bh(&subscriber_lock);
	vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
	    subscriber_list_item) {
		if (iter->run_delayed) {
			/*
			 * Delayed delivery: copy the payload and hand it to
			 * vmci_event_delayed_dispatch_cb to run later.
			 */
			struct vmci_delayed_event_info *event_info;
			    vmci_alloc_kernel_mem(sizeof(*event_info),
			    VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;

			/* Reference dropped by the delayed callback. */
			vmci_event_get(iter);

			memset(event_info, 0, sizeof(*event_info));
			memcpy(event_info->event_payload,
			    VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			event_info->sub = iter;
			    vmci_schedule_delayed_work(
			    vmci_event_delayed_dispatch_cb, event_info);
			if (err != VMCI_SUCCESS) {
				/* Scheduling failed: undo ref and allocation. */
				vmci_event_release(iter);
				vmci_free_kernel_mem(
				    event_info, sizeof(*event_info));
			struct vmci_event_ref *event_ref;

			/*
			 * We construct a local list of subscribers and release
			 * subscriber_lock before invoking the callbacks. This
			 * is similar to delayed callbacks, but callbacks are
			 * invoked right away here.
			 */
			if ((event_ref = vmci_alloc_kernel_mem(
			    sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;

			vmci_event_get(iter);
			event_ref->sub = iter;
			vmci_list_insert(&no_delay_list, event_ref, list_item);
	vmci_release_lock_bh(&subscriber_lock);

	if (!vmci_list_empty(&no_delay_list)) {
		struct vmci_event_data *ed;
		struct vmci_event_ref *iter;
		struct vmci_event_ref *iter_2;

		/* Invoke immediate callbacks now that the lock is dropped. */
		vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
			struct vmci_subscription *cur;
			uint8_t event_payload[sizeof(
			    struct vmci_event_data_max)];

			/*
			 * We set event data before each callback to ensure
			 * isolation.
			 */
			memset(event_payload, 0, sizeof(event_payload));
			memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			ed = (struct vmci_event_data *)event_payload;
			cur->callback(cur->id, ed, cur->callback_data);

			/* Drop the reference taken while the lock was held. */
			vmci_grab_lock_bh(&subscriber_lock);
			vmci_event_release(cur);
			vmci_release_lock_bh(&subscriber_lock);
			vmci_free_kernel_mem(iter, sizeof(*iter));
445 *------------------------------------------------------------------------------
447 * vmci_event_dispatch --
449 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
450 * subscribers for given event.
453 * VMCI_SUCCESS on success, error code otherwise.
458 *------------------------------------------------------------------------------
462 vmci_event_dispatch(struct vmci_datagram *msg)
464 struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
467 msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
468 msg->dst.resource == VMCI_EVENT_HANDLER);
470 if (msg->payload_size < sizeof(vmci_event_type) ||
471 msg->payload_size > sizeof(struct vmci_event_data_max))
472 return (VMCI_ERROR_INVALID_ARGS);
474 if (!VMCI_EVENT_VALID(event_msg->event_data.event))
475 return (VMCI_ERROR_EVENT_UNKNOWN);
477 vmci_event_deliver(event_msg);
479 return (VMCI_SUCCESS);
483 *------------------------------------------------------------------------------
485 * vmci_event_register_subscription --
487 * Initialize and add subscription to subscriber list.
490 * VMCI_SUCCESS on success, error code otherwise.
495 *------------------------------------------------------------------------------
vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags, vmci_event_cb callback,
#define VMCI_EVENT_MAX_ATTEMPTS 10
	/* Monotonically increasing id source shared by all registrations. */
	static vmci_id subscription_id = 0;
	uint32_t attempts = 0;

	/* Validate the event type and the mandatory callback. */
	if (!VMCI_EVENT_VALID(event) || callback == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
		    " (type=%d) (callback=%p) (data=%p).\n",
		    event, callback, callback_data);
		return (VMCI_ERROR_INVALID_ARGS);

	if (!vmci_can_schedule_delayed_work()) {
		/*
		 * If the platform doesn't support delayed work callbacks then
		 * don't allow registration for them.
		 */
		if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
			return (VMCI_ERROR_INVALID_ARGS);
		sub->run_delayed = false;
		/*
		 * The platform supports delayed work callbacks. Honor the
		 * requested flags.
		 */
		sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?

	sub->callback = callback;
	sub->callback_data = callback_data;

	vmci_grab_lock_bh(&subscriber_lock);

	/* Pick a subscription id that is not currently in use. */
	for (success = false, attempts = 0;
	    success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
		struct vmci_subscription *existing_sub = NULL;

		/*
		 * We try to get an id a couple of time before claiming we are
		 * out of resources.
		 */
		sub->id = ++subscription_id;

		/* Test for duplicate id. */
		existing_sub = vmci_event_find(sub->id);
		if (existing_sub == NULL) {
			/* We succeeded if we didn't find a duplicate. */
			/* vmci_event_find took a reference; drop it. */
			vmci_event_release(existing_sub);

	/* Publish the new subscription on its event list. */
	vmci_create_event(&sub->destroy_event);
	vmci_list_insert(&subscriber_array[event], sub,
	    subscriber_list_item);
	result = VMCI_SUCCESS;
		result = VMCI_ERROR_NO_RESOURCES;

	vmci_release_lock_bh(&subscriber_lock);
#undef VMCI_EVENT_MAX_ATTEMPTS
576 *------------------------------------------------------------------------------
578 * vmci_event_unregister_subscription --
580 * Remove subscription from subscriber list.
583 * struct vmci_subscription when found, NULL otherwise.
588 *------------------------------------------------------------------------------
591 static struct vmci_subscription *
592 vmci_event_unregister_subscription(vmci_id sub_id)
594 struct vmci_subscription *s;
596 vmci_grab_lock_bh(&subscriber_lock);
597 s = vmci_event_find(sub_id);
599 vmci_event_release(s);
600 vmci_list_remove(s, subscriber_list_item);
602 vmci_release_lock_bh(&subscriber_lock);
605 vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
606 vmci_destroy_event(&s->destroy_event);
613 *------------------------------------------------------------------------------
615 * vmci_event_subscribe --
617 * Subscribe to given event. The callback specified can be fired in
618 * different contexts depending on what flag is specified while registering.
619 * If flags contains VMCI_FLAG_EVENT_NONE then the callback is fired with
620 * the subscriber lock held (and BH context on the guest). If flags contain
621 * VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no locks held
622 * in thread context. This is useful because other vmci_event functions can
623 * be called, but it also increases the chances that an event will be
627 * VMCI_SUCCESS on success, error code otherwise.
632 *------------------------------------------------------------------------------
636 vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
637 void *callback_data, vmci_id *subscription_id)
640 uint32_t flags = VMCI_FLAG_EVENT_NONE;
641 struct vmci_subscription *s = NULL;
643 if (subscription_id == NULL) {
644 VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
645 return (VMCI_ERROR_INVALID_ARGS);
648 s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
650 return (VMCI_ERROR_NO_MEM);
652 retval = vmci_event_register_subscription(s, event, flags,
653 callback, callback_data);
654 if (retval < VMCI_SUCCESS) {
655 vmci_free_kernel_mem(s, sizeof(*s));
659 *subscription_id = s->id;
664 *------------------------------------------------------------------------------
666 * vmci_event_unsubscribe --
668 * Unsubscribe to given event. Removes it from list and frees it.
669 * Will return callback_data if requested by caller.
672 * VMCI_SUCCESS on success, error code otherwise.
677 *------------------------------------------------------------------------------
681 vmci_event_unsubscribe(vmci_id sub_id)
683 struct vmci_subscription *s;
686 * Return subscription. At this point we know noone else is accessing
687 * the subscription so we can free it.
689 s = vmci_event_unregister_subscription(sub_id);
691 return (VMCI_ERROR_NOT_FOUND);
692 vmci_free_kernel_mem(s, sizeof(*s));
694 return (VMCI_SUCCESS);