/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* Driver for VMware Virtual Machine Communication Interface (VMCI) device. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include "vmci.h"
#include "vmci_doorbell.h"
#include "vmci_driver.h"
#include "vmci_kernel_defs.h"
#include "vmci_queue_pair.h"
31 static int vmci_probe(device_t);
32 static int vmci_attach(device_t);
33 static int vmci_detach(device_t);
34 static int vmci_shutdown(device_t);
36 static int vmci_map_bars(struct vmci_softc *);
37 static void vmci_unmap_bars(struct vmci_softc *);
39 static int vmci_config_capabilities(struct vmci_softc *);
41 static int vmci_dma_malloc_int(struct vmci_softc *, bus_size_t,
42 bus_size_t, struct vmci_dma_alloc *);
43 static void vmci_dma_free_int(struct vmci_softc *,
44 struct vmci_dma_alloc *);
46 static int vmci_config_interrupts(struct vmci_softc *);
47 static int vmci_config_interrupt(struct vmci_softc *);
48 static int vmci_check_intr_cnt(struct vmci_softc *);
49 static int vmci_allocate_interrupt_resources(struct vmci_softc *);
50 static int vmci_setup_interrupts(struct vmci_softc *);
51 static void vmci_dismantle_interrupts(struct vmci_softc *);
52 static void vmci_interrupt(void *);
53 static void vmci_interrupt_bm(void *);
54 static void dispatch_datagrams(void *, int);
55 static void process_bitmap(void *, int);
57 static void vmci_delayed_work_fn_cb(void *context, int data);
59 static device_method_t vmci_methods[] = {
60 /* Device interface. */
61 DEVMETHOD(device_probe, vmci_probe),
62 DEVMETHOD(device_attach, vmci_attach),
63 DEVMETHOD(device_detach, vmci_detach),
64 DEVMETHOD(device_shutdown, vmci_shutdown),
69 static driver_t vmci_driver = {
70 "vmci", vmci_methods, sizeof(struct vmci_softc)
73 static devclass_t vmci_devclass;
74 DRIVER_MODULE(vmci, pci, vmci_driver, vmci_devclass, 0, 0);
75 MODULE_VERSION(vmci, VMCI_VERSION);
77 MODULE_DEPEND(vmci, pci, 1, 1, 1);
79 static struct vmci_softc *vmci_sc;
81 #define LGPFX "vmci: "
83 * Allocate a buffer for incoming datagrams globally to avoid repeated
84 * allocation in the interrupt handler's atomic context.
86 static uint8_t *data_buffer = NULL;
87 static uint32_t data_buffer_size = VMCI_MAX_DG_SIZE;
89 struct vmci_delayed_work_info {
90 vmci_work_fn *work_fn;
92 vmci_list_item(vmci_delayed_work_info) entry;
96 *------------------------------------------------------------------------------
100 * Probe to see if the VMCI device is present.
103 * BUS_PROBE_DEFAULT if device exists, ENXIO otherwise.
108 *------------------------------------------------------------------------------
112 vmci_probe(device_t dev)
115 if (pci_get_vendor(dev) == VMCI_VMWARE_VENDOR_ID &&
116 pci_get_device(dev) == VMCI_VMWARE_DEVICE_ID) {
118 "VMware Virtual Machine Communication Interface");
120 return (BUS_PROBE_DEFAULT);
127 *------------------------------------------------------------------------------
131 * Attach VMCI device to the system after vmci_probe() has been called and
132 * the device has been detected.
135 * 0 if success, ENXIO otherwise.
140 *------------------------------------------------------------------------------
144 vmci_attach(device_t dev)
146 struct vmci_softc *sc;
149 sc = device_get_softc(dev);
154 sc->vmci_num_intr = 0;
155 for (i = 0; i < VMCI_MAX_INTRS; i++) {
156 sc->vmci_intrs[i].vmci_irq = NULL;
157 sc->vmci_intrs[i].vmci_handler = NULL;
160 TASK_INIT(&sc->vmci_interrupt_dq_task, 0, dispatch_datagrams, sc);
161 TASK_INIT(&sc->vmci_interrupt_bm_task, 0, process_bitmap, sc);
163 TASK_INIT(&sc->vmci_delayed_work_task, 0, vmci_delayed_work_fn_cb, sc);
165 pci_enable_busmaster(dev);
167 mtx_init(&sc->vmci_spinlock, "VMCI Spinlock", NULL, MTX_SPIN);
168 mtx_init(&sc->vmci_delayed_work_lock, "VMCI Delayed Work Lock",
171 error = vmci_map_bars(sc);
173 VMCI_LOG_ERROR(LGPFX"Failed to map PCI BARs.\n");
177 error = vmci_config_capabilities(sc);
179 VMCI_LOG_ERROR(LGPFX"Failed to configure capabilities.\n");
183 vmci_list_init(&sc->vmci_delayed_work_infos);
185 vmci_components_init();
187 error = vmci_qp_guest_endpoints_init();
189 VMCI_LOG_ERROR(LGPFX"vmci_qp_guest_endpoints_init failed.\n");
193 error = vmci_config_interrupts(sc);
195 VMCI_LOG_ERROR(LGPFX"Failed to enable interrupts.\n");
207 *------------------------------------------------------------------------------
211 * Detach the VMCI device.
219 *------------------------------------------------------------------------------
223 vmci_detach(device_t dev)
225 struct vmci_softc *sc;
227 sc = device_get_softc(dev);
229 vmci_qp_guest_endpoints_exit();
232 vmci_dismantle_interrupts(sc);
234 vmci_components_cleanup();
236 taskqueue_drain(taskqueue_thread, &sc->vmci_delayed_work_task);
237 mtx_destroy(&sc->vmci_delayed_work_lock);
239 if (sc->vmci_res0 != NULL)
240 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
241 VMCI_CONTROL_ADDR, VMCI_CONTROL_RESET);
243 if (sc->vmci_notifications_bitmap.dma_vaddr != NULL)
244 vmci_dma_free(&sc->vmci_notifications_bitmap);
248 mtx_destroy(&sc->vmci_spinlock);
250 pci_disable_busmaster(dev);
256 *------------------------------------------------------------------------------
260 * This function is called during system shutdown. We don't do anything.
268 *------------------------------------------------------------------------------
272 vmci_shutdown(device_t dev)
279 *------------------------------------------------------------------------------
283 * Maps the PCI I/O and MMIO BARs.
286 * 0 on success, ENXIO otherwise.
291 *------------------------------------------------------------------------------
295 vmci_map_bars(struct vmci_softc *sc)
299 /* Map the PCI I/O BAR: BAR0 */
301 sc->vmci_res0 = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_IOPORT,
303 if (sc->vmci_res0 == NULL) {
304 VMCI_LOG_ERROR(LGPFX"Could not map: BAR0\n");
308 sc->vmci_iot0 = rman_get_bustag(sc->vmci_res0);
309 sc->vmci_ioh0 = rman_get_bushandle(sc->vmci_res0);
310 sc->vmci_ioaddr = rman_get_start(sc->vmci_res0);
312 /* Map the PCI MMIO BAR: BAR1 */
314 sc->vmci_res1 = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_MEMORY,
316 if (sc->vmci_res1 == NULL) {
317 VMCI_LOG_ERROR(LGPFX"Could not map: BAR1\n");
321 sc->vmci_iot1 = rman_get_bustag(sc->vmci_res1);
322 sc->vmci_ioh1 = rman_get_bushandle(sc->vmci_res1);
328 *------------------------------------------------------------------------------
332 * Unmaps the VMCI PCI I/O and MMIO BARs.
340 *------------------------------------------------------------------------------
344 vmci_unmap_bars(struct vmci_softc *sc)
348 if (sc->vmci_res0 != NULL) {
350 bus_release_resource(sc->vmci_dev, SYS_RES_IOPORT, rid,
352 sc->vmci_res0 = NULL;
355 if (sc->vmci_res1 != NULL) {
357 bus_release_resource(sc->vmci_dev, SYS_RES_MEMORY, rid,
359 sc->vmci_res1 = NULL;
364 *------------------------------------------------------------------------------
366 * vmci_config_capabilities --
368 * Check the VMCI device capabilities and configure the device accordingly.
371 * 0 if success, ENODEV otherwise.
374 * Device capabilities are enabled.
376 *------------------------------------------------------------------------------
380 vmci_config_capabilities(struct vmci_softc *sc)
382 unsigned long bitmap_PPN;
386 * Verify that the VMCI device supports the capabilities that we
387 * need. Datagrams are necessary and notifications will be used
388 * if the device supports it.
390 sc->capabilities = bus_space_read_4(sc->vmci_iot0, sc->vmci_ioh0,
393 if ((sc->capabilities & VMCI_CAPS_DATAGRAM) == 0) {
394 VMCI_LOG_ERROR(LGPFX"VMCI device does not support "
399 if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS) {
400 sc->capabilities = VMCI_CAPS_DATAGRAM;
401 error = vmci_dma_malloc(PAGE_SIZE, 1,
402 &sc->vmci_notifications_bitmap);
404 VMCI_LOG_ERROR(LGPFX"Failed to alloc memory for "
405 "notification bitmap.\n");
407 memset(sc->vmci_notifications_bitmap.dma_vaddr, 0,
409 sc->capabilities |= VMCI_CAPS_NOTIFICATIONS;
412 sc->capabilities = VMCI_CAPS_DATAGRAM;
414 /* Let the host know which capabilities we intend to use. */
415 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
416 VMCI_CAPS_ADDR, sc->capabilities);
419 * Register notification bitmap with device if that capability is
422 if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS) {
424 sc->vmci_notifications_bitmap.dma_paddr >> PAGE_SHIFT;
425 vmci_register_notification_bitmap(bitmap_PPN);
428 /* Check host capabilities. */
429 if (!vmci_check_host_capabilities())
436 *------------------------------------------------------------------------------
440 * Callback to receive mapping information resulting from the load of a
441 * bus_dmamap_t via bus_dmamap_load()
449 *------------------------------------------------------------------------------
453 vmci_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
455 bus_addr_t *baddr = arg;
458 *baddr = segs->ds_addr;
462 *------------------------------------------------------------------------------
464 * vmci_dma_malloc_int --
466 * Internal function that allocates DMA memory.
470 * ENOMEM if insufficient memory.
471 * EINPROGRESS if mapping is deferred.
472 * EINVAL if the request was invalid.
475 * DMA memory is allocated.
477 *------------------------------------------------------------------------------
481 vmci_dma_malloc_int(struct vmci_softc *sc, bus_size_t size, bus_size_t align,
482 struct vmci_dma_alloc *dma)
486 bzero(dma, sizeof(struct vmci_dma_alloc));
488 error = bus_dma_tag_create(bus_get_dma_tag(vmci_sc->vmci_dev),
489 align, 0, /* alignment, bounds */
490 BUS_SPACE_MAXADDR, /* lowaddr */
491 BUS_SPACE_MAXADDR, /* highaddr */
492 NULL, NULL, /* filter, filterarg */
495 size, /* maxsegsize */
496 BUS_DMA_ALLOCNOW, /* flags */
498 NULL, /* lockfuncarg */
501 VMCI_LOG_ERROR(LGPFX"bus_dma_tag_create failed: %d\n", error);
505 error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
506 BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
508 VMCI_LOG_ERROR(LGPFX"bus_dmamem_alloc failed: %d\n", error);
512 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
513 size, vmci_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
515 VMCI_LOG_ERROR(LGPFX"bus_dmamap_load failed: %d\n", error);
519 dma->dma_size = size;
529 *------------------------------------------------------------------------------
533 * This function is a wrapper around vmci_dma_malloc_int for callers
534 * outside of this module. Since we only support a single VMCI device, this
535 * wrapper provides access to the device softc structure.
539 * ENOMEM if insufficient memory.
540 * EINPROGRESS if mapping is deferred.
541 * EINVAL if the request was invalid.
544 * DMA memory is allocated.
546 *------------------------------------------------------------------------------
550 vmci_dma_malloc(bus_size_t size, bus_size_t align, struct vmci_dma_alloc *dma)
553 return (vmci_dma_malloc_int(vmci_sc, size, align, dma));
557 *------------------------------------------------------------------------------
559 * vmci_dma_free_int --
561 * Internal function that frees DMA memory.
569 *------------------------------------------------------------------------------
573 vmci_dma_free_int(struct vmci_softc *sc, struct vmci_dma_alloc *dma)
576 if (dma->dma_tag != NULL) {
577 if (dma->dma_paddr != 0) {
578 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
579 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
580 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
583 if (dma->dma_vaddr != NULL)
584 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
587 bus_dma_tag_destroy(dma->dma_tag);
589 bzero(dma, sizeof(struct vmci_dma_alloc));
593 *------------------------------------------------------------------------------
597 * This function is a wrapper around vmci_dma_free_int for callers outside
598 * of this module. Since we only support a single VMCI device, this wrapper
599 * provides access to the device softc structure.
607 *------------------------------------------------------------------------------
611 vmci_dma_free(struct vmci_dma_alloc *dma)
614 vmci_dma_free_int(vmci_sc, dma);
618 *------------------------------------------------------------------------------
620 * vmci_config_interrupts --
622 * Configures and enables interrupts. Try to configure MSI-X. If this fails,
623 * try to configure MSI. If even this fails, try legacy interrupts.
627 * ENOMEM if insufficient memory.
628 * ENODEV if the device doesn't support interrupts.
629 * ENXIO if the device configuration failed.
632 * Interrupts get enabled if successful.
634 *------------------------------------------------------------------------------
638 vmci_config_interrupts(struct vmci_softc *sc)
642 data_buffer = malloc(data_buffer_size, M_DEVBUF, M_ZERO | M_NOWAIT);
643 if (data_buffer == NULL)
646 sc->vmci_intr_type = VMCI_INTR_TYPE_MSIX;
647 error = vmci_config_interrupt(sc);
649 sc->vmci_intr_type = VMCI_INTR_TYPE_MSI;
650 error = vmci_config_interrupt(sc);
653 sc->vmci_intr_type = VMCI_INTR_TYPE_INTX;
654 error = vmci_config_interrupt(sc);
659 /* Enable specific interrupt bits. */
660 if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS)
661 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
662 VMCI_IMR_ADDR, VMCI_IMR_DATAGRAM | VMCI_IMR_NOTIFICATION);
664 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
665 VMCI_IMR_ADDR, VMCI_IMR_DATAGRAM);
667 /* Enable interrupts. */
668 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
669 VMCI_CONTROL_ADDR, VMCI_CONTROL_INT_ENABLE);
675 *------------------------------------------------------------------------------
677 * vmci_config_interrupt --
679 * Check the number of interrupts supported, allocate resources and setup
684 * ENOMEM if insufficient memory.
685 * ENODEV if the device doesn't support interrupts.
686 * ENXIO if the device configuration failed.
689 * Resources get allocated and interrupts get setup (but not enabled) if
692 *------------------------------------------------------------------------------
696 vmci_config_interrupt(struct vmci_softc *sc)
700 error = vmci_check_intr_cnt(sc);
704 error = vmci_allocate_interrupt_resources(sc);
708 error = vmci_setup_interrupts(sc);
716 *------------------------------------------------------------------------------
718 * vmci_check_intr_cnt --
720 * Check the number of interrupts supported by the device and ask PCI bus
721 * to allocate appropriate number of interrupts.
725 * ENODEV if the device doesn't support any interrupts.
726 * ENXIO if the device configuration failed.
729 * Resources get allocated on success.
731 *------------------------------------------------------------------------------
735 vmci_check_intr_cnt(struct vmci_softc *sc)
738 if (sc->vmci_intr_type == VMCI_INTR_TYPE_INTX) {
739 sc->vmci_num_intr = 1;
744 * Make sure that the device supports the required number of MSI/MSI-X
745 * messages. We try for 2 MSI-X messages but 1 is good too. We need at
746 * least 1 MSI message.
748 sc->vmci_num_intr = (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) ?
749 pci_msix_count(sc->vmci_dev) : pci_msi_count(sc->vmci_dev);
751 if (!sc->vmci_num_intr) {
752 VMCI_LOG_ERROR(LGPFX"Device does not support any interrupt"
757 sc->vmci_num_intr = (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) ?
759 if (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) {
760 if (pci_alloc_msix(sc->vmci_dev, &sc->vmci_num_intr))
762 } else if (sc->vmci_intr_type == VMCI_INTR_TYPE_MSI) {
763 if (pci_alloc_msi(sc->vmci_dev, &sc->vmci_num_intr))
771 *------------------------------------------------------------------------------
773 * vmci_allocate_interrupt_resources --
775 * Allocate resources necessary for interrupts.
778 * 0 if success, ENXIO otherwise.
781 * Resources get allocated on success.
783 *------------------------------------------------------------------------------
787 vmci_allocate_interrupt_resources(struct vmci_softc *sc)
789 struct resource *irq;
793 flags |= (sc->vmci_num_intr == 1) ? RF_SHAREABLE : 0;
794 rid = (sc->vmci_intr_type == VMCI_INTR_TYPE_INTX) ? 0 : 1;
796 for (i = 0; i < sc->vmci_num_intr; i++, rid++) {
797 irq = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_IRQ, &rid,
801 sc->vmci_intrs[i].vmci_irq = irq;
802 sc->vmci_intrs[i].vmci_rid = rid;
809 *------------------------------------------------------------------------------
811 * vmci_setup_interrupts --
813 * Sets up the interrupts.
816 * 0 if success, appropriate error code from bus_setup_intr otherwise.
819 * Interrupt handler gets attached.
821 *------------------------------------------------------------------------------
825 vmci_setup_interrupts(struct vmci_softc *sc)
827 struct vmci_interrupt *intr;
830 flags = INTR_TYPE_NET | INTR_MPSAFE;
831 if (sc->vmci_num_intr > 1)
834 intr = &sc->vmci_intrs[0];
835 error = bus_setup_intr(sc->vmci_dev, intr->vmci_irq, flags, NULL,
836 vmci_interrupt, NULL, &intr->vmci_handler);
839 bus_describe_intr(sc->vmci_dev, intr->vmci_irq, intr->vmci_handler,
842 if (sc->vmci_num_intr == 2) {
843 intr = &sc->vmci_intrs[1];
844 error = bus_setup_intr(sc->vmci_dev, intr->vmci_irq, flags,
845 NULL, vmci_interrupt_bm, NULL, &intr->vmci_handler);
848 bus_describe_intr(sc->vmci_dev, intr->vmci_irq,
849 intr->vmci_handler, "vmci_interrupt_bm");
856 *------------------------------------------------------------------------------
860 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
861 * interrupt (vector VMCI_INTR_DATAGRAM).
869 *------------------------------------------------------------------------------
873 vmci_interrupt(void *arg)
876 if (vmci_sc->vmci_num_intr == 2)
877 taskqueue_enqueue(taskqueue_swi,
878 &vmci_sc->vmci_interrupt_dq_task);
882 icr = inl(vmci_sc->vmci_ioaddr + VMCI_ICR_ADDR);
883 if (icr == 0 || icr == 0xffffffff)
885 if (icr & VMCI_ICR_DATAGRAM) {
886 taskqueue_enqueue(taskqueue_swi,
887 &vmci_sc->vmci_interrupt_dq_task);
888 icr &= ~VMCI_ICR_DATAGRAM;
890 if (icr & VMCI_ICR_NOTIFICATION) {
891 taskqueue_enqueue(taskqueue_swi,
892 &vmci_sc->vmci_interrupt_bm_task);
893 icr &= ~VMCI_ICR_NOTIFICATION;
896 VMCI_LOG_INFO(LGPFX"Ignoring unknown interrupt "
902 *------------------------------------------------------------------------------
904 * vmci_interrupt_bm --
906 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
907 * which is for the notification bitmap. Will only get called if we are
908 * using MSI-X with exclusive vectors.
916 *------------------------------------------------------------------------------
920 vmci_interrupt_bm(void *arg)
923 ASSERT(vmci_sc->vmci_num_intr == 2);
924 taskqueue_enqueue(taskqueue_swi, &vmci_sc->vmci_interrupt_bm_task);
928 *------------------------------------------------------------------------------
930 * dispatch_datagrams --
932 * Reads and dispatches incoming datagrams.
938 * Reads data from the device.
940 *------------------------------------------------------------------------------
944 dispatch_datagrams(void *context, int data)
947 if (data_buffer == NULL)
948 VMCI_LOG_INFO(LGPFX"dispatch_datagrams(): no buffer "
951 vmci_read_datagrams_from_port((vmci_io_handle) 0,
952 vmci_sc->vmci_ioaddr + VMCI_DATA_IN_ADDR,
953 data_buffer, data_buffer_size);
957 *------------------------------------------------------------------------------
961 * Scans the notification bitmap for raised flags, clears them and handles
970 *------------------------------------------------------------------------------
974 process_bitmap(void *context, int data)
977 if (vmci_sc->vmci_notifications_bitmap.dma_vaddr == NULL)
978 VMCI_LOG_INFO(LGPFX"process_bitmaps(): no bitmap present");
980 vmci_scan_notification_bitmap(
981 vmci_sc->vmci_notifications_bitmap.dma_vaddr);
985 *------------------------------------------------------------------------------
987 * vmci_dismantle_interrupts --
989 * Releases resources, detaches the interrupt handler and drains the task
996 * No more interrupts.
998 *------------------------------------------------------------------------------
1002 vmci_dismantle_interrupts(struct vmci_softc *sc)
1004 struct vmci_interrupt *intr;
1007 for (i = 0; i < sc->vmci_num_intr; i++) {
1008 intr = &sc->vmci_intrs[i];
1009 if (intr->vmci_handler != NULL) {
1010 bus_teardown_intr(sc->vmci_dev, intr->vmci_irq,
1011 intr->vmci_handler);
1012 intr->vmci_handler = NULL;
1014 if (intr->vmci_irq != NULL) {
1015 bus_release_resource(sc->vmci_dev, SYS_RES_IRQ,
1016 intr->vmci_rid, intr->vmci_irq);
1017 intr->vmci_irq = NULL;
1018 intr->vmci_rid = -1;
1022 if ((sc->vmci_intr_type != VMCI_INTR_TYPE_INTX) &&
1023 (sc->vmci_num_intr))
1024 pci_release_msi(sc->vmci_dev);
1026 taskqueue_drain(taskqueue_swi, &sc->vmci_interrupt_dq_task);
1027 taskqueue_drain(taskqueue_swi, &sc->vmci_interrupt_bm_task);
1029 if (data_buffer != NULL)
1030 free(data_buffer, M_DEVBUF);
1034 *------------------------------------------------------------------------------
1036 * vmci_delayed_work_fn_cb --
1038 * Callback function that executes the queued up delayed work functions.
1046 *------------------------------------------------------------------------------
1050 vmci_delayed_work_fn_cb(void *context, int data)
1052 vmci_list(vmci_delayed_work_info) temp_list;
1054 vmci_list_init(&temp_list);
1057 * Swap vmci_delayed_work_infos list with the empty temp_list while
1058 * holding a lock. vmci_delayed_work_infos would then be an empty list
1059 * and temp_list would contain the elements from the original
1060 * vmci_delayed_work_infos. Finally, iterate through temp_list
1061 * executing the delayed callbacks.
1064 mtx_lock(&vmci_sc->vmci_delayed_work_lock);
1065 vmci_list_swap(&temp_list, &vmci_sc->vmci_delayed_work_infos,
1066 vmci_delayed_work_info, entry);
1067 mtx_unlock(&vmci_sc->vmci_delayed_work_lock);
1069 while (!vmci_list_empty(&temp_list)) {
1070 struct vmci_delayed_work_info *delayed_work_info =
1071 vmci_list_first(&temp_list);
1073 delayed_work_info->work_fn(delayed_work_info->data);
1075 vmci_list_remove(delayed_work_info, entry);
1076 vmci_free_kernel_mem(delayed_work_info,
1077 sizeof(*delayed_work_info));
1082 *------------------------------------------------------------------------------
1084 * vmci_schedule_delayed_work_fn --
1086 * Schedule the specified callback.
1089 * 0 if success, error code otherwise.
1094 *------------------------------------------------------------------------------
1098 vmci_schedule_delayed_work_fn(vmci_work_fn *work_fn, void *data)
1100 struct vmci_delayed_work_info *delayed_work_info;
1102 delayed_work_info = vmci_alloc_kernel_mem(sizeof(*delayed_work_info),
1103 VMCI_MEMORY_ATOMIC);
1105 if (!delayed_work_info)
1106 return (VMCI_ERROR_NO_MEM);
1108 delayed_work_info->work_fn = work_fn;
1109 delayed_work_info->data = data;
1110 mtx_lock(&vmci_sc->vmci_delayed_work_lock);
1111 vmci_list_insert(&vmci_sc->vmci_delayed_work_infos,
1112 delayed_work_info, entry);
1113 mtx_unlock(&vmci_sc->vmci_delayed_work_lock);
1115 taskqueue_enqueue(taskqueue_thread,
1116 &vmci_sc->vmci_delayed_work_task);
1118 return (VMCI_SUCCESS);
1122 *------------------------------------------------------------------------------
1124 * vmci_send_datagram --
1126 * VM to hypervisor call mechanism.
1129 * The result of the hypercall.
1134 *------------------------------------------------------------------------------
1138 vmci_send_datagram(struct vmci_datagram *dg)
1143 return (VMCI_ERROR_INVALID_ARGS);
1146 * Need to acquire spinlock on the device because
1147 * the datagram data may be spread over multiple pages and the monitor
1148 * may interleave device user rpc calls from multiple VCPUs. Acquiring
1149 * the spinlock precludes that possibility. Disabling interrupts to
1150 * avoid incoming datagrams during a "rep out" and possibly landing up
1153 mtx_lock_spin(&vmci_sc->vmci_spinlock);
1156 * Send the datagram and retrieve the return value from the result
1159 __asm__ __volatile__(
1163 : "d"(vmci_sc->vmci_ioaddr + VMCI_DATA_OUT_ADDR),
1164 "c"(VMCI_DG_SIZE(dg)), "S"(dg)
1168 * XXX: Should read result high port as well when updating handlers to
1172 result = bus_space_read_4(vmci_sc->vmci_iot0,
1173 vmci_sc->vmci_ioh0, VMCI_RESULT_LOW_ADDR);
1174 mtx_unlock_spin(&vmci_sc->vmci_spinlock);