2 * Copyright (c) 2018 VMware, Inc.
4 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
7 /* Driver for VMware Virtual Machine Communication Interface (VMCI) device. */
10 __FBSDID("$FreeBSD$");
12 #include <sys/types.h>
14 #include <sys/kernel.h>
15 #include <sys/malloc.h>
16 #include <sys/module.h>
18 #include <sys/systm.h>
20 #include <dev/pci/pcireg.h>
21 #include <dev/pci/pcivar.h>
23 #include <machine/bus.h>
26 #include "vmci_doorbell.h"
27 #include "vmci_driver.h"
28 #include "vmci_kernel_defs.h"
29 #include "vmci_queue_pair.h"
/* Newbus device interface entry points. */
31 static int vmci_probe(device_t);
32 static int vmci_attach(device_t);
33 static int vmci_detach(device_t);
34 static int vmci_shutdown(device_t);
/* PCI BAR mapping and capability negotiation helpers. */
36 static int vmci_map_bars(struct vmci_softc *);
37 static void vmci_unmap_bars(struct vmci_softc *);
39 static int vmci_config_capabilities(struct vmci_softc *);
/* Internal DMA alloc/free; the public wrappers reach them via vmci_sc. */
41 static int vmci_dma_malloc_int(struct vmci_softc *, bus_size_t,
42 bus_size_t, struct vmci_dma_alloc *);
43 static void vmci_dma_free_int(struct vmci_softc *,
44 struct vmci_dma_alloc *);
/* Interrupt configuration (MSI-X/MSI/INTx), handlers and taskqueue tasks. */
46 static int vmci_config_interrupts(struct vmci_softc *);
47 static int vmci_config_interrupt(struct vmci_softc *);
48 static int vmci_check_intr_cnt(struct vmci_softc *);
49 static int vmci_allocate_interrupt_resources(struct vmci_softc *);
50 static int vmci_setup_interrupts(struct vmci_softc *);
51 static void vmci_dismantle_interrupts(struct vmci_softc *);
52 static void vmci_interrupt(void *);
53 static void vmci_interrupt_bm(void *);
54 static void dispatch_datagrams(void *, int);
55 static void process_bitmap(void *, int);
/* Taskqueue callback that drains the queued delayed-work functions. */
57 static void vmci_delayed_work_fn_cb(void *context, int data);
/* Newbus method table wiring the driver entry points above. */
59 static device_method_t vmci_methods[] = {
60 /* Device interface. */
61 DEVMETHOD(device_probe, vmci_probe),
62 DEVMETHOD(device_attach, vmci_attach),
63 DEVMETHOD(device_detach, vmci_detach),
64 DEVMETHOD(device_shutdown, vmci_shutdown),
/* NOTE(review): DEVMETHOD_END and the closing brace are not visible in this
 * extract — confirm against upstream sys/dev/vmware/vmci/vmci.c. */
/* Driver/devclass registration on the PCI bus; module version export. */
69 static driver_t vmci_driver = {
70 "vmci", vmci_methods, sizeof(struct vmci_softc)
73 static devclass_t vmci_devclass;
74 DRIVER_MODULE(vmci, pci, vmci_driver, vmci_devclass, 0, 0);
75 MODULE_VERSION(vmci, VMCI_VERSION);
/* PCI id table entry used by vmci_probe(); also feeds MODULE_PNP_INFO. */
81 { VMCI_VMWARE_VENDOR_ID, VMCI_VMWARE_DEVICE_ID,
82 "VMware Virtual Machine Communication Interface" },
84 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmci, vmci_ids,
87 MODULE_DEPEND(vmci, pci, 1, 1, 1);
/* Single-instance softc pointer; the device is unique per VM, so helper
 * wrappers (vmci_dma_malloc/free, interrupt handlers) reach state via it. */
89 static struct vmci_softc *vmci_sc;
91 #define LGPFX "vmci: "
93 * Allocate a buffer for incoming datagrams globally to avoid repeated
94 * allocation in the interrupt handler's atomic context.
96 static uint8_t *data_buffer = NULL;
97 static uint32_t data_buffer_size = VMCI_MAX_DG_SIZE;
/* One queued delayed-work callback; linked into vmci_delayed_work_infos.
 * NOTE(review): the `void *data` member and closing brace are not visible
 * in this extract — confirm against upstream. */
99 struct vmci_delayed_work_info {
100 vmci_work_fn *work_fn;
102 vmci_list_item(vmci_delayed_work_info) entry;
106 *------------------------------------------------------------------------------
110 * Probe to see if the VMCI device is present.
113 * BUS_PROBE_DEFAULT if device exists, ENXIO otherwise.
118 *------------------------------------------------------------------------------
122 vmci_probe(device_t dev)
125 if (pci_get_vendor(dev) == vmci_ids[0].vendor &&
126 pci_get_device(dev) == vmci_ids[0].device) {
127 device_set_desc(dev, vmci_ids[0].desc);
129 return (BUS_PROBE_DEFAULT);
136 *------------------------------------------------------------------------------
140 * Attach VMCI device to the system after vmci_probe() has been called and
141 * the device has been detected.
144 * 0 if success, ENXIO otherwise.
149 *------------------------------------------------------------------------------
153 vmci_attach(device_t dev)
155 	struct vmci_softc *sc;
158 	sc = device_get_softc(dev);
/* Start with no interrupts allocated; cleared slots let detach know what
 * to release. */
163 	sc->vmci_num_intr = 0;
164 	for (i = 0; i < VMCI_MAX_INTRS; i++) {
165 		sc->vmci_intrs[i].vmci_irq = NULL;
166 		sc->vmci_intrs[i].vmci_handler = NULL;
/* Tasks run outside interrupt context: datagram dispatch, notification
 * bitmap scan, and the delayed-work drain. */
169 	TASK_INIT(&sc->vmci_interrupt_dq_task, 0, dispatch_datagrams, sc);
170 	TASK_INIT(&sc->vmci_interrupt_bm_task, 0, process_bitmap, sc);
172 	TASK_INIT(&sc->vmci_delayed_work_task, 0, vmci_delayed_work_fn_cb, sc);
174 	pci_enable_busmaster(dev);
/* Spin lock guards the datagram send hypercall; sleepable mutex guards the
 * delayed-work list. */
176 	mtx_init(&sc->vmci_spinlock, "VMCI Spinlock", NULL, MTX_SPIN);
177 	mtx_init(&sc->vmci_delayed_work_lock, "VMCI Delayed Work Lock",
180 	error = vmci_map_bars(sc);
182 		VMCI_LOG_ERROR(LGPFX"Failed to map PCI BARs.\n");
186 	error = vmci_config_capabilities(sc);
188 		VMCI_LOG_ERROR(LGPFX"Failed to configure capabilities.\n");
192 	vmci_list_init(&sc->vmci_delayed_work_infos);
194 	vmci_components_init();
196 	error = vmci_qp_guest_endpoints_init();
198 		VMCI_LOG_ERROR(LGPFX"vmci_qp_guest_endpoints_init failed.\n");
202 	error = vmci_config_interrupts(sc);
204 		VMCI_LOG_ERROR(LGPFX"Failed to enable interrupts.\n");
/* NOTE(review): this extract is lossy — the error-unwind paths after each
 * failed step, the vmci_sc assignment and the success return are missing.
 * Verify against upstream sys/dev/vmware/vmci/vmci.c before editing logic. */
216 *------------------------------------------------------------------------------
220 * Detach the VMCI device.
228 *------------------------------------------------------------------------------
232 vmci_detach(device_t dev)
234 struct vmci_softc *sc;
236 sc = device_get_softc(dev);
238 vmci_qp_guest_endpoints_exit();
241 vmci_dismantle_interrupts(sc);
243 vmci_components_cleanup();
245 if mtx_initialized(&sc->vmci_spinlock) {
246 taskqueue_drain(taskqueue_thread, &sc->vmci_delayed_work_task);
247 mtx_destroy(&sc->vmci_delayed_work_lock);
250 if (sc->vmci_res0 != NULL)
251 bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
252 VMCI_CONTROL_ADDR, VMCI_CONTROL_RESET);
254 if (sc->vmci_notifications_bitmap.dma_vaddr != NULL)
255 vmci_dma_free(&sc->vmci_notifications_bitmap);
259 if mtx_initialized(&sc->vmci_spinlock)
260 mtx_destroy(&sc->vmci_spinlock);
262 pci_disable_busmaster(dev);
268 *------------------------------------------------------------------------------
272 * This function is called during system shutdown. We don't do anything.
280 *------------------------------------------------------------------------------
284 vmci_shutdown(device_t dev)
291 *------------------------------------------------------------------------------
295 * Maps the PCI I/O and MMIO BARs.
298 * 0 on success, ENXIO otherwise.
303 *------------------------------------------------------------------------------
307 vmci_map_bars(struct vmci_softc *sc)
311 	/* Map the PCI I/O BAR: BAR0 */
313 	sc->vmci_res0 = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_IOPORT,
315 	if (sc->vmci_res0 == NULL) {
316 		VMCI_LOG_ERROR(LGPFX"Could not map: BAR0\n");
/* Cache bus tag/handle and the I/O port base for bus_space and inl() use. */
320 	sc->vmci_iot0 = rman_get_bustag(sc->vmci_res0);
321 	sc->vmci_ioh0 = rman_get_bushandle(sc->vmci_res0);
322 	sc->vmci_ioaddr = rman_get_start(sc->vmci_res0);
324 	/* Map the PCI MMIO BAR: BAR1 */
326 	sc->vmci_res1 = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_MEMORY,
328 	if (sc->vmci_res1 == NULL) {
329 		VMCI_LOG_ERROR(LGPFX"Could not map: BAR1\n");
333 	sc->vmci_iot1 = rman_get_bustag(sc->vmci_res1);
334 	sc->vmci_ioh1 = rman_get_bushandle(sc->vmci_res1);
/* NOTE(review): rid setup, the ENXIO returns and final return 0 are missing
 * from this extract — verify against upstream. */
340 *------------------------------------------------------------------------------
344 * Unmaps the VMCI PCI I/O and MMIO BARs.
352 *------------------------------------------------------------------------------
356 vmci_unmap_bars(struct vmci_softc *sc)
/* Release each BAR only if it was mapped; NULL the pointers so a second
 * call (or detach after partial attach) is safe. */
360 	if (sc->vmci_res0 != NULL) {
362 		bus_release_resource(sc->vmci_dev, SYS_RES_IOPORT, rid,
364 		sc->vmci_res0 = NULL;
367 	if (sc->vmci_res1 != NULL) {
369 		bus_release_resource(sc->vmci_dev, SYS_RES_MEMORY, rid,
371 		sc->vmci_res1 = NULL;
/* NOTE(review): the rid assignments (PCIR_BAR(0)/PCIR_BAR(1)) are missing
 * from this extract — verify against upstream. */
376 *------------------------------------------------------------------------------
378 * vmci_config_capabilities --
380 * Check the VMCI device capabilities and configure the device accordingly.
383 * 0 if success, ENODEV otherwise.
386 * Device capabilities are enabled.
388 *------------------------------------------------------------------------------
392 vmci_config_capabilities(struct vmci_softc *sc)
394 	unsigned long bitmap_PPN;
398 	 * Verify that the VMCI device supports the capabilities that we
399 	 * need. Datagrams are necessary and notifications will be used
400 	 * if the device supports it.
402 	sc->capabilities = bus_space_read_4(sc->vmci_iot0, sc->vmci_ioh0,
405 	if ((sc->capabilities & VMCI_CAPS_DATAGRAM) == 0) {
406 		VMCI_LOG_ERROR(LGPFX"VMCI device does not support "
/* Notifications are optional: allocate a page-sized DMA bitmap; on alloc
 * failure fall back to datagrams only. */
411 	if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS) {
412 		sc->capabilities = VMCI_CAPS_DATAGRAM;
413 		error = vmci_dma_malloc(PAGE_SIZE, 1,
414 		    &sc->vmci_notifications_bitmap);
416 			VMCI_LOG_ERROR(LGPFX"Failed to alloc memory for "
417 			    "notification bitmap.\n");
419 			memset(sc->vmci_notifications_bitmap.dma_vaddr, 0,
421 			sc->capabilities |= VMCI_CAPS_NOTIFICATIONS;
424 		sc->capabilities = VMCI_CAPS_DATAGRAM;
426 	/* Let the host know which capabilities we intend to use. */
427 	bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
428 	    VMCI_CAPS_ADDR, sc->capabilities);
431 	 * Register notification bitmap with device if that capability is
434 	if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS) {
/* Hand the host the bitmap's physical page number. */
436 		    sc->vmci_notifications_bitmap.dma_paddr >> PAGE_SHIFT;
437 		vmci_register_notification_bitmap(bitmap_PPN);
440 	/* Check host capabilities. */
441 	if (!vmci_check_host_capabilities())
/* NOTE(review): ENODEV returns and the success return are missing from this
 * extract — verify against upstream. */
448 *------------------------------------------------------------------------------
452 * Callback to receive mapping information resulting from the load of a
453 * bus_dmamap_t via bus_dmamap_load()
461 *------------------------------------------------------------------------------
465 vmci_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
467 bus_addr_t *baddr = arg;
470 *baddr = segs->ds_addr;
474 *------------------------------------------------------------------------------
476 * vmci_dma_malloc_int --
478 * Internal function that allocates DMA memory.
482 * ENOMEM if insufficient memory.
483 * EINPROGRESS if mapping is deferred.
484 * EINVAL if the request was invalid.
487 * DMA memory is allocated.
489 *------------------------------------------------------------------------------
493 vmci_dma_malloc_int(struct vmci_softc *sc, bus_size_t size, bus_size_t align,
494     struct vmci_dma_alloc *dma)
/* Clear the descriptor first so callers can safely free a partial alloc. */
498 	bzero(dma, sizeof(struct vmci_dma_alloc));
/* One tag per allocation: single segment, caller-specified alignment. */
500 	error = bus_dma_tag_create(bus_get_dma_tag(vmci_sc->vmci_dev),
501 	    align, 0,		/* alignment, bounds */
502 	    BUS_SPACE_MAXADDR,	/* lowaddr */
503 	    BUS_SPACE_MAXADDR,	/* highaddr */
504 	    NULL, NULL,		/* filter, filterarg */
507 	    size,		/* maxsegsize */
508 	    BUS_DMA_ALLOCNOW,	/* flags */
510 	    NULL,		/* lockfuncarg */
513 		VMCI_LOG_ERROR(LGPFX"bus_dma_tag_create failed: %d\n", error);
517 	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
518 	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
520 		VMCI_LOG_ERROR(LGPFX"bus_dmamem_alloc failed: %d\n", error);
/* Load fills dma_paddr via vmci_dmamap_cb. */
524 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
525 	    size, vmci_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
527 		VMCI_LOG_ERROR(LGPFX"bus_dmamap_load failed: %d\n", error);
531 	dma->dma_size = size;
/* NOTE(review): the error-cleanup paths (freeing tag/mem on failure) and
 * the final return are missing from this extract — verify against upstream. */
541 *------------------------------------------------------------------------------
545 * This function is a wrapper around vmci_dma_malloc_int for callers
546 * outside of this module. Since we only support a single VMCI device, this
547 * wrapper provides access to the device softc structure.
551 * ENOMEM if insufficient memory.
552 * EINPROGRESS if mapping is deferred.
553 * EINVAL if the request was invalid.
556 * DMA memory is allocated.
558 *------------------------------------------------------------------------------
562 vmci_dma_malloc(bus_size_t size, bus_size_t align, struct vmci_dma_alloc *dma)
565 return (vmci_dma_malloc_int(vmci_sc, size, align, dma));
569 *------------------------------------------------------------------------------
571 * vmci_dma_free_int --
573 * Internal function that frees DMA memory.
581 *------------------------------------------------------------------------------
585 vmci_dma_free_int(struct vmci_softc *sc, struct vmci_dma_alloc *dma)
/* Unwind in reverse order of vmci_dma_malloc_int: sync+unload the map,
 * free the memory, destroy the tag. Safe on partially-built descriptors. */
588 	if (dma->dma_tag != NULL) {
589 		if (dma->dma_paddr != 0) {
590 			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
591 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
592 			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
595 		if (dma->dma_vaddr != NULL)
596 			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
599 		bus_dma_tag_destroy(dma->dma_tag);
/* Re-zero the descriptor so a double free is harmless. */
601 	bzero(dma, sizeof(struct vmci_dma_alloc));
605 *------------------------------------------------------------------------------
609 * This function is a wrapper around vmci_dma_free_int for callers outside
610 * of this module. Since we only support a single VMCI device, this wrapper
611 * provides access to the device softc structure.
619 *------------------------------------------------------------------------------
623 vmci_dma_free(struct vmci_dma_alloc *dma)
626 vmci_dma_free_int(vmci_sc, dma);
630 *------------------------------------------------------------------------------
632 * vmci_config_interrupts --
634 * Configures and enables interrupts. Try to configure MSI-X. If this fails,
635 * try to configure MSI. If even this fails, try legacy interrupts.
639 * ENOMEM if insufficient memory.
640 * ENODEV if the device doesn't support interrupts.
641 * ENXIO if the device configuration failed.
644 * Interrupts get enabled if successful.
646 *------------------------------------------------------------------------------
650 vmci_config_interrupts(struct vmci_softc *sc)
/* Pre-allocate the datagram receive buffer; the handler's taskqueue
 * context must not allocate. */
654 	data_buffer = malloc(data_buffer_size, M_DEVBUF, M_ZERO | M_NOWAIT);
655 	if (data_buffer == NULL)
/* Fall through MSI-X -> MSI -> INTx until one configures. */
658 	sc->vmci_intr_type = VMCI_INTR_TYPE_MSIX;
659 	error = vmci_config_interrupt(sc);
661 		sc->vmci_intr_type = VMCI_INTR_TYPE_MSI;
662 		error = vmci_config_interrupt(sc);
665 		sc->vmci_intr_type = VMCI_INTR_TYPE_INTX;
666 		error = vmci_config_interrupt(sc);
671 	/* Enable specific interrupt bits. */
672 	if (sc->capabilities & VMCI_CAPS_NOTIFICATIONS)
673 		bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
674 		    VMCI_IMR_ADDR, VMCI_IMR_DATAGRAM | VMCI_IMR_NOTIFICATION);
676 		bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
677 		    VMCI_IMR_ADDR, VMCI_IMR_DATAGRAM);
679 	/* Enable interrupts. */
680 	bus_space_write_4(sc->vmci_iot0, sc->vmci_ioh0,
681 	    VMCI_CONTROL_ADDR, VMCI_CONTROL_INT_ENABLE);
/* NOTE(review): the ENOMEM return, the failure return after all three
 * attempts, the else keyword before line 676 and the success return are
 * missing from this extract — verify against upstream. */
687 *------------------------------------------------------------------------------
689 * vmci_config_interrupt --
691 * Check the number of interrupts supported, allocate resources and setup
696 * ENOMEM if insufficient memory.
697 * ENODEV if the device doesn't support interrupts.
698 * ENXIO if the device configuration failed.
701 * Resources get allocated and interrupts get setup (but not enabled) if
704 *------------------------------------------------------------------------------
708 vmci_config_interrupt(struct vmci_softc *sc)
/* Three-stage pipeline; each stage returns nonzero to abort. */
712 	error = vmci_check_intr_cnt(sc);
716 	error = vmci_allocate_interrupt_resources(sc);
720 	error = vmci_setup_interrupts(sc);
/* NOTE(review): the error-check/early-return lines between the calls and
 * the final return are missing from this extract — verify against upstream. */
728 *------------------------------------------------------------------------------
730 * vmci_check_intr_cnt --
732 * Check the number of interrupts supported by the device and ask PCI bus
733 * to allocate appropriate number of interrupts.
737 * ENODEV if the device doesn't support any interrupts.
738 * ENXIO if the device configuration failed.
741 * Resources get allocated on success.
743 *------------------------------------------------------------------------------
747 vmci_check_intr_cnt(struct vmci_softc *sc)
/* Legacy INTx needs no message allocation: exactly one interrupt. */
750 	if (sc->vmci_intr_type == VMCI_INTR_TYPE_INTX) {
751 		sc->vmci_num_intr = 1;
756 	 * Make sure that the device supports the required number of MSI/MSI-X
757 	 * messages. We try for 2 MSI-X messages but 1 is good too. We need at
758 	 * least 1 MSI message.
760 	sc->vmci_num_intr = (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) ?
761 	    pci_msix_count(sc->vmci_dev) : pci_msi_count(sc->vmci_dev);
763 	if (!sc->vmci_num_intr) {
764 		VMCI_LOG_ERROR(LGPFX"Device does not support any interrupt"
/* Clamp to the desired count (2 for MSI-X, 1 for MSI) before allocating. */
769 	sc->vmci_num_intr = (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) ?
771 	if (sc->vmci_intr_type == VMCI_INTR_TYPE_MSIX) {
772 		if (pci_alloc_msix(sc->vmci_dev, &sc->vmci_num_intr))
774 	} else if (sc->vmci_intr_type == VMCI_INTR_TYPE_MSI) {
775 		if (pci_alloc_msi(sc->vmci_dev, &sc->vmci_num_intr))
/* NOTE(review): the ENODEV/ENXIO returns and the success return are missing
 * from this extract — verify against upstream. */
783 *------------------------------------------------------------------------------
785 * vmci_allocate_interrupt_resources --
787 * Allocate resources necessary for interrupts.
790 * 0 if success, ENXIO otherwise.
793 * Resources get allocated on success.
795 *------------------------------------------------------------------------------
799 vmci_allocate_interrupt_resources(struct vmci_softc *sc)
801 	struct resource *irq;
/* INTx (single shared line) uses rid 0; MSI/MSI-X rids start at 1. */
805 	flags |= (sc->vmci_num_intr == 1) ? RF_SHAREABLE : 0;
806 	rid = (sc->vmci_intr_type == VMCI_INTR_TYPE_INTX) ? 0 : 1;
808 	for (i = 0; i < sc->vmci_num_intr; i++, rid++) {
809 		irq = bus_alloc_resource_any(sc->vmci_dev, SYS_RES_IRQ, &rid,
/* Record both resource and rid so vmci_dismantle_interrupts can release. */
813 		sc->vmci_intrs[i].vmci_irq = irq;
814 		sc->vmci_intrs[i].vmci_rid = rid;
/* NOTE(review): local declarations, the NULL-check/ENXIO return inside the
 * loop and the final return are missing from this extract. */
821 *------------------------------------------------------------------------------
823 * vmci_setup_interrupts --
825 * Sets up the interrupts.
828 * 0 if success, appropriate error code from bus_setup_intr otherwise.
831 * Interrupt handler gets attached.
833 *------------------------------------------------------------------------------
837 vmci_setup_interrupts(struct vmci_softc *sc)
839 	struct vmci_interrupt *intr;
842 	flags = INTR_TYPE_NET | INTR_MPSAFE;
843 	if (sc->vmci_num_intr > 1)
/* First vector always handles datagrams (and everything else under
 * INTx/MSI, where vmci_interrupt reads ICR itself). */
846 	intr = &sc->vmci_intrs[0];
847 	error = bus_setup_intr(sc->vmci_dev, intr->vmci_irq, flags, NULL,
848 	    vmci_interrupt, NULL, &intr->vmci_handler);
/* With two MSI-X vectors, the second is dedicated to the notification
 * bitmap handler. */
852 	if (sc->vmci_num_intr == 2) {
853 		bus_describe_intr(sc->vmci_dev, intr->vmci_irq,
854 		    intr->vmci_handler, "dg");
855 		intr = &sc->vmci_intrs[1];
856 		error = bus_setup_intr(sc->vmci_dev, intr->vmci_irq, flags,
857 		    NULL, vmci_interrupt_bm, NULL, &intr->vmci_handler);
860 		bus_describe_intr(sc->vmci_dev, intr->vmci_irq,
861 		    intr->vmci_handler, "bm");
/* NOTE(review): error-return checks after each bus_setup_intr and the final
 * return are missing from this extract — verify against upstream. */
868 *------------------------------------------------------------------------------
872 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
873 * interrupt (vector VMCI_INTR_DATAGRAM).
881 *------------------------------------------------------------------------------
885 vmci_interrupt(void *arg)
/* With exclusive MSI-X vectors this vector only carries datagrams; no need
 * to read the interrupt cause register. */
888 	if (vmci_sc->vmci_num_intr == 2)
889 		taskqueue_enqueue(taskqueue_swi,
890 		    &vmci_sc->vmci_interrupt_dq_task);
/* Shared-vector path: read ICR to find the cause. 0xffffffff indicates the
 * device is gone (e.g. surprise removal). */
894 		icr = inl(vmci_sc->vmci_ioaddr + VMCI_ICR_ADDR);
895 		if (icr == 0 || icr == 0xffffffff)
897 		if (icr & VMCI_ICR_DATAGRAM) {
898 			taskqueue_enqueue(taskqueue_swi,
899 			    &vmci_sc->vmci_interrupt_dq_task);
900 			icr &= ~VMCI_ICR_DATAGRAM;
902 		if (icr & VMCI_ICR_NOTIFICATION) {
903 			taskqueue_enqueue(taskqueue_swi,
904 			    &vmci_sc->vmci_interrupt_bm_task);
905 			icr &= ~VMCI_ICR_NOTIFICATION;
908 			VMCI_LOG_INFO(LGPFX"Ignoring unknown interrupt "
/* NOTE(review): the else branch wrapper, the icr declaration and the
 * leftover-bits check are missing from this extract. */
914 *------------------------------------------------------------------------------
916 * vmci_interrupt_bm --
918 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
919 * which is for the notification bitmap. Will only get called if we are
920 * using MSI-X with exclusive vectors.
928 *------------------------------------------------------------------------------
932 vmci_interrupt_bm(void *arg)
935 ASSERT(vmci_sc->vmci_num_intr == 2);
936 taskqueue_enqueue(taskqueue_swi, &vmci_sc->vmci_interrupt_bm_task);
940 *------------------------------------------------------------------------------
942 * dispatch_datagrams --
944 * Reads and dispatches incoming datagrams.
950 * Reads data from the device.
952 *------------------------------------------------------------------------------
956 dispatch_datagrams(void *context, int data)
/* The shared receive buffer is allocated in vmci_config_interrupts; if it
 * is missing we can only log. */
959 	if (data_buffer == NULL)
960 		VMCI_LOG_INFO(LGPFX"dispatch_datagrams(): no buffer "
/* Drain all pending datagrams from the device data-in port. */
963 	vmci_read_datagrams_from_port((vmci_io_handle) 0,
964 	    vmci_sc->vmci_ioaddr + VMCI_DATA_IN_ADDR,
965 	    data_buffer, data_buffer_size);
969 *------------------------------------------------------------------------------
973 * Scans the notification bitmap for raised flags, clears them and handles
982 *------------------------------------------------------------------------------
986 process_bitmap(void *context, int data)
/* Bitmap exists only when VMCI_CAPS_NOTIFICATIONS was negotiated. */
989 	if (vmci_sc->vmci_notifications_bitmap.dma_vaddr == NULL)
990 		VMCI_LOG_INFO(LGPFX"process_bitmaps(): no bitmap present");
992 	vmci_scan_notification_bitmap(
993 	    vmci_sc->vmci_notifications_bitmap.dma_vaddr);
997 *------------------------------------------------------------------------------
999 * vmci_dismantle_interrupts --
1001 * Releases resources, detaches the interrupt handler and drains the task
1008 * No more interrupts.
1010 *------------------------------------------------------------------------------
1014 vmci_dismantle_interrupts(struct vmci_softc *sc)
1016 	struct vmci_interrupt *intr;
/* Tear down each configured vector: handler first, then the IRQ resource.
 * Fields are NULLed so the loop is safe after a partial setup. */
1019 	for (i = 0; i < sc->vmci_num_intr; i++) {
1020 		intr = &sc->vmci_intrs[i];
1021 		if (intr->vmci_handler != NULL) {
1022 			bus_teardown_intr(sc->vmci_dev, intr->vmci_irq,
1023 			    intr->vmci_handler);
1024 			intr->vmci_handler = NULL;
1026 		if (intr->vmci_irq != NULL) {
1027 			bus_release_resource(sc->vmci_dev, SYS_RES_IRQ,
1028 			    intr->vmci_rid, intr->vmci_irq);
1029 			intr->vmci_irq = NULL;
1030 			intr->vmci_rid = -1;
/* MSI/MSI-X messages were allocated in vmci_check_intr_cnt. */
1034 	if ((sc->vmci_intr_type != VMCI_INTR_TYPE_INTX) &&
1035 	    (sc->vmci_num_intr))
1036 		pci_release_msi(sc->vmci_dev);
/* Ensure no handler task is still running before freeing the buffer. */
1038 	taskqueue_drain(taskqueue_swi, &sc->vmci_interrupt_dq_task);
1039 	taskqueue_drain(taskqueue_swi, &sc->vmci_interrupt_bm_task);
1041 	if (data_buffer != NULL)
1042 		free(data_buffer, M_DEVBUF);
1046 *------------------------------------------------------------------------------
1048 * vmci_delayed_work_fn_cb --
1050 * Callback function that executes the queued up delayed work functions.
1058 *------------------------------------------------------------------------------
1062 vmci_delayed_work_fn_cb(void *context, int data)
1064 	vmci_list(vmci_delayed_work_info) temp_list;
1066 	vmci_list_init(&temp_list);
1069 	 * Swap vmci_delayed_work_infos list with the empty temp_list while
1070 	 * holding a lock. vmci_delayed_work_infos would then be an empty list
1071 	 * and temp_list would contain the elements from the original
1072 	 * vmci_delayed_work_infos. Finally, iterate through temp_list
1073 	 * executing the delayed callbacks.
1076 	mtx_lock(&vmci_sc->vmci_delayed_work_lock);
1077 	vmci_list_swap(&temp_list, &vmci_sc->vmci_delayed_work_infos,
1078 	    vmci_delayed_work_info, entry);
1079 	mtx_unlock(&vmci_sc->vmci_delayed_work_lock);
/* Run callbacks outside the lock; each node is freed after its callback. */
1081 	while (!vmci_list_empty(&temp_list)) {
1082 		struct vmci_delayed_work_info *delayed_work_info =
1083 		    vmci_list_first(&temp_list);
1085 		delayed_work_info->work_fn(delayed_work_info->data);
1087 		vmci_list_remove(delayed_work_info, entry);
1088 		vmci_free_kernel_mem(delayed_work_info,
1089 		    sizeof(*delayed_work_info));
1094 *------------------------------------------------------------------------------
1096 * vmci_schedule_delayed_work_fn --
1098 * Schedule the specified callback.
1101 * 0 if success, error code otherwise.
1106 *------------------------------------------------------------------------------
1110 vmci_schedule_delayed_work_fn(vmci_work_fn *work_fn, void *data)
1112 struct vmci_delayed_work_info *delayed_work_info;
1114 delayed_work_info = vmci_alloc_kernel_mem(sizeof(*delayed_work_info),
1115 VMCI_MEMORY_ATOMIC);
1117 if (!delayed_work_info)
1118 return (VMCI_ERROR_NO_MEM);
1120 delayed_work_info->work_fn = work_fn;
1121 delayed_work_info->data = data;
1122 mtx_lock(&vmci_sc->vmci_delayed_work_lock);
1123 vmci_list_insert(&vmci_sc->vmci_delayed_work_infos,
1124 delayed_work_info, entry);
1125 mtx_unlock(&vmci_sc->vmci_delayed_work_lock);
1127 taskqueue_enqueue(taskqueue_thread,
1128 &vmci_sc->vmci_delayed_work_task);
1130 return (VMCI_SUCCESS);
1134 *------------------------------------------------------------------------------
1136 * vmci_send_datagram --
1138 * VM to hypervisor call mechanism.
1141 * The result of the hypercall.
1146 *------------------------------------------------------------------------------
1150 vmci_send_datagram(struct vmci_datagram *dg)
/* Reject a NULL datagram up front. */
1155 		return (VMCI_ERROR_INVALID_ARGS);
1158 	 * Need to acquire spinlock on the device because
1159 	 * the datagram data may be spread over multiple pages and the monitor
1160 	 * may interleave device user rpc calls from multiple VCPUs. Acquiring
1161 	 * the spinlock precludes that possibility. Disabling interrupts to
1162 	 * avoid incoming datagrams during a "rep out" and possibly landing up
1165 	mtx_lock_spin(&vmci_sc->vmci_spinlock);
1168 	 * Send the datagram and retrieve the return value from the result
/* The datagram is pushed to the device with a string I/O "rep outs" to the
 * data-out port: DX = port, CX = byte count, SI = source buffer. */
1171 	__asm__ __volatile__(
1175 	    : "d"(vmci_sc->vmci_ioaddr + VMCI_DATA_OUT_ADDR),
1176 	    "c"(VMCI_DG_SIZE(dg)), "S"(dg)
1180 	 * XXX: Should read result high port as well when updating handlers to
/* Read the hypercall status back from the result-low register. */
1184 	result = bus_space_read_4(vmci_sc->vmci_iot0,
1185 	    vmci_sc->vmci_ioh0, VMCI_RESULT_LOW_ADDR);
1186 	mtx_unlock_spin(&vmci_sc->vmci_spinlock);
/* NOTE(review): the function continues past this extract (final return of
 * `result` not visible) — verify against upstream. */