2 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * VM Bus Driver Implementation
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/linker.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
45 #include <sys/sysctl.h>
46 #include <sys/systm.h>
47 #include <sys/taskqueue.h>
50 #include <vm/vm_param.h>
53 #include <machine/bus.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/metadata.h>
56 #include <machine/md_var.h>
57 #include <machine/resource.h>
58 #include <x86/include/apicvar.h>
60 #include <contrib/dev/acpica/include/acpi.h>
61 #include <dev/acpica/acpivar.h>
63 #include <dev/hyperv/include/hyperv.h>
64 #include <dev/hyperv/include/vmbus_xact.h>
65 #include <dev/hyperv/vmbus/hyperv_reg.h>
66 #include <dev/hyperv/vmbus/hyperv_var.h>
67 #include <dev/hyperv/vmbus/vmbus_reg.h>
68 #include <dev/hyperv/vmbus/vmbus_var.h>
69 #include <dev/hyperv/vmbus/vmbus_chanvar.h>
/* First GPADL handle value handed out; vmbus_gpadl_alloc() counts up from here. */
75 #define VMBUS_GPADL_START 0xe1e10
/*
 * Message hypercall context fields: the backing transaction, and a saved
 * copy of the hypercall input used to restore the request between
 * post-message retries (see vmbus_msghc_exec_noresult()).
 */
78 struct vmbus_xact *mh_xact;
79 struct hypercall_postmsg_in mh_inprm_save;
/*
 * Forward declarations: newbus device/bus/pcib method implementations
 * and internal helpers defined later in this file.
 */
82 static void vmbus_identify(driver_t *, device_t);
83 static int vmbus_probe(device_t);
84 static int vmbus_attach(device_t);
85 static int vmbus_detach(device_t);
86 static int vmbus_read_ivar(device_t, device_t, int,
88 static int vmbus_child_pnpinfo(device_t, device_t, struct sbuf *);
89 static struct resource *vmbus_alloc_resource(device_t dev,
90 device_t child, int type, int *rid,
91 rman_res_t start, rman_res_t end,
92 rman_res_t count, u_int flags);
93 static int vmbus_alloc_msi(device_t bus, device_t dev,
94 int count, int maxcount, int *irqs);
95 static int vmbus_release_msi(device_t bus, device_t dev,
96 int count, int *irqs);
97 static int vmbus_alloc_msix(device_t bus, device_t dev,
99 static int vmbus_release_msix(device_t bus, device_t dev,
101 static int vmbus_map_msi(device_t bus, device_t dev,
102 int irq, uint64_t *addr, uint32_t *data);
103 static uint32_t vmbus_get_version_method(device_t, device_t);
104 static int vmbus_probe_guid_method(device_t, device_t,
105 const struct hyperv_guid *);
106 static uint32_t vmbus_get_vcpu_id_method(device_t bus,
107 device_t dev, int cpu);
108 static struct taskqueue *vmbus_get_eventtq_method(device_t, device_t,
110 #ifdef EARLY_AP_STARTUP
111 static void vmbus_intrhook(void *);
/* Internal attach/scan/interrupt plumbing. */
114 static int vmbus_init(struct vmbus_softc *);
115 static int vmbus_connect(struct vmbus_softc *, uint32_t);
116 static int vmbus_req_channels(struct vmbus_softc *sc);
117 static void vmbus_disconnect(struct vmbus_softc *);
118 static int vmbus_scan(struct vmbus_softc *);
119 static void vmbus_scan_teardown(struct vmbus_softc *);
120 static void vmbus_scan_done(struct vmbus_softc *,
121 const struct vmbus_message *);
122 static void vmbus_chanmsg_handle(struct vmbus_softc *,
123 const struct vmbus_message *);
124 static void vmbus_msg_task(void *, int);
125 static void vmbus_synic_setup(void *);
126 static void vmbus_synic_teardown(void *);
127 static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
128 static int vmbus_dma_alloc(struct vmbus_softc *);
129 static void vmbus_dma_free(struct vmbus_softc *);
130 static int vmbus_intr_setup(struct vmbus_softc *);
131 static void vmbus_intr_teardown(struct vmbus_softc *);
132 static int vmbus_doattach(struct vmbus_softc *);
133 static void vmbus_event_proc_dummy(struct vmbus_softc *,
/* Singleton softc pointer, returned by vmbus_get_softc(). */
136 static struct vmbus_softc *vmbus_sc;
138 SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
/* Tunable: pin each per-CPU event taskqueue thread to its CPU (default on). */
141 static int vmbus_pin_evttask = 1;
142 SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
143 &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");
/* ISR entry points (normal and PTI trampoline variants). */
145 extern inthand_t IDTVEC(vmbus_isr), IDTVEC(vmbus_isr_pti);
146 #define VMBUS_ISR_ADDR trunc_page((uintptr_t)IDTVEC(vmbus_isr_pti))
/* Protocol version negotiated with the host; set in vmbus_init(). */
148 uint32_t vmbus_current_version;
/* Candidate protocol versions, tried in array order by vmbus_init(). */
150 static const uint32_t vmbus_version[] = {
152 VMBUS_VERSION_WIN8_1,
/*
 * Channel-message dispatch table indexed by chm_type; entries not listed
 * here fall through to per-channel processing in vmbus_chanmsg_handle().
 */
158 static const vmbus_chanmsg_proc_t
159 vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
160 VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
161 VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
/* Newbus method table: device, bus, pcib (MSI passthrough) and vmbus kobj interfaces. */
164 static device_method_t vmbus_methods[] = {
165 /* Device interface */
166 DEVMETHOD(device_identify, vmbus_identify),
167 DEVMETHOD(device_probe, vmbus_probe),
168 DEVMETHOD(device_attach, vmbus_attach),
169 DEVMETHOD(device_detach, vmbus_detach),
170 DEVMETHOD(device_shutdown, bus_generic_shutdown),
171 DEVMETHOD(device_suspend, bus_generic_suspend),
172 DEVMETHOD(device_resume, bus_generic_resume),
175 DEVMETHOD(bus_add_child, bus_generic_add_child),
176 DEVMETHOD(bus_print_child, bus_generic_print_child),
177 DEVMETHOD(bus_read_ivar, vmbus_read_ivar),
178 DEVMETHOD(bus_child_pnpinfo, vmbus_child_pnpinfo),
179 DEVMETHOD(bus_alloc_resource, vmbus_alloc_resource),
180 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
181 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
182 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
183 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
184 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
185 #if __FreeBSD_version >= 1100000
186 DEVMETHOD(bus_get_cpus, bus_generic_get_cpus),
/* pcib interface: MSI/MSI-X requests from children are forwarded upward. */
190 DEVMETHOD(pcib_alloc_msi, vmbus_alloc_msi),
191 DEVMETHOD(pcib_release_msi, vmbus_release_msi),
192 DEVMETHOD(pcib_alloc_msix, vmbus_alloc_msix),
193 DEVMETHOD(pcib_release_msix, vmbus_release_msix),
194 DEVMETHOD(pcib_map_msi, vmbus_map_msi),
196 /* Vmbus interface */
197 DEVMETHOD(vmbus_get_version, vmbus_get_version_method),
198 DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method),
199 DEVMETHOD(vmbus_get_vcpu_id, vmbus_get_vcpu_id_method),
200 DEVMETHOD(vmbus_get_event_taskq, vmbus_get_eventtq_method),
205 static driver_t vmbus_driver = {
208 sizeof(struct vmbus_softc)
211 static devclass_t vmbus_devclass;
/* vmbus can attach under either a pcib or an ACPI system container. */
213 DRIVER_MODULE(vmbus, pcib, vmbus_driver, vmbus_devclass, NULL, NULL);
214 DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, vmbus_devclass,
217 MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
218 MODULE_DEPEND(vmbus, pci, 1, 1, 1);
219 MODULE_VERSION(vmbus, 1);
/* Return the singleton vmbus softc (vmbus_sc). */
221 static __inline struct vmbus_softc *
222 vmbus_get_softc(void)
/*
 * Re-initialize the hypercall input of a message hypercall context as an
 * empty VMBus channel message with a dsize-byte payload.  Panics if dsize
 * exceeds the post-message payload limit.
 */
228 vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
230 struct hypercall_postmsg_in *inprm;
232 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
233 panic("invalid data size %zu", dsize);
235 inprm = vmbus_xact_req_data(mh->mh_xact);
236 memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
/* All messages this driver posts go to the message connection ID as channel messages. */
237 inprm->hc_connid = VMBUS_CONNID_MESSAGE;
238 inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
239 inprm->hc_dsize = dsize;
/*
 * Acquire a message hypercall context big enough for a dsize-byte payload.
 * Backed by a vmbus transaction; the context is reset before return.
 */
243 vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
245 struct vmbus_msghc *mh;
246 struct vmbus_xact *xact;
248 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
249 panic("invalid data size %zu", dsize);
/* Request size = fixed post-message header + payload. */
251 xact = vmbus_xact_get(sc->vmbus_xc,
252 dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
256 mh = vmbus_xact_priv(xact, sizeof(*mh));
259 vmbus_msghc_reset(mh, dsize);
/* Release a message hypercall context obtained from vmbus_msghc_get(). */
264 vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
267 vmbus_xact_put(mh->mh_xact);
/* Return a pointer to the payload area of the context's hypercall input. */
271 vmbus_msghc_dataptr(struct vmbus_msghc *mh)
273 struct hypercall_postmsg_in *inprm;
275 inprm = vmbus_xact_req_data(mh->mh_xact);
276 return (inprm->hc_data);
/*
 * Post the context's message to the hypervisor without waiting for a
 * response.  Retries the hypercall with exponential-ish backoff (starting
 * at 1ms, capped around 2s) to ride out transient host-side failures.
 */
280 vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
282 sbintime_t time = SBT_1MS;
283 struct hypercall_postmsg_in *inprm;
284 bus_addr_t inprm_paddr;
287 inprm = vmbus_xact_req_data(mh->mh_xact);
288 inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);
291 * Save the input parameter so that we could restore the input
292 * parameter if the Hypercall failed.
295 * Is this really necessary?! i.e. Will the Hypercall ever
296 * overwrite the input parameter?
298 memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);
301 * In order to cope with transient failures, e.g. insufficient
302 * resources on host side, we retry the post message Hypercall
303 * several times. 20 retries seem sufficient.
305 #define HC_RETRY_MAX 20
307 for (i = 0; i < HC_RETRY_MAX; ++i) {
310 status = hypercall_post_message(inprm_paddr);
311 if (status == HYPERCALL_STATUS_SUCCESS)
/* Back off before retrying; the pause interval grows up to ~2 seconds. */
314 pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
315 if (time < SBT_1S * 2)
318 /* Restore input parameter and try again */
319 memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
/*
 * Post the message and arm the transaction so a response can be awaited;
 * on post failure the transaction is deactivated again.
 */
328 vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
332 vmbus_xact_activate(mh->mh_xact);
333 error = vmbus_msghc_exec_noresult(mh);
335 vmbus_xact_deactivate(mh->mh_xact);
/* Cancel a previously vmbus_msghc_exec()ed message; no response will be awaited. */
340 vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
343 vmbus_xact_deactivate(mh->mh_xact);
/* Sleep until the response for an executed message arrives; returns it. */
346 const struct vmbus_message *
347 vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
351 return (vmbus_xact_wait(mh->mh_xact, &resp_len));
/* Non-sleeping variant: poll for the response, returning it if available. */
354 const struct vmbus_message *
355 vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
359 return (vmbus_xact_poll(mh->mh_xact, &resp_len));
/* Hand an incoming message to the transaction layer to wake any waiter. */
363 vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
366 vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
/* Atomically allocate the next GPADL handle (monotonic from VMBUS_GPADL_START). */
370 vmbus_gpadl_alloc(struct vmbus_softc *sc)
375 gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
381 /* Used for Hyper-V socket when guest client connects to host */
/*
 * Send a TL_CONN channel message asking the host to connect the given
 * guest service endpoint to the given host service.  Fire-and-forget:
 * only the hypercall posting status is reported, not the host's answer.
 */
383 vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
384 struct hyperv_guid *host_srv_id)
386 struct vmbus_softc *sc = vmbus_get_softc();
387 struct vmbus_chanmsg_tl_connect *req;
388 struct vmbus_msghc *mh;
394 mh = vmbus_msghc_get(sc, sizeof(*req));
396 device_printf(sc->vmbus_dev,
397 "can not get msg hypercall for tl connect\n");
401 req = vmbus_msghc_dataptr(mh);
402 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
403 req->guest_endpoint_id = *guest_srv_id;
404 req->host_service_id = *host_srv_id;
406 error = vmbus_msghc_exec_noresult(mh);
407 vmbus_msghc_put(sc, mh);
410 device_printf(sc->vmbus_dev,
411 "tl connect msg hypercall failed\n");
/*
 * Attempt a CONNECT handshake with the host at the given protocol
 * version.  Passes the event-flag and MNF page physical addresses to the
 * host, waits for the response, and returns 0 on acceptance or
 * EOPNOTSUPP if the host rejected this version.
 */
418 vmbus_connect(struct vmbus_softc *sc, uint32_t version)
420 struct vmbus_chanmsg_connect *req;
421 const struct vmbus_message *msg;
422 struct vmbus_msghc *mh;
425 mh = vmbus_msghc_get(sc, sizeof(*req));
429 req = vmbus_msghc_dataptr(mh);
430 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
431 req->chm_ver = version;
/* Physical addresses of the shared pages the host will use. */
432 req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr;
433 req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr;
434 req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr;
436 error = vmbus_msghc_exec(sc, mh);
438 vmbus_msghc_put(sc, mh);
442 msg = vmbus_msghc_wait_result(sc, mh);
443 done = ((const struct vmbus_chanmsg_connect_resp *)
444 msg->msg_data)->chm_done;
446 vmbus_msghc_put(sc, mh);
448 return (done ? 0 : EOPNOTSUPP);
/*
 * Negotiate the VMBus protocol version: try each entry of vmbus_version[]
 * in order until the host accepts one, then record it in both the global
 * vmbus_current_version and the softc.
 */
452 vmbus_init(struct vmbus_softc *sc)
456 for (i = 0; i < nitems(vmbus_version); ++i) {
459 error = vmbus_connect(sc, vmbus_version[i]);
461 vmbus_current_version = vmbus_version[i];
462 sc->vmbus_version = vmbus_version[i];
463 device_printf(sc->vmbus_dev, "version %u.%u\n",
464 VMBUS_VERSION_MAJOR(sc->vmbus_version),
465 VMBUS_VERSION_MINOR(sc->vmbus_version));
/*
 * Send a DISCONNECT channel message to the host.  Fire-and-forget;
 * failures are only logged.
 */
473 vmbus_disconnect(struct vmbus_softc *sc)
475 struct vmbus_chanmsg_disconnect *req;
476 struct vmbus_msghc *mh;
479 mh = vmbus_msghc_get(sc, sizeof(*req));
481 device_printf(sc->vmbus_dev,
482 "can not get msg hypercall for disconnect\n");
486 req = vmbus_msghc_dataptr(mh);
487 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;
489 error = vmbus_msghc_exec_noresult(mh);
490 vmbus_msghc_put(sc, mh);
493 device_printf(sc->vmbus_dev,
494 "disconnect msg hypercall failed\n");
/*
 * Ask the host to send the initial channel offers (CHREQUEST).  Replies
 * arrive asynchronously as channel-offer messages; scan completion is
 * signalled via vmbus_scan_done().
 */
499 vmbus_req_channels(struct vmbus_softc *sc)
501 struct vmbus_chanmsg_chrequest *req;
502 struct vmbus_msghc *mh;
505 mh = vmbus_msghc_get(sc, sizeof(*req));
509 req = vmbus_msghc_dataptr(mh);
510 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;
512 error = vmbus_msghc_exec_noresult(mh);
513 vmbus_msghc_put(sc, mh);
/* Device-taskqueue task: mark the initial channel scan finished and wake waiters. */
519 vmbus_scan_done_task(void *xsc, int pending __unused)
521 struct vmbus_softc *sc = xsc;
524 sc->vmbus_scandone = true;
526 wakeup(&sc->vmbus_scandone);
/*
 * CHOFFER_DONE handler: defer completion to vmbus_devtq so it is ordered
 * after all pending channel-offer processing.
 */
530 vmbus_scan_done(struct vmbus_softc *sc,
531 const struct vmbus_message *msg __unused)
534 taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
/*
 * Probe/attach non-channel children, create the device and sub-channel
 * taskqueues, kick off the host channel scan, and sleep until the
 * initial offers have all been processed.
 */
538 vmbus_scan(struct vmbus_softc *sc)
543 * Identify, probe and attach for non-channel devices.
545 bus_generic_probe(sc->vmbus_dev);
546 bus_generic_attach(sc->vmbus_dev);
549 * This taskqueue serializes vmbus devices' attach and detach
550 * for channel offer and rescind messages.
552 sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
553 taskqueue_thread_enqueue, &sc->vmbus_devtq);
554 taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
555 TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);
558 * This taskqueue handles sub-channel detach, so that vmbus
559 * device's detach running in vmbus_devtq can drain its sub-
562 sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
563 taskqueue_thread_enqueue, &sc->vmbus_subchtq);
564 taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");
567 * Start vmbus scanning.
569 error = vmbus_req_channels(sc);
571 device_printf(sc->vmbus_dev, "channel request failed: %d\n",
577 * Wait for all vmbus devices from the initial channel offers to be
/* Sleeps on vmbus_scandone under Giant until vmbus_scan_done_task() fires. */
581 while (!sc->vmbus_scandone)
582 mtx_sleep(&sc->vmbus_scandone, &Giant, 0, "vmbusdev", 0);
585 device_printf(sc->vmbus_dev, "device scan, probe and attach "
/* Free the device and sub-channel taskqueues created by vmbus_scan(). */
592 vmbus_scan_teardown(struct vmbus_softc *sc)
596 if (sc->vmbus_devtq != NULL) {
598 taskqueue_free(sc->vmbus_devtq);
600 sc->vmbus_devtq = NULL;
602 if (sc->vmbus_subchtq != NULL) {
604 taskqueue_free(sc->vmbus_subchtq);
606 sc->vmbus_subchtq = NULL;
/*
 * Dispatch an incoming channel message: bounded table lookup in
 * vmbus_chanmsg_handlers[], falling back to per-channel processing.
 */
611 vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
613 vmbus_chanmsg_proc_t msg_proc;
616 msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
/* Reject out-of-range types before indexing the handler table. */
617 if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
618 device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
623 msg_proc = vmbus_chanmsg_handlers[msg_type];
624 if (msg_proc != NULL)
627 /* Channel specific processing */
628 vmbus_chan_msgproc(sc, msg);
/*
 * Per-CPU message taskqueue handler: drain the SynIC message slot for
 * this CPU, dispatching channel messages, then EOM to let the hypervisor
 * deliver the next pending message.
 */
632 vmbus_msg_task(void *xsc, int pending __unused)
634 struct vmbus_softc *sc = xsc;
635 volatile struct vmbus_message *msg;
637 msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
639 if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
642 } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
643 /* Channel message */
644 vmbus_chanmsg_handle(sc,
645 __DEVOLATILE(const struct vmbus_message *, msg));
/* Mark the slot free so the hypervisor may reuse it. */
648 msg->msg_type = HYPERV_MSGTYPE_NONE;
650 * Make sure the write to msg_type (i.e. set to
651 * HYPERV_MSGTYPE_NONE) happens before we read the
652 * msg_flags and EOMing. Otherwise, the EOMing will
653 * not deliver any more messages since there is no
657 * mb() is used here, since atomic_thread_fence_seq_cst()
658 * will become compiler fence on UP kernel.
661 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
663 * This will cause message queue rescan to possibly
664 * deliver another msg from the hypervisor
666 wrmsr(MSR_HV_EOM, 0);
/*
 * Core interrupt handler for one CPU: service the SynIC timer message
 * inline (event timer), process channel events, and defer any pending
 * VMBus message to the per-CPU message taskqueue.
 */
672 vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
674 volatile struct vmbus_message *msg;
675 struct vmbus_message *msg_base;
677 msg_base = VMBUS_PCPU_GET(sc, message, cpu);
682 * TODO: move this to independent IDT vector.
684 msg = msg_base + VMBUS_SINT_TIMER;
685 if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) {
686 msg->msg_type = HYPERV_MSGTYPE_NONE;
688 vmbus_et_intr(frame);
691 * Make sure the write to msg_type (i.e. set to
692 * HYPERV_MSGTYPE_NONE) happens before we read the
693 * msg_flags and EOMing. Otherwise, the EOMing will
694 * not deliver any more messages since there is no
698 * mb() is used here, since atomic_thread_fence_seq_cst()
699 * will become compiler fence on UP kernel.
702 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
704 * This will cause message queue rescan to possibly
705 * deliver another msg from the hypervisor
707 wrmsr(MSR_HV_EOM, 0);
712 * Check events. Hot path for network and storage I/O data; high rate.
715 * As recommended by the Windows guest fellows, we check events before
718 sc->vmbus_event_proc(sc, cpu);
721 * Check messages. Mainly management stuffs; ultra low rate.
723 msg = msg_base + VMBUS_SINT_MESSAGE;
/* Messages are rare; hand them off to the taskqueue instead of the ISR path. */
724 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
725 taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
726 VMBUS_PCPU_PTR(sc, message_task, cpu));
729 return (FILTER_HANDLED);
/*
 * ISR entry point: bump the per-CPU interrupt counter and run the real
 * handler for the current CPU with preemption disabled.
 */
733 vmbus_handle_intr(struct trapframe *trap_frame)
735 struct vmbus_softc *sc = vmbus_get_softc();
739 * Disable preemption.
744 * Do a little interrupt counting.
746 (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;
748 vmbus_handle_intr1(sc, trap_frame, cpu);
/*
 * Per-CPU SynIC bring-up (run on each CPU): record the VP index, point
 * SIMP/SIEFP at this CPU's message and event-flag pages, unmask the
 * message and timer SINTs with our IDT vector, then enable SCONTROL.
 * Reserved MSR bits are preserved on every read-modify-write.
 */
757 vmbus_synic_setup(void *xsc)
759 struct vmbus_softc *sc = xsc;
764 if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
765 /* Save virtual processor id. */
766 VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX);
768 /* Set virtual processor id to 0 for compatibility. */
769 VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
773 * Setup the SynIC message.
775 orig = rdmsr(MSR_HV_SIMP);
776 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
777 ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) <<
778 MSR_HV_SIMP_PGSHIFT);
779 wrmsr(MSR_HV_SIMP, val);
782 * Setup the SynIC event flags.
784 orig = rdmsr(MSR_HV_SIEFP);
785 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
786 ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu)
787 >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT);
788 wrmsr(MSR_HV_SIEFP, val);
792 * Configure and unmask SINT for message and event flags.
794 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
796 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
797 (orig & MSR_HV_SINT_RSVD_MASK);
801 * Configure and unmask SINT for timer.
803 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
805 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
806 (orig & MSR_HV_SINT_RSVD_MASK);
810 * All done; enable SynIC.
812 orig = rdmsr(MSR_HV_SCONTROL);
813 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
814 wrmsr(MSR_HV_SCONTROL, val);
/*
 * Per-CPU SynIC teardown: mirror of vmbus_synic_setup().  Disables
 * SCONTROL, masks the message and timer SINTs, and clears SIMP/SIEFP,
 * preserving reserved MSR bits throughout.
 */
818 vmbus_synic_teardown(void *arg)
/* Disable SynIC first so no further interrupts are generated. */
826 orig = rdmsr(MSR_HV_SCONTROL);
827 wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));
830 * Mask message and event flags SINT.
832 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
834 wrmsr(sint, orig | MSR_HV_SINT_MASKED);
839 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
841 wrmsr(sint, orig | MSR_HV_SINT_MASKED);
844 * Teardown SynIC message.
846 orig = rdmsr(MSR_HV_SIMP);
847 wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));
850 * Teardown SynIC event flags.
852 orig = rdmsr(MSR_HV_SIEFP);
853 wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
/*
 * Allocate the DMA-able pages shared with the hypervisor: per-CPU
 * message and event-flag pages, the connection-wide event flag page
 * (rx half / tx half), and the two MNF pages.  All zeroed on allocation.
 */
857 vmbus_dma_alloc(struct vmbus_softc *sc)
859 bus_dma_tag_t parent_dtag;
863 parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
868 * Per-cpu messages and event flags.
870 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
871 PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
872 BUS_DMA_WAITOK | BUS_DMA_ZERO);
875 VMBUS_PCPU_GET(sc, message, cpu) = ptr;
877 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
878 PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
879 BUS_DMA_WAITOK | BUS_DMA_ZERO);
882 VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
885 evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
886 PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
887 if (evtflags == NULL)
/* First half of the page receives flags; second half is for sending. */
889 sc->vmbus_rx_evtflags = (u_long *)evtflags;
890 sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
891 sc->vmbus_evtflags = evtflags;
893 sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
894 PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
895 if (sc->vmbus_mnf1 == NULL)
898 sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
899 sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma,
900 BUS_DMA_WAITOK | BUS_DMA_ZERO);
901 if (sc->vmbus_mnf2 == NULL)
/*
 * Release everything vmbus_dma_alloc() obtained, NULLing each pointer so
 * the function is safe to call on a partially-allocated softc.
 */
908 vmbus_dma_free(struct vmbus_softc *sc)
912 if (sc->vmbus_evtflags != NULL) {
913 hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
914 sc->vmbus_evtflags = NULL;
915 sc->vmbus_rx_evtflags = NULL;
916 sc->vmbus_tx_evtflags = NULL;
918 if (sc->vmbus_mnf1 != NULL) {
919 hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
920 sc->vmbus_mnf1 = NULL;
922 if (sc->vmbus_mnf2 != NULL) {
923 hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
924 sc->vmbus_mnf2 = NULL;
/* Per-CPU message and event-flag pages. */
928 if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
930 VMBUS_PCPU_PTR(sc, message_dma, cpu),
931 VMBUS_PCPU_GET(sc, message, cpu));
932 VMBUS_PCPU_GET(sc, message, cpu) = NULL;
934 if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
936 VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
937 VMBUS_PCPU_GET(sc, event_flags, cpu));
938 VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
/*
 * Per-CPU interrupt plumbing: interrupt counters, event and message
 * taskqueues (message queues always pinned to their CPU, event queues
 * pinned only if vmbus_pin_evttask), then allocate a free IDT vector for
 * the Hyper-V ISR (PTI-aware).  Returns an error if no vector is free.
 */
944 vmbus_intr_setup(struct vmbus_softc *sc)
949 char buf[MAXCOMLEN + 1];
952 /* Allocate an interrupt counter for Hyper-V interrupt */
953 snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
954 intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));
957 * Setup taskqueue to handle events. Task will be per-
960 VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
961 "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
962 VMBUS_PCPU_PTR(sc, event_tq, cpu));
963 if (vmbus_pin_evttask) {
964 CPU_SETOF(cpu, &cpu_mask);
965 taskqueue_start_threads_cpuset(
966 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
967 &cpu_mask, "hvevent%d", cpu);
969 taskqueue_start_threads(
970 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
975 * Setup tasks and taskqueues to handle messages.
977 VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
978 "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
979 VMBUS_PCPU_PTR(sc, message_tq, cpu));
980 CPU_SETOF(cpu, &cpu_mask);
981 taskqueue_start_threads_cpuset(
982 VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
984 TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
/* For modular amd64 kernels, map the ISR page into the PTI user page table. */
988 #if defined(__amd64__) && defined(KLD_MODULE)
989 pmap_pti_add_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE, true);
993 * All Hyper-V ISR required resources are setup, now let's find a
994 * free IDT vector for Hyper-V ISR and set it up.
996 sc->vmbus_idtvec = lapic_ipi_alloc(pti ? IDTVEC(vmbus_isr_pti) :
998 if (sc->vmbus_idtvec < 0) {
999 #if defined(__amd64__) && defined(KLD_MODULE)
1000 pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE);
1002 device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
1006 device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
/*
 * Undo vmbus_intr_setup(): free the IDT vector (and its PTI mapping on
 * modular amd64), then drain and free the per-CPU taskqueues.
 */
1013 vmbus_intr_teardown(struct vmbus_softc *sc)
1017 if (sc->vmbus_idtvec >= 0) {
1018 lapic_ipi_free(sc->vmbus_idtvec);
1019 sc->vmbus_idtvec = -1;
1022 #if defined(__amd64__) && defined(KLD_MODULE)
1023 pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE);
1027 if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) {
1028 taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu));
1029 VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL;
1031 if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) {
/* Drain the message task before freeing its queue. */
1032 taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu),
1033 VMBUS_PCPU_PTR(sc, message_task, cpu));
1034 taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu));
1035 VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL;
/* bus_read_ivar method: expose per-child instance variables to children. */
1041 vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
/*
 * bus_child_pnpinfo method: emit classid/deviceid GUID strings for a
 * channel-backed child; children without a channel (e.g. the event
 * timer) produce nothing.
 */
1047 vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
1049 const struct vmbus_channel *chan;
1050 char guidbuf[HYPERV_GUID_STRLEN];
1052 chan = vmbus_get_channel(child);
1054 /* Event timer device, which does not belong to a channel */
1058 hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
1059 sbuf_printf(sb, "classid=%s", guidbuf);
1061 hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
1062 sbuf_printf(sb, " deviceid=%s", guidbuf);
/*
 * Create and attach a newbus child device for a channel; the channel is
 * stored as the child's ivars so methods above can retrieve it.
 */
1068 vmbus_add_child(struct vmbus_channel *chan)
1070 struct vmbus_softc *sc = chan->ch_vmbus;
1071 device_t parent = sc->vmbus_dev;
1075 chan->ch_dev = device_add_child(parent, NULL, -1);
1076 if (chan->ch_dev == NULL) {
1078 device_printf(parent, "device_add_child for chan%u failed\n",
1082 device_set_ivars(chan->ch_dev, chan);
1083 device_probe_and_attach(chan->ch_dev);
/* Detach and delete the newbus child device of a channel, if it has one. */
1090 vmbus_delete_child(struct vmbus_channel *chan)
1095 if (chan->ch_dev != NULL) {
1096 error = device_delete_child(chan->ch_vmbus->vmbus_dev,
1098 chan->ch_dev = NULL;
/* Sysctl handler: report the negotiated VMBus version as "major.minor". */
1105 vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
1107 struct vmbus_softc *sc = arg1;
1110 snprintf(verstr, sizeof(verstr), "%u.%u",
1111 VMBUS_VERSION_MAJOR(sc->vmbus_version),
1112 VMBUS_VERSION_MINOR(sc->vmbus_version));
1113 return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
1117 * We need the function to make sure the MMIO resource is allocated from the
1118 * ranges found in _CRS.
1120 * For the release function, we can use bus_generic_release_resource().
1122 static struct resource *
/*
 * bus_alloc_resource method: satisfy SYS_RES_MEMORY requests from the
 * _CRS-derived host resource ranges; everything else is forwarded to the
 * parent bus.
 */
1123 vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
1124 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1126 device_t parent = device_get_parent(dev);
1127 struct resource *res;
1130 if (type == SYS_RES_MEMORY) {
1131 struct vmbus_softc *sc = device_get_softc(dev);
1133 res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
1134 rid, start, end, count, flags);
1138 res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
/*
 * pcib MSI/MSI-X methods: vmbus has no interrupt controller of its own,
 * so every request is passed straight through to the parent bridge.
 */
1146 vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
1149 return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
1154 vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
1157 return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
1161 vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
1164 return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
1168 vmbus_release_msix(device_t bus, device_t dev, int irq)
1171 return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
1175 vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
1179 return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
/* vmbus_get_version method: negotiated protocol version from the softc. */
1183 vmbus_get_version_method(device_t bus, device_t dev)
1185 struct vmbus_softc *sc = device_get_softc(bus);
1187 return sc->vmbus_version;
/* vmbus_probe_guid method: match a child's channel type GUID against `guid`. */
1191 vmbus_probe_guid_method(device_t bus, device_t dev,
1192 const struct hyperv_guid *guid)
1194 const struct vmbus_channel *chan = vmbus_get_channel(dev);
1196 if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
/* vmbus_get_vcpu_id method: the hypervisor VP index recorded for `cpu`. */
1202 vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
1204 const struct vmbus_softc *sc = device_get_softc(bus);
1206 return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
/* vmbus_get_event_taskq method: the per-CPU event taskqueue for `cpu`. */
1209 static struct taskqueue *
1210 vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
1212 const struct vmbus_softc *sc = device_get_softc(bus);
1214 KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
1215 return (VMBUS_PCPU_GET(sc, event_tq, cpu));
/* Fixed vTPM MMIO base; _CRS ranges are clipped so we never overlap it. */
1219 #define VTPM_BASE_ADDR 0xfed40000
1220 #define FOUR_GB (1ULL << 32)
/* Two walk passes: >=4GB ranges first, then <4GB (see vmbus_get_mmio_res()). */
1222 enum parse_pass { parse_64, parse_32 };
1224 struct parse_context {
1226 enum parse_pass pass;
/*
 * AcpiWalkResources callback: record 32/64-bit address ranges from _CRS
 * as decodable MMIO, skipping <1MB addresses, clipping around the vTPM
 * region, and filtering by the current pass (below/above 4GB).
 */
1230 parse_crs(ACPI_RESOURCE *res, void *ctx)
1232 const struct parse_context *pc = ctx;
1233 device_t vmbus_dev = pc->vmbus_dev;
1235 struct vmbus_softc *sc = device_get_softc(vmbus_dev);
1238 switch (res->Type) {
1239 case ACPI_RESOURCE_TYPE_ADDRESS32:
1240 start = res->Data.Address32.Address.Minimum;
1241 end = res->Data.Address32.Address.Maximum;
1244 case ACPI_RESOURCE_TYPE_ADDRESS64:
1245 start = res->Data.Address64.Address.Minimum;
1246 end = res->Data.Address64.Address.Maximum;
1255 * We don't use <1MB addresses.
1260 /* Don't conflict with vTPM. */
1261 if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
1262 end = VTPM_BASE_ADDR - 1;
1264 if ((pc->pass == parse_32 && start < FOUR_GB) ||
1265 (pc->pass == parse_64 && start >= FOUR_GB))
1266 pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
/*
 * Walk the _CRS of `dev` with parse_crs() for the given pass, collecting
 * MMIO ranges into the vmbus softc.  A missing _CRS is only logged when
 * bootverbose.
 */
1273 vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
1275 struct parse_context pc;
1279 device_printf(dev, "walking _CRS, pass=%d\n", pass);
1281 pc.vmbus_dev = vmbus_dev;
1283 status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
1286 if (bootverbose && ACPI_FAILURE(status))
1287 device_printf(dev, "_CRS: not found, pass=%d\n", pass);
/*
 * One pass of MMIO range discovery.  If we are a grandchild of acpi0,
 * look for _CRS on any attached "vmbus_res" sibling device and on acpi0
 * itself; otherwise fall back to our parent's _CRS.
 */
1291 vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
1293 device_t acpi0, parent;
1295 parent = device_get_parent(dev);
1297 acpi0 = device_get_parent(parent);
1298 if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
1303 * Try to locate VMBUS resources and find _CRS on them.
1305 if (device_get_children(acpi0, &children, &count) == 0) {
1308 for (i = 0; i < count; ++i) {
1309 if (!device_is_attached(children[i]))
1312 if (strcmp("vmbus_res",
1313 device_get_name(children[i])) == 0)
1314 vmbus_get_crs(children[i], dev, pass);
1316 free(children, M_TEMP);
1320 * Try to find _CRS on acpi.
1322 vmbus_get_crs(acpi0, dev, pass);
1324 device_printf(dev, "not grandchild of acpi\n");
1328 * Try to find _CRS on parent.
1330 vmbus_get_crs(parent, dev, pass);
/*
 * Build the host MMIO resource set: initialize it, then walk _CRS twice
 * (64-bit ranges first, 32-bit second) so >4GB ranges are preferred when
 * allocating 64-bit BARs later.
 */
1334 vmbus_get_mmio_res(device_t dev)
1336 struct vmbus_softc *sc = device_get_softc(dev);
1338 * We walk the resources twice to make sure that: in the resource
1339 * list, the 32-bit resources appear behind the 64-bit resources.
1340 * NB: resource_list_add() uses INSERT_TAIL. This way, when we
1341 * iterate through the list to find a range for a 64-bit BAR in
1342 * vmbus_alloc_resource(), we can make sure we try to use >4GB
1345 pcib_host_res_init(dev, &sc->vmbus_mmio_res);
1347 vmbus_get_mmio_res_pass(dev, parse_64);
1348 vmbus_get_mmio_res_pass(dev, parse_32);
1352 * On Gen2 VMs, Hyper-V provides mmio space for framebuffer.
1353 * This mmio address range is not useable for other PCI devices.
1354 * Currently only efifb and vbefb drivers are using this range without
1355 * reserving it from system.
1356 * Therefore, vmbus driver reserves it before any other PCI device
1357 * drivers start to request mmio addresses.
/* Reservation handle for the framebuffer MMIO range (see vmbus_fb_mmio_res()). */
1359 static struct resource *hv_fb_res;
/*
 * Reserve the framebuffer MMIO range on Gen2 VMs.  The framebuffer
 * location/size is taken from the preloaded kernel metadata (EFI fb
 * preferred, VBE fb as fallback); if neither is present we are on a
 * Gen1 VM and nothing is reserved.  The reservation is kept in the
 * file-scope hv_fb_res.
 */
1362 vmbus_fb_mmio_res(device_t dev)
1364 struct efi_fb *efifb;
1365 struct vbe_fb *vbefb;
1366 rman_res_t fb_start, fb_end, fb_count;
1367 int fb_height, fb_width;
1370 struct vmbus_softc *sc = device_get_softc(dev);
1373 kmdp = preload_search_by_type("elf kernel");
1375 kmdp = preload_search_by_type("elf64 kernel");
1376 efifb = (struct efi_fb *)preload_search_info(kmdp,
1377 MODINFO_METADATA | MODINFOMD_EFI_FB);
1378 vbefb = (struct vbe_fb *)preload_search_info(kmdp,
1379 MODINFO_METADATA | MODINFOMD_VBE_FB);
1380 if (efifb != NULL) {
1381 fb_start = efifb->fb_addr;
1382 fb_end = efifb->fb_addr + efifb->fb_size;
1383 fb_count = efifb->fb_size;
1384 fb_height = efifb->fb_height;
1385 fb_width = efifb->fb_width;
1386 } else if (vbefb != NULL) {
1387 fb_start = vbefb->fb_addr;
1388 fb_end = vbefb->fb_addr + vbefb->fb_size;
1389 fb_count = vbefb->fb_size;
1390 fb_height = vbefb->fb_height;
1391 fb_width = vbefb->fb_width;
1395 "no preloaded kernel fb information\n");
1396 /* We are on Gen1 VM, just return. */
1402 "fb: fb_addr: %#jx, size: %#jx, "
1403 "actual size needed: 0x%x\n",
1404 fb_start, fb_count, fb_height * fb_width);
/* Page-aligned, active reservation out of the _CRS-derived MMIO ranges. */
1406 hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
1407 SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
1408 RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));
1410 if (hv_fb_res && bootverbose)
1412 "successfully reserved memory for framebuffer "
1413 "starting at %#jx, size %#jx\n",
1414 fb_start, fb_count);
/*
 * Release the MMIO ranges recorded by vmbus_get_mmio_res().
 * NOTE(review): any release of hv_fb_res is not visible in this
 * excerpt -- confirm against the full source.
 */
1418 vmbus_free_mmio_res(device_t dev)
1420 	struct vmbus_softc *sc = device_get_softc(dev);
1422 	pcib_host_res_free(dev, &sc->vmbus_mmio_res);
1427 #endif /* NEW_PCIB */
/*
 * Bus identify method: add a single "vmbus" child under unit 0 of the
 * parent, but only when running as a Hyper-V guest with the SynIC MSRs
 * available.
 */
1430 vmbus_identify(driver_t *driver, device_t parent)
/* Guard: wrong parent unit, not Hyper-V, or no SynIC -> do nothing. */
1433 	if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
1434 	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1436 	device_add_child(parent, "vmbus", -1);
/*
 * Probe method: apply the same guards as vmbus_identify() (unit 0,
 * Hyper-V guest, SynIC present) and claim the device with the default
 * probe priority.
 */
1440 vmbus_probe(device_t dev)
1443 	if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
1444 	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1447 	device_set_desc(dev, "Hyper-V Vmbus");
1448 	return (BUS_PROBE_DEFAULT);
1452 * @brief Main vmbus driver initialization routine.
1455 * - initialize the vmbus driver context
1456 * - setup various driver entry points
1457 * - invoke the vmbus hv main init routine
1458 * - get the irq resource
1459 * - invoke the vmbus to add the vmbus root device
1460 * - setup the vmbus root device
1461 * - retrieve the channel offers
/*
 * Real attach body (see the block comment above for the overall plan).
 * NOTE(review): this excerpt omits many interleaved source lines
 * (error-checking branches, gotos, braces); comments annotate only the
 * visible statements.
 */
1464 vmbus_doattach(struct vmbus_softc *sc)
1466 	struct sysctl_oid_list *child;
1467 	struct sysctl_ctx_list *ctx;
/* Attach runs at most once; bail if a previous attach completed. */
1470 	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
/* Reserve Hyper-V MMIO ranges (incl. the Gen2 framebuffer) up front. */
1474 	vmbus_get_mmio_res(sc->vmbus_dev);
1475 	vmbus_fb_mmio_res(sc->vmbus_dev);
1478 	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;
/* Initialize channel bookkeeping: GPADL counter, lists, locks, map. */
1480 	sc->vmbus_gpadl = VMBUS_GPADL_START;
1481 	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
1482 	TAILQ_INIT(&sc->vmbus_prichans);
1483 	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
1484 	TAILQ_INIT(&sc->vmbus_chans);
1485 	sc->vmbus_chmap = malloc(
1486 	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
1490 	 * Create context for "post message" Hypercalls
1492 	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
1493 	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
1494 	    sizeof(struct vmbus_msghc));
/* Transaction-context creation failure aborts the attach (cleanup below). */
1495 	if (sc->vmbus_xc == NULL) {
1501 	 * Allocate DMA stuffs.
1503 	ret = vmbus_dma_alloc(sc);
/* Hook up interrupt delivery before enabling the SynIC on all CPUs. */
1510 	ret = vmbus_intr_setup(sc);
1518 	device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
/* Enable the per-CPU SynIC pages on every CPU simultaneously. */
1519 	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
1520 	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;
1523 	 * Initialize vmbus, e.g. connect to Hypervisor.
1525 	ret = vmbus_init(sc);
/* Pre-Win8 hosts need the compat event-processing path. */
1529 	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
1530 	    sc->vmbus_version == VMBUS_VERSION_WIN7)
1531 		sc->vmbus_event_proc = vmbus_event_proc_compat;
1533 		sc->vmbus_event_proc = vmbus_event_proc;
/* Retrieve channel offers and attach child devices. */
1535 	ret = vmbus_scan(sc);
/* Export the negotiated protocol version via sysctl. */
1539 	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
1540 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
1541 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
1542 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1543 	    vmbus_sysctl_version, "A", "vmbus version");
/*
 * Error cleanup path: unwind in reverse order of the setup above.
 * NOTE(review): the goto labels between these calls are not visible
 * in this excerpt.
 */
1548 	vmbus_scan_teardown(sc);
1549 	vmbus_intr_teardown(sc);
1551 	if (sc->vmbus_xc != NULL) {
1552 		vmbus_xact_ctx_destroy(sc->vmbus_xc);
1553 		sc->vmbus_xc = NULL;
1555 	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1556 	mtx_destroy(&sc->vmbus_prichan_lock);
1557 	mtx_destroy(&sc->vmbus_chan_lock);
1563 vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
1567 #ifdef EARLY_AP_STARTUP
/*
 * config_intrhook(9) callback (EARLY_AP_STARTUP only): runs the
 * deferred attach once the hook fires, then removes the hook.
 * NOTE(review): the vmbus_doattach() call and the bootverbose guard
 * around the printf are not visible in this excerpt -- confirm
 * against the full source.
 */
1570 vmbus_intrhook(void *xsc)
1572 	struct vmbus_softc *sc = xsc;
1575 		device_printf(sc->vmbus_dev, "intrhook\n");
1577 	config_intrhook_disestablish(&sc->vmbus_intrhook);
1580 #endif /* EARLY_AP_STARTUP */
/*
 * Attach method: stash the softc in the global 'vmbus_sc' (presumably
 * what vmbus_get_softc() returns -- see vmbus_sysinit below), set safe
 * defaults, then either defer the real attach via an intrhook
 * (EARLY_AP_STARTUP) or run it directly.
 */
1583 vmbus_attach(device_t dev)
1585 	vmbus_sc = device_get_softc(dev);
1586 	vmbus_sc->vmbus_dev = dev;
/* -1 marks "no IDT vector allocated yet". */
1587 	vmbus_sc->vmbus_idtvec = -1;
1590 	 * Event processing logic will be configured:
1591 	 * - After the vmbus protocol version negotiation.
1592 	 * - Before we request channel offers.
1594 	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;
1596 #ifdef EARLY_AP_STARTUP
1598 	 * Defer the real attach until the pause(9) works as expected.
1600 	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
1601 	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
1602 	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
1603 #else /* !EARLY_AP_STARTUP */
1605 	 * If the system has already booted and thread
1606 	 * scheduling is possible indicated by the global
1607 	 * cold set to zero, we just call the driver
1608 	 * initialization directly.
/* NOTE(review): the !cold guard for this call is not visible here. */
1611 		vmbus_doattach(vmbus_sc);
1612 #endif /* EARLY_AP_STARTUP */
/*
 * Detach method: unwind in reverse order of vmbus_doattach() --
 * children, channels, scan state, host connection, SynIC, interrupts,
 * xact context, channel map, locks, and finally the MMIO reservations.
 * NOTE(review): braces and blank lines are missing from this excerpt;
 * comments annotate only the visible statements.
 */
1618 vmbus_detach(device_t dev)
1620 	struct vmbus_softc *sc = device_get_softc(dev);
1622 	bus_generic_detach(dev);
1623 	vmbus_chan_destroy_all(sc);
1625 	vmbus_scan_teardown(sc);
1627 	vmbus_disconnect(sc);
/* Only tear the SynIC down if attach got far enough to enable it. */
1629 	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
1630 		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
1631 		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
1634 	vmbus_intr_teardown(sc);
1637 	if (sc->vmbus_xc != NULL) {
1638 		vmbus_xact_ctx_destroy(sc->vmbus_xc);
1639 		sc->vmbus_xc = NULL;
1642 	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1643 	mtx_destroy(&sc->vmbus_prichan_lock);
1644 	mtx_destroy(&sc->vmbus_chan_lock);
1647 	vmbus_free_mmio_res(dev);
1653 #ifndef EARLY_AP_STARTUP
/*
 * !EARLY_AP_STARTUP path: SYSINIT entry that runs the deferred attach
 * at the very end of SI_SUB_SMP, once all CPUs are up.
 */
1656 vmbus_sysinit(void *arg __unused)
1658 	struct vmbus_softc *sc = vmbus_get_softc();
/* Not a Hyper-V guest, or vmbus never attached: nothing to do. */
1660 	if (vm_guest != VM_GUEST_HV || sc == NULL)
1664 	 * If the system has already booted and thread
1665 	 * scheduling is possible, as indicated by the
1666 	 * global cold set to zero, we just call the driver
1667 	 * initialization directly.
/* NOTE(review): the vmbus_doattach() call is not visible in this excerpt. */
1674  * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
1677 SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
1679 #endif /* !EARLY_AP_STARTUP */