2 * Copyright (c) 2016-2017 Microsoft Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #include <sys/queue.h>
42 #include <sys/sysctl.h>
45 #include <sys/mutex.h>
46 #include <sys/errno.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_kern.h>
53 #include <machine/atomic.h>
54 #include <machine/bus.h>
55 #include <machine/frame.h>
56 #include <machine/pci_cfgreg.h>
57 #include <machine/resource.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
63 #include <dev/pci/pcib_private.h>
66 #include <machine/intr_machdep.h>
67 #include <x86/apicreg.h>
69 #include <dev/hyperv/include/hyperv.h>
70 #include <dev/hyperv/include/hyperv_busdma.h>
71 #include <dev/hyperv/include/vmbus_xact.h>
72 #include <dev/hyperv/vmbus/vmbus_reg.h>
73 #include <dev/hyperv/vmbus/vmbus_chanvar.h>
77 #if __FreeBSD_version < 1100000
78 typedef u_long rman_res_t;
79 #define RM_MAX_END (~(rman_res_t)0)
88 init_completion(struct completion *c)
90 memset(c, 0, sizeof(*c));
91 mtx_init(&c->lock, "hvcmpl", NULL, MTX_DEF);
96 free_completion(struct completion *c)
98 mtx_destroy(&c->lock);
102 complete(struct completion *c)
106 mtx_unlock(&c->lock);
111 wait_for_completion(struct completion *c)
115 mtx_sleep(c, &c->lock, 0, "hvwfc", 0);
117 mtx_unlock(&c->lock);
/*
 * Encode a vPCI protocol version as (major << 16) | minor, matching the
 * layout Hyper-V expects in pci_version_request.protocol_version.
 * Fix: the low 16 bits must come from 'minor', not 'major'. The old form
 * happened to work only because the sole user is PCI_MAKE_VERSION(1, 1),
 * where major == minor.
 */
#define PCI_MAKE_VERSION(major, minor) \
	((uint32_t)(((major) << 16) | (minor)))
123 PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
124 PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
127 #define PCI_CONFIG_MMIO_LENGTH 0x2000
128 #define CFG_PAGE_OFFSET 0x1000
129 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
135 enum pci_message_type {
139 PCI_MESSAGE_BASE = 0x42490000,
140 PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
141 PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
142 PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
143 PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
144 PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
145 PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
146 PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
147 PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
148 PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
149 PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
150 PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
151 PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
152 PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
153 PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
154 PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
155 PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
156 PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
157 PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
158 PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
159 PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
164 * Structures defining the virtual PCI Express protocol.
169 uint16_t minor_version;
170 uint16_t major_version;
176 * This representation is the one used in Windows, which is
177 * what is expected when sending this back and forth with
178 * the Hyper-V parent partition.
180 union win_slot_encoding {
184 uint32_t reserved:24;
189 struct pci_func_desc {
190 uint16_t v_id; /* vendor ID */
191 uint16_t d_id; /* device ID */
196 uint32_t subsystem_id;
197 union win_slot_encoding wslot;
198 uint32_t ser; /* serial number */
203 uint8_t delivery_mode;
204 uint16_t vector_count;
209 struct tran_int_desc {
211 uint16_t vector_count;
220 struct pci_child_message {
221 struct pci_message message_type;
222 union win_slot_encoding wslot;
225 struct pci_incoming_message {
226 struct vmbus_chanpkt_hdr hdr;
227 struct pci_message message_type;
230 struct pci_response {
231 struct vmbus_chanpkt_hdr hdr;
232 int32_t status; /* negative values are failures */
236 void (*completion_func)(void *context, struct pci_response *resp,
237 int resp_packet_size);
240 struct pci_message message[0];
244 * Specific message types supporting the PCI protocol.
247 struct pci_version_request {
248 struct pci_message message_type;
249 uint32_t protocol_version;
250 uint32_t is_last_attempt:1;
251 uint32_t reservedz:31;
254 struct pci_bus_d0_entry {
255 struct pci_message message_type;
260 struct pci_bus_relations {
261 struct pci_incoming_message incoming;
262 uint32_t device_count;
263 struct pci_func_desc func[0];
266 #define MAX_NUM_BARS (PCIR_MAX_BAR_0 + 1)
267 struct pci_q_res_req_response {
268 struct vmbus_chanpkt_hdr hdr;
269 int32_t status; /* negative values are failures */
270 uint32_t probed_bar[MAX_NUM_BARS];
273 struct pci_resources_assigned {
274 struct pci_message message_type;
275 union win_slot_encoding wslot;
276 uint8_t memory_range[0x14][MAX_NUM_BARS]; /* unused here */
277 uint32_t msi_descriptors;
278 uint32_t reserved[4];
281 struct pci_create_interrupt {
282 struct pci_message message_type;
283 union win_slot_encoding wslot;
284 struct hv_msi_desc int_desc;
287 struct pci_create_int_response {
288 struct pci_response response;
290 struct tran_int_desc int_desc;
293 struct pci_delete_interrupt {
294 struct pci_message message_type;
295 union win_slot_encoding wslot;
296 struct tran_int_desc int_desc;
299 struct pci_dev_incoming {
300 struct pci_incoming_message incoming;
301 union win_slot_encoding wslot;
304 struct pci_eject_response {
305 struct pci_message message_type;
306 union win_slot_encoding wslot;
311 * Driver specific state.
314 enum hv_pcibus_state {
322 struct vmbus_pcib_softc *sc;
326 enum hv_pcibus_state state;
328 struct resource *cfg_res;
330 struct completion query_completion, *query_comp;
332 struct mtx config_lock; /* Avoid two threads writing index page */
333 struct mtx device_list_lock; /* Protect lists below */
334 TAILQ_HEAD(, hv_pci_dev) children;
335 TAILQ_HEAD(, hv_dr_state) dr_list;
337 volatile int detaching;
341 TAILQ_ENTRY(hv_pci_dev) link;
343 struct pci_func_desc desc;
345 bool reported_missing;
347 struct hv_pcibus *hbus;
348 struct task eject_task;
350 TAILQ_HEAD(, hv_irq_desc) irq_desc_list;
353 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
354 * read it back, for each of the BAR offsets within config space.
356 uint32_t probed_bar[MAX_NUM_BARS];
360 * Tracks "Device Relations" messages from the host, which must be both
361 * processed in order.
365 struct hv_pcibus *bus;
369 TAILQ_ENTRY(hv_dr_state) link;
370 uint32_t device_count;
371 struct pci_func_desc func[0];
375 TAILQ_ENTRY(hv_irq_desc) link;
376 struct tran_int_desc desc;
380 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
381 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
382 #define PCI_FUNC(devfn) ((devfn) & 0x07)
385 devfn_to_wslot(unsigned int devfn)
387 union win_slot_encoding wslot;
390 wslot.bits.slot = PCI_SLOT(devfn);
391 wslot.bits.func = PCI_FUNC(devfn);
397 wslot_to_devfn(uint32_t wslot)
399 union win_slot_encoding encoding;
403 encoding.val = wslot;
405 slot = encoding.bits.slot;
406 func = encoding.bits.func;
408 return (PCI_DEVFN(slot, func));
411 struct vmbus_pcib_softc {
412 struct vmbus_channel *chan;
415 struct taskqueue *taskq;
417 struct hv_pcibus *hbus;
420 /* {44C4F61D-4444-4400-9D52-802E27EDE19F} */
421 static const struct hyperv_guid g_pass_through_dev_type = {
422 .hv_guid = {0x1D, 0xF6, 0xC4, 0x44, 0x44, 0x44, 0x00, 0x44,
423 0x9D, 0x52, 0x80, 0x2E, 0x27, 0xED, 0xE1, 0x9F}
426 struct hv_pci_compl {
427 struct completion host_event;
428 int32_t completion_status;
431 struct q_res_req_compl {
432 struct completion host_event;
433 struct hv_pci_dev *hpdev;
436 struct compose_comp_ctxt {
437 struct hv_pci_compl comp_pkt;
438 struct tran_int_desc int_desc;
442 hv_pci_generic_compl(void *context, struct pci_response *resp,
443 int resp_packet_size)
445 struct hv_pci_compl *comp_pkt = context;
447 if (resp_packet_size >= sizeof(struct pci_response))
448 comp_pkt->completion_status = resp->status;
450 comp_pkt->completion_status = -1;
452 complete(&comp_pkt->host_event);
456 q_resource_requirements(void *context, struct pci_response *resp,
457 int resp_packet_size)
459 struct q_res_req_compl *completion = context;
460 struct pci_q_res_req_response *q_res_req =
461 (struct pci_q_res_req_response *)resp;
464 if (resp->status < 0) {
465 printf("vmbus_pcib: failed to query resource requirements\n");
467 for (i = 0; i < MAX_NUM_BARS; i++)
468 completion->hpdev->probed_bar[i] =
469 q_res_req->probed_bar[i];
472 complete(&completion->host_event);
476 hv_pci_compose_compl(void *context, struct pci_response *resp,
477 int resp_packet_size)
479 struct compose_comp_ctxt *comp_pkt = context;
480 struct pci_create_int_response *int_resp =
481 (struct pci_create_int_response *)resp;
483 comp_pkt->comp_pkt.completion_status = resp->status;
484 comp_pkt->int_desc = int_resp->int_desc;
485 complete(&comp_pkt->comp_pkt.host_event);
489 hv_int_desc_free(struct hv_pci_dev *hpdev, struct hv_irq_desc *hid)
491 struct pci_delete_interrupt *int_pkt;
493 struct pci_packet pkt;
494 uint8_t buffer[sizeof(struct pci_delete_interrupt)];
497 memset(&ctxt, 0, sizeof(ctxt));
498 int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
499 int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE;
500 int_pkt->wslot.val = hpdev->desc.wslot.val;
501 int_pkt->int_desc = hid->desc;
503 vmbus_chan_send(hpdev->hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0,
504 int_pkt, sizeof(*int_pkt), 0);
510 hv_pci_delete_device(struct hv_pci_dev *hpdev)
512 struct hv_pcibus *hbus = hpdev->hbus;
513 struct hv_irq_desc *hid, *tmp_hid;
517 devfn = wslot_to_devfn(hpdev->desc.wslot.val);
521 pci_dev = pci_find_dbsf(hbus->pci_domain,
522 0, PCI_SLOT(devfn), PCI_FUNC(devfn));
524 device_delete_child(hbus->pci_bus, pci_dev);
528 mtx_lock(&hbus->device_list_lock);
529 TAILQ_REMOVE(&hbus->children, hpdev, link);
530 mtx_unlock(&hbus->device_list_lock);
532 TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid)
533 hv_int_desc_free(hpdev, hid);
535 free(hpdev, M_DEVBUF);
538 static struct hv_pci_dev *
539 new_pcichild_device(struct hv_pcibus *hbus, struct pci_func_desc *desc)
541 struct hv_pci_dev *hpdev;
542 struct pci_child_message *res_req;
543 struct q_res_req_compl comp_pkt;
545 struct pci_packet pkt;
546 uint8_t buffer[sizeof(struct pci_child_message)];
550 hpdev = malloc(sizeof(*hpdev), M_DEVBUF, M_WAITOK | M_ZERO);
553 TAILQ_INIT(&hpdev->irq_desc_list);
555 init_completion(&comp_pkt.host_event);
556 comp_pkt.hpdev = hpdev;
558 ctxt.pkt.compl_ctxt = &comp_pkt;
559 ctxt.pkt.completion_func = q_resource_requirements;
561 res_req = (struct pci_child_message *)&ctxt.pkt.message;
562 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
563 res_req->wslot.val = desc->wslot.val;
565 ret = vmbus_chan_send(hbus->sc->chan,
566 VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
567 res_req, sizeof(*res_req), (uint64_t)(uintptr_t)&ctxt.pkt);
571 wait_for_completion(&comp_pkt.host_event);
572 free_completion(&comp_pkt.host_event);
576 mtx_lock(&hbus->device_list_lock);
577 if (TAILQ_EMPTY(&hbus->children))
578 hbus->pci_domain = desc->ser & 0xFFFF;
579 TAILQ_INSERT_TAIL(&hbus->children, hpdev, link);
580 mtx_unlock(&hbus->device_list_lock);
583 free_completion(&comp_pkt.host_event);
584 free(hpdev, M_DEVBUF);
588 #if __FreeBSD_version < 1100000
590 /* Old versions don't have BUS_RESCAN(). Let's copy it from FreeBSD 11. */
592 static struct pci_devinfo *
593 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
594 int slot, int func, size_t dinfo_size)
596 struct pci_devinfo *dinfo;
598 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
600 pci_add_child(dev, dinfo);
606 pci_rescan(device_t dev)
608 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
609 device_t pcib = device_get_parent(dev);
610 struct pci_softc *sc;
611 device_t child, *devlist, *unchanged;
612 int devcount, error, i, j, maxslots, oldcount;
613 int busno, domain, s, f, pcifunchigh;
616 /* No need to check for ARI on a rescan. */
617 error = device_get_children(dev, &devlist, &devcount);
621 unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
623 if (unchanged == NULL) {
624 free(devlist, M_TEMP);
630 sc = device_get_softc(dev);
631 domain = pcib_get_domain(dev);
632 busno = pcib_get_bus(dev);
633 maxslots = PCIB_MAXSLOTS(pcib);
634 for (s = 0; s <= maxslots; s++) {
635 /* If function 0 is not present, skip to the next slot. */
637 if (REG(PCIR_VENDOR, 2) == 0xffff)
640 hdrtype = REG(PCIR_HDRTYPE, 1);
641 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
643 if (hdrtype & PCIM_MFDEV)
644 pcifunchigh = PCIB_MAXFUNCS(pcib);
645 for (f = 0; f <= pcifunchigh; f++) {
646 if (REG(PCIR_VENDOR, 2) == 0xffff)
650 * Found a valid function. Check if a
651 * device_t for this device already exists.
653 for (i = 0; i < devcount; i++) {
657 if (pci_get_slot(child) == s &&
658 pci_get_function(child) == f) {
659 unchanged[i] = child;
664 pci_identify_function(pcib, dev, domain, busno, s, f,
665 sizeof(struct pci_devinfo));
670 /* Remove devices that are no longer present. */
671 for (i = 0; i < devcount; i++) {
672 if (unchanged[i] != NULL)
674 device_delete_child(dev, devlist[i]);
677 free(devlist, M_TEMP);
680 /* Try to attach the devices just added. */
681 error = device_get_children(dev, &devlist, &devcount);
683 free(unchanged, M_TEMP);
687 for (i = 0; i < devcount; i++) {
688 for (j = 0; j < oldcount; j++) {
689 if (devlist[i] == unchanged[j])
693 device_probe_and_attach(devlist[i]);
697 free(unchanged, M_TEMP);
698 free(devlist, M_TEMP);
706 pci_rescan(device_t dev)
708 return (BUS_RESCAN(dev));
714 pci_devices_present_work(void *arg, int pending __unused)
716 struct hv_dr_work *dr_wrk = arg;
717 struct hv_dr_state *dr = NULL;
718 struct hv_pcibus *hbus;
721 struct pci_func_desc *new_desc;
722 struct hv_pci_dev *hpdev, *tmp_hpdev;
723 struct completion *query_comp;
724 bool need_rescan = false;
727 free(dr_wrk, M_DEVBUF);
729 /* Pull this off the queue and process it if it was the last one. */
730 mtx_lock(&hbus->device_list_lock);
731 while (!TAILQ_EMPTY(&hbus->dr_list)) {
732 dr = TAILQ_FIRST(&hbus->dr_list);
733 TAILQ_REMOVE(&hbus->dr_list, dr, link);
735 /* Throw this away if the list still has stuff in it. */
736 if (!TAILQ_EMPTY(&hbus->dr_list)) {
741 mtx_unlock(&hbus->device_list_lock);
746 /* First, mark all existing children as reported missing. */
747 mtx_lock(&hbus->device_list_lock);
748 TAILQ_FOREACH(hpdev, &hbus->children, link)
749 hpdev->reported_missing = true;
750 mtx_unlock(&hbus->device_list_lock);
752 /* Next, add back any reported devices. */
753 for (child_no = 0; child_no < dr->device_count; child_no++) {
755 new_desc = &dr->func[child_no];
757 mtx_lock(&hbus->device_list_lock);
758 TAILQ_FOREACH(hpdev, &hbus->children, link) {
759 if ((hpdev->desc.wslot.val ==
760 new_desc->wslot.val) &&
761 (hpdev->desc.v_id == new_desc->v_id) &&
762 (hpdev->desc.d_id == new_desc->d_id) &&
763 (hpdev->desc.ser == new_desc->ser)) {
764 hpdev->reported_missing = false;
769 mtx_unlock(&hbus->device_list_lock);
775 hpdev = new_pcichild_device(hbus, new_desc);
777 printf("vmbus_pcib: failed to add a child\n");
781 /* Remove missing device(s), if any */
782 TAILQ_FOREACH_SAFE(hpdev, &hbus->children, link, tmp_hpdev) {
783 if (hpdev->reported_missing)
784 hv_pci_delete_device(hpdev);
787 /* Rescan the bus to find any new device, if necessary. */
788 if (hbus->state == hv_pcibus_installed && need_rescan)
789 pci_rescan(hbus->pci_bus);
791 /* Wake up hv_pci_query_relations(), if it's waiting. */
792 query_comp = hbus->query_comp;
794 hbus->query_comp = NULL;
795 complete(query_comp);
801 static struct hv_pci_dev *
802 get_pcichild_wslot(struct hv_pcibus *hbus, uint32_t wslot)
804 struct hv_pci_dev *hpdev, *ret = NULL;
806 mtx_lock(&hbus->device_list_lock);
807 TAILQ_FOREACH(hpdev, &hbus->children, link) {
808 if (hpdev->desc.wslot.val == wslot) {
813 mtx_unlock(&hbus->device_list_lock);
819 hv_pci_devices_present(struct hv_pcibus *hbus,
820 struct pci_bus_relations *relations)
822 struct hv_dr_state *dr;
823 struct hv_dr_work *dr_wrk;
824 unsigned long dr_size;
826 if (hbus->detaching && relations->device_count > 0)
829 dr_size = offsetof(struct hv_dr_state, func) +
830 (sizeof(struct pci_func_desc) * relations->device_count);
831 dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO);
833 dr->device_count = relations->device_count;
834 if (dr->device_count != 0)
835 memcpy(dr->func, relations->func,
836 sizeof(struct pci_func_desc) * dr->device_count);
838 mtx_lock(&hbus->device_list_lock);
839 TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link);
840 mtx_unlock(&hbus->device_list_lock);
842 dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO);
844 TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk);
845 taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task);
849 hv_eject_device_work(void *arg, int pending __unused)
851 struct hv_pci_dev *hpdev = arg;
852 union win_slot_encoding wslot = hpdev->desc.wslot;
853 struct hv_pcibus *hbus = hpdev->hbus;
854 struct pci_eject_response *eject_pkt;
856 struct pci_packet pkt;
857 uint8_t buffer[sizeof(struct pci_eject_response)];
860 hv_pci_delete_device(hpdev);
862 memset(&ctxt, 0, sizeof(ctxt));
863 eject_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
864 eject_pkt->message_type.type = PCI_EJECTION_COMPLETE;
865 eject_pkt->wslot.val = wslot.val;
866 vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0,
867 eject_pkt, sizeof(*eject_pkt), 0);
871 hv_pci_eject_device(struct hv_pci_dev *hpdev)
873 struct hv_pcibus *hbus = hpdev->hbus;
874 struct taskqueue *taskq;
880 * Push this task into the same taskqueue on which
881 * vmbus_pcib_attach() runs, so we're sure this task can't run
882 * concurrently with vmbus_pcib_attach().
884 TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev);
885 taskq = vmbus_chan_mgmt_tq(hbus->sc->chan);
886 taskqueue_enqueue(taskq, &hpdev->eject_task);
889 #define PCIB_PACKET_SIZE 0x100
892 vmbus_pcib_on_channel_callback(struct vmbus_channel *chan, void *arg)
894 struct vmbus_pcib_softc *sc = arg;
895 struct hv_pcibus *hbus = sc->hbus;
898 int bufferlen = PCIB_PACKET_SIZE;
900 struct pci_packet *comp_packet;
901 struct pci_response *response;
902 struct pci_incoming_message *new_msg;
903 struct pci_bus_relations *bus_rel;
904 struct pci_dev_incoming *dev_msg;
905 struct hv_pci_dev *hpdev;
909 struct vmbus_chanpkt_hdr *pkt = buffer;
913 bytes_rxed = bufferlen;
914 ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed);
916 if (ret == ENOBUFS) {
917 /* Handle large packet */
918 if (bufferlen > PCIB_PACKET_SIZE) {
919 free(buffer, M_DEVBUF);
923 /* alloc new buffer */
924 buffer = malloc(bytes_rxed, M_DEVBUF, M_WAITOK | M_ZERO);
925 bufferlen = bytes_rxed;
931 /* ignore EIO or EAGAIN */
935 if (bytes_rxed <= sizeof(struct pci_response))
938 switch (pkt->cph_type) {
939 case VMBUS_CHANPKT_TYPE_COMP:
941 (struct pci_packet *)(uintptr_t)pkt->cph_xactid;
942 response = (struct pci_response *)pkt;
943 comp_packet->completion_func(comp_packet->compl_ctxt,
944 response, bytes_rxed);
946 case VMBUS_CHANPKT_TYPE_INBAND:
947 new_msg = (struct pci_incoming_message *)buffer;
949 switch (new_msg->message_type.type) {
950 case PCI_BUS_RELATIONS:
951 bus_rel = (struct pci_bus_relations *)buffer;
953 if (bus_rel->device_count == 0)
957 offsetof(struct pci_bus_relations, func) +
958 (sizeof(struct pci_func_desc) *
959 (bus_rel->device_count)))
962 hv_pci_devices_present(hbus, bus_rel);
966 dev_msg = (struct pci_dev_incoming *)buffer;
967 hpdev = get_pcichild_wslot(hbus,
971 hv_pci_eject_device(hpdev);
975 printf("vmbus_pcib: Unknown msg type 0x%x\n",
976 new_msg->message_type.type);
981 printf("vmbus_pcib: Unknown VMBus msg type %hd\n",
987 if (bufferlen > PCIB_PACKET_SIZE)
988 free(buffer, M_DEVBUF);
992 hv_pci_protocol_negotiation(struct hv_pcibus *hbus)
994 struct pci_version_request *version_req;
995 struct hv_pci_compl comp_pkt;
997 struct pci_packet pkt;
998 uint8_t buffer[sizeof(struct pci_version_request)];
1002 init_completion(&comp_pkt.host_event);
1004 ctxt.pkt.completion_func = hv_pci_generic_compl;
1005 ctxt.pkt.compl_ctxt = &comp_pkt;
1006 version_req = (struct pci_version_request *)&ctxt.pkt.message;
1007 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
1008 version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
1009 version_req->is_last_attempt = 1;
1011 ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND,
1012 VMBUS_CHANPKT_FLAG_RC, version_req, sizeof(*version_req),
1013 (uint64_t)(uintptr_t)&ctxt.pkt);
1017 wait_for_completion(&comp_pkt.host_event);
1019 if (comp_pkt.completion_status < 0) {
1020 device_printf(hbus->pcib,
1021 "vmbus_pcib version negotiation failed: %x\n",
1022 comp_pkt.completion_status);
1028 free_completion(&comp_pkt.host_event);
1032 /* Ask the host to send along the list of child devices */
1034 hv_pci_query_relations(struct hv_pcibus *hbus)
1036 struct pci_message message;
1039 message.type = PCI_QUERY_BUS_RELATIONS;
1040 ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0,
1041 &message, sizeof(message), 0);
1046 hv_pci_enter_d0(struct hv_pcibus *hbus)
1048 struct pci_bus_d0_entry *d0_entry;
1049 struct hv_pci_compl comp_pkt;
1051 struct pci_packet pkt;
1052 uint8_t buffer[sizeof(struct pci_bus_d0_entry)];
1057 * Tell the host that the bus is ready to use, and moved into the
1058 * powered-on state. This includes telling the host which region
1059 * of memory-mapped I/O space has been chosen for configuration space
1062 init_completion(&comp_pkt.host_event);
1064 ctxt.pkt.completion_func = hv_pci_generic_compl;
1065 ctxt.pkt.compl_ctxt = &comp_pkt;
1067 d0_entry = (struct pci_bus_d0_entry *)&ctxt.pkt.message;
1068 memset(d0_entry, 0, sizeof(*d0_entry));
1069 d0_entry->message_type.type = PCI_BUS_D0ENTRY;
1070 d0_entry->mmio_base = rman_get_start(hbus->cfg_res);
1072 ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND,
1073 VMBUS_CHANPKT_FLAG_RC, d0_entry, sizeof(*d0_entry),
1074 (uint64_t)(uintptr_t)&ctxt.pkt);
1078 wait_for_completion(&comp_pkt.host_event);
1080 if (comp_pkt.completion_status < 0) {
1081 device_printf(hbus->pcib, "vmbus_pcib failed to enable D0\n");
1088 free_completion(&comp_pkt.host_event);
1093 * It looks this is only needed by Windows VM, but let's send the message too
1094 * just to make the host happy.
1097 hv_send_resources_allocated(struct hv_pcibus *hbus)
1099 struct pci_resources_assigned *res_assigned;
1100 struct hv_pci_compl comp_pkt;
1101 struct hv_pci_dev *hpdev;
1102 struct pci_packet *pkt;
1106 pkt = malloc(sizeof(*pkt) + sizeof(*res_assigned),
1107 M_DEVBUF, M_WAITOK | M_ZERO);
1109 for (wslot = 0; wslot < 256; wslot++) {
1110 hpdev = get_pcichild_wslot(hbus, wslot);
1114 init_completion(&comp_pkt.host_event);
1116 memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
1117 pkt->completion_func = hv_pci_generic_compl;
1118 pkt->compl_ctxt = &comp_pkt;
1120 res_assigned = (struct pci_resources_assigned *)&pkt->message;
1121 res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
1122 res_assigned->wslot.val = hpdev->desc.wslot.val;
1124 ret = vmbus_chan_send(hbus->sc->chan,
1125 VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
1126 &pkt->message, sizeof(*res_assigned),
1127 (uint64_t)(uintptr_t)pkt);
1129 free_completion(&comp_pkt.host_event);
1133 wait_for_completion(&comp_pkt.host_event);
1134 free_completion(&comp_pkt.host_event);
1136 if (comp_pkt.completion_status < 0) {
1138 device_printf(hbus->pcib,
1139 "failed to send PCI_RESOURCES_ASSIGNED\n");
1144 free(pkt, M_DEVBUF);
1149 hv_send_resources_released(struct hv_pcibus *hbus)
1151 struct pci_child_message pkt;
1152 struct hv_pci_dev *hpdev;
1156 for (wslot = 0; wslot < 256; wslot++) {
1157 hpdev = get_pcichild_wslot(hbus, wslot);
1161 pkt.message_type.type = PCI_RESOURCES_RELEASED;
1162 pkt.wslot.val = hpdev->desc.wslot.val;
1164 ret = vmbus_chan_send(hbus->sc->chan,
1165 VMBUS_CHANPKT_TYPE_INBAND, 0, &pkt, sizeof(pkt), 0);
1173 #define hv_cfg_read(x, s) \
1174 static inline uint##x##_t hv_cfg_read_##s(struct hv_pcibus *bus, \
1175 bus_size_t offset) \
1177 return (bus_read_##s(bus->cfg_res, offset)); \
1180 #define hv_cfg_write(x, s) \
1181 static inline void hv_cfg_write_##s(struct hv_pcibus *bus, \
1182 bus_size_t offset, uint##x##_t val) \
1184 return (bus_write_##s(bus->cfg_res, offset, val)); \
1196 _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size,
1199 struct hv_pcibus *hbus = hpdev->hbus;
1200 bus_size_t addr = CFG_PAGE_OFFSET + where;
1203 * If the attempt is to read the IDs or the ROM BAR, simulate that.
1205 if (where + size <= PCIR_COMMAND) {
1206 memcpy(val, ((uint8_t *)&hpdev->desc.v_id) + where, size);
1207 } else if (where >= PCIR_REVID && where + size <=
1209 memcpy(val, ((uint8_t *)&hpdev->desc.rev) + where -
1211 } else if (where >= PCIR_SUBVEND_0 && where + size <=
1213 memcpy(val, (uint8_t *)&hpdev->desc.subsystem_id + where -
1214 PCIR_SUBVEND_0, size);
1215 } else if (where >= PCIR_BIOS && where + size <=
1217 /* ROM BARs are unimplemented */
1219 } else if ((where >= PCIR_INTLINE && where + size <=
1220 PCIR_INTPIN) ||(where == PCIR_INTPIN && size == 1)) {
1222 * Interrupt Line and Interrupt PIN are hard-wired to zero
1223 * because this front-end only supports message-signaled
1227 } else if (where + size <= CFG_PAGE_SIZE) {
1228 mtx_lock(&hbus->config_lock);
1230 /* Choose the function to be read. */
1231 hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val);
1233 /* Make sure the function was chosen before we start reading.*/
1236 /* Read from that function's config space. */
1239 *((uint8_t *)val) = hv_cfg_read_1(hbus, addr);
1242 *((uint16_t *)val) = hv_cfg_read_2(hbus, addr);
1245 *((uint32_t *)val) = hv_cfg_read_4(hbus, addr);
1249 * Make sure the write was done before we release the lock,
1250 * allowing consecutive reads/writes.
1254 mtx_unlock(&hbus->config_lock);
1256 /* Invalid config read: it's unlikely to reach here. */
1257 memset(val, 0, size);
1262 _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size,
1265 struct hv_pcibus *hbus = hpdev->hbus;
1266 bus_size_t addr = CFG_PAGE_OFFSET + where;
1268 /* SSIDs and ROM BARs are read-only */
1269 if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_CAP_PTR)
1272 if (where >= PCIR_COMMAND && where + size <= CFG_PAGE_SIZE) {
1273 mtx_lock(&hbus->config_lock);
1275 /* Choose the function to be written. */
1276 hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val);
1278 /* Make sure the function was chosen before we start writing.*/
1281 /* Write to that function's config space. */
1284 hv_cfg_write_1(hbus, addr, (uint8_t)val);
1287 hv_cfg_write_2(hbus, addr, (uint16_t)val);
1290 hv_cfg_write_4(hbus, addr, (uint32_t)val);
1295 * Make sure the write was done before we release the lock,
1296 * allowing consecutive reads/writes.
1300 mtx_unlock(&hbus->config_lock);
1302 /* Invalid config write: it's unlikely to reach here. */
1308 vmbus_pcib_set_detaching(void *arg, int pending __unused)
1310 struct hv_pcibus *hbus = arg;
1312 atomic_set_int(&hbus->detaching, 1);
1316 vmbus_pcib_pre_detach(struct hv_pcibus *hbus)
1320 TASK_INIT(&task, 0, vmbus_pcib_set_detaching, hbus);
1323 * Make sure the channel callback won't push any possible new
1324 * PCI_BUS_RELATIONS and PCI_EJECT tasks to sc->taskq.
1326 vmbus_chan_run_task(hbus->sc->chan, &task);
1328 taskqueue_drain_all(hbus->sc->taskq);
1333 * Standard probe entry point.
1337 vmbus_pcib_probe(device_t dev)
1339 if (VMBUS_PROBE_GUID(device_get_parent(dev), dev,
1340 &g_pass_through_dev_type) == 0) {
1341 device_set_desc(dev, "Hyper-V PCI Express Pass Through");
1342 return (BUS_PROBE_DEFAULT);
1348 * Standard attach entry point.
1352 vmbus_pcib_attach(device_t dev)
1354 const int pci_ring_size = (4 * PAGE_SIZE);
1355 const struct hyperv_guid *inst_guid;
1356 struct vmbus_channel *channel;
1357 struct vmbus_pcib_softc *sc;
1358 struct hv_pcibus *hbus;
1362 hbus = malloc(sizeof(*hbus), M_DEVBUF, M_WAITOK | M_ZERO);
1365 channel = vmbus_get_channel(dev);
1366 inst_guid = vmbus_chan_guid_inst(channel);
1367 hbus->pci_domain = inst_guid->hv_guid[9] |
1368 (inst_guid->hv_guid[8] << 8);
1370 mtx_init(&hbus->config_lock, "hbcfg", NULL, MTX_DEF);
1371 mtx_init(&hbus->device_list_lock, "hbdl", NULL, MTX_DEF);
1372 TAILQ_INIT(&hbus->children);
1373 TAILQ_INIT(&hbus->dr_list);
1375 hbus->cfg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1376 0, RM_MAX_END, PCI_CONFIG_MMIO_LENGTH,
1377 RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));
1379 if (!hbus->cfg_res) {
1380 device_printf(dev, "failed to get resource for cfg window\n");
1385 sc = device_get_softc(dev);
1387 sc->rx_buf = malloc(PCIB_PACKET_SIZE, M_DEVBUF, M_WAITOK | M_ZERO);
1391 * The taskq is used to handle PCI_BUS_RELATIONS and PCI_EJECT
1392 * messages. NB: we can't handle the messages in the channel callback
1393 * directly, because the message handlers need to send new messages
1394 * to the host and waits for the host's completion messages, which
1395 * must also be handled by the channel callback.
1397 sc->taskq = taskqueue_create("vmbus_pcib_tq", M_WAITOK,
1398 taskqueue_thread_enqueue, &sc->taskq);
1399 taskqueue_start_threads(&sc->taskq, 1, PI_NET, "vmbus_pcib_tq");
1403 init_completion(&hbus->query_completion);
1404 hbus->query_comp = &hbus->query_completion;
1406 ret = vmbus_chan_open(sc->chan, pci_ring_size, pci_ring_size,
1407 NULL, 0, vmbus_pcib_on_channel_callback, sc);
1411 ret = hv_pci_protocol_negotiation(hbus);
1415 ret = hv_pci_query_relations(hbus);
1418 wait_for_completion(hbus->query_comp);
1420 ret = hv_pci_enter_d0(hbus);
1424 ret = hv_send_resources_allocated(hbus);
1428 hbus->pci_bus = device_add_child(dev, "pci", -1);
1429 if (!hbus->pci_bus) {
1430 device_printf(dev, "failed to create pci bus\n");
1435 bus_generic_attach(dev);
1437 hbus->state = hv_pcibus_installed;
1442 vmbus_pcib_pre_detach(hbus);
1443 vmbus_chan_close(sc->chan);
1445 taskqueue_free(sc->taskq);
1446 free_completion(&hbus->query_completion);
1447 free(sc->rx_buf, M_DEVBUF);
1448 bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res);
1450 mtx_destroy(&hbus->device_list_lock);
1451 mtx_destroy(&hbus->config_lock);
1452 free(hbus, M_DEVBUF);
/*
 * Tear down the synthetic PCI bus: detach children, release host-owned
 * resources, exit D0, then free all software state in roughly the
 * reverse order of attach.
 */
1457 * Standard detach entry point
1460 vmbus_pcib_detach(device_t dev)
1462 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1463 struct hv_pcibus *hbus = sc->hbus;
1464 struct pci_message teardown_packet;
1465 struct pci_bus_relations relations;
/* Stop accepting new hot-add/eject work before tearing down. */
1468 vmbus_pcib_pre_detach(hbus);
1470 if (hbus->state == hv_pcibus_installed)
1471 bus_generic_detach(dev);
/*
 * Present an all-zero (empty) relations set so that any remaining
 * child devices are considered gone and get deleted.
 */
1473 /* Delete any children which might still exist. */
1474 memset(&relations, 0, sizeof(relations));
1475 hv_pci_devices_present(hbus, &relations);
/* Tell the host we have released the device resources. */
1477 ret = hv_send_resources_released(hbus);
1479 device_printf(dev, "failed to send PCI_RESOURCES_RELEASED\n");
/*
 * Ask the host to move the bus out of D0.  The message is sent
 * best-effort (no completion wait); failure is only logged.
 */
1481 teardown_packet.type = PCI_BUS_D0EXIT;
1482 ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0,
1483 &teardown_packet, sizeof(struct pci_message), 0);
1485 device_printf(dev, "failed to send PCI_BUS_D0EXIT\n");
/* Flush any queued relations/eject work before closing the channel. */
1487 taskqueue_drain_all(hbus->sc->taskq);
1488 vmbus_chan_close(sc->chan);
1489 taskqueue_free(sc->taskq);
/* Free the remaining software state allocated during attach. */
1491 free_completion(&hbus->query_completion);
1492 free(sc->rx_buf, M_DEVBUF);
1493 bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res);
1495 mtx_destroy(&hbus->device_list_lock);
1496 mtx_destroy(&hbus->config_lock);
1497 free(hbus, M_DEVBUF);
/*
 * bus_read_ivar method: report bridge instance variables to children.
 * Visibly handles PCIB_IVAR_DOMAIN (the bus's PCI domain); the comment
 * below indicates the bus-number ivar is always 0 on this bus.
 */
1503 vmbus_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *val)
1505 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1508 case PCIB_IVAR_DOMAIN:
1509 *val = sc->hbus->pci_domain;
1513 /* There is only bus 0. */
/*
 * bus_write_ivar method.  Body not visible in this excerpt —
 * NOTE(review): presumably rejects writes (ivars here are read-only);
 * confirm against the full file.
 */
1521 vmbus_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t val)
1526 static struct resource *
/*
 * bus_alloc_resource method for the synthetic bus.  Bus numbers come
 * from the per-domain pool, port I/O is rejected, and a memory request
 * for a 32-bit BAR is clamped so it cannot be placed above 4GB before
 * being passed to the generic allocator.
 */
1527 vmbus_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
1528 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1530 unsigned int bar_no;
1531 struct hv_pci_dev *hpdev;
1532 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1533 struct resource *res;
/* Bus-number allocations are satisfied from the PCI domain's pool. */
1536 if (type == PCI_RES_BUS)
1537 return (pci_domain_alloc_bus(sc->hbus->pci_domain, child, rid,
1538 start, end, count, flags));
1540 /* Devices with port I/O BAR are not supported. */
1541 if (type == SYS_RES_IOPORT)
/* For memory, look up the child's per-device state by wslot. */
1544 if (type == SYS_RES_MEMORY) {
1545 devfn = PCI_DEVFN(pci_get_slot(child),
1546 pci_get_function(child));
1547 hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn));
/* Only rids that map to a probed BAR get the 32-bit clamp below. */
1551 bar_no = PCI_RID2BAR(*rid);
1552 if (bar_no >= MAX_NUM_BARS)
1555 /* Make sure a 32-bit BAR gets a 32-bit address */
1556 if (!(hpdev->probed_bar[bar_no] & PCIM_BAR_MEM_64))
1557 end = ulmin(end, 0xFFFFFFFF);
1560 res = bus_generic_alloc_resource(dev, child, type, rid,
1561 start, end, count, flags);
/*
1563 * If this is a request for a specific range, assume it is
1564 * correct and pass it up to the parent.
 */
1566 if (res == NULL && start + count - 1 == end)
1567 res = bus_generic_alloc_resource(dev, child, type, rid,
1568 start, end, count, flags);
/*
 * bus_release_resource method: bus numbers go back to the domain pool,
 * port I/O is rejected (never allocated on this bus), everything else
 * is handled by the generic implementation.
 */
1573 vmbus_pcib_release_resource(device_t dev, device_t child, int type, int rid,
1576 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1578 if (type == PCI_RES_BUS)
1579 return (pci_domain_release_bus(sc->hbus->pci_domain, child,
1582 if (type == SYS_RES_IOPORT)
1585 return (bus_generic_release_resource(dev, child, type, rid, r));
1588 #if __FreeBSD_version >= 1100000
/*
 * bus_get_cpus method: forward CPU-set queries to our own parent.
 * Guarded because bus_get_cpus() only exists on FreeBSD >= 11.
 */
1590 vmbus_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op,
1591 size_t setsize, cpuset_t *cpuset)
1593 return (bus_get_cpus(pcib, op, setsize, cpuset));
/*
 * pcib_read_config method: read a child's PCI config-space register by
 * forwarding the access to the Hyper-V host via _hv_pcifront_read_config().
 */
1598 vmbus_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
1599 u_int reg, int bytes)
1601 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1602 struct hv_pci_dev *hpdev;
1603 unsigned int devfn = PCI_DEVFN(slot, func);
/* The synthetic bus exposes a single bus number (0). */
1606 KASSERT(bus == 0, ("bus should be 0, but is %u", bus));
/* Translate slot/function into the host's wslot key to find the device. */
1608 hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn));
1612 _hv_pcifront_read_config(hpdev, reg, bytes, &data);
/*
 * pcib_write_config method: write a child's PCI config-space register
 * by forwarding the access to the Hyper-V host.  Mirrors
 * vmbus_pcib_read_config() above.
 */
1618 vmbus_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
1619 u_int reg, uint32_t data, int bytes)
1621 struct vmbus_pcib_softc *sc = device_get_softc(dev);
1622 struct hv_pci_dev *hpdev;
1623 unsigned int devfn = PCI_DEVFN(slot, func);
/* The synthetic bus exposes a single bus number (0). */
1625 KASSERT(bus == 0, ("bus should be 0, but is %u", bus));
1627 hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn));
1631 _hv_pcifront_write_config(hpdev, reg, bytes, data);
/*
 * pcib_route_interrupt method: INTx is never routed on the synthetic
 * bus, so every pin maps to PCI_INVALID_IRQ.
 */
1635 vmbus_pcib_route_intr(device_t pcib, device_t dev, int pin)
1637 /* We only support MSI/MSI-X and don't support INTx interrupt. */
1638 return (PCI_INVALID_IRQ);
/* pcib_alloc_msi method: delegate MSI allocation to the parent pcib. */
1642 vmbus_pcib_alloc_msi(device_t pcib, device_t dev, int count,
1643 int maxcount, int *irqs)
1645 return (PCIB_ALLOC_MSI(device_get_parent(pcib), dev, count, maxcount,
/* pcib_release_msi method: delegate MSI release to the parent pcib. */
1650 vmbus_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
1652 return (PCIB_RELEASE_MSI(device_get_parent(pcib), dev, count, irqs));
/* pcib_alloc_msix method: delegate MSI-X allocation to the parent pcib. */
1656 vmbus_pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
1658 return (PCIB_ALLOC_MSIX(device_get_parent(pcib), dev, irq));
/* pcib_release_msix method: delegate MSI-X release to the parent pcib. */
1662 vmbus_pcib_release_msix(device_t pcib, device_t dev, int irq)
1664 return (PCIB_RELEASE_MSIX(device_get_parent(pcib), dev, irq));
/*
 * Fields of the x86 MSI message used by vmbus_pcib_map_msi():
 * destination APIC ID mask in the address word, and vector /
 * fixed-delivery constants (reusing the I/O APIC IOART_* values)
 * in the data word.
 */
1667 #define MSI_INTEL_ADDR_DEST 0x000ff000
1668 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */
1669 #define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED
/*
 * pcib_map_msi method: compose an MSI/MSI-X message for a child device.
 * The parent pcib first composes a baseline (addr, data) pair; we decode
 * the target CPU and vector from it, ask the host to create the interrupt
 * (PCI_CREATE_INTERRUPT_MESSAGE), and return the host-provided
 * address/data that must actually be programmed into the device.
 */
1672 vmbus_pcib_map_msi(device_t pcib, device_t child, int irq,
1673 uint64_t *addr, uint32_t *data)
1676 struct hv_pci_dev *hpdev;
1680 struct hv_irq_desc *hid, *tmp_hid;
1681 unsigned int cpu, vcpu_id;
1682 unsigned int vector;
1684 struct vmbus_pcib_softc *sc = device_get_softc(pcib);
1685 struct pci_create_interrupt *int_pkt;
1686 struct compose_comp_ctxt comp;
1688 struct pci_packet pkt;
1689 uint8_t buffer[sizeof(struct pci_create_interrupt)];
/* Locate the child's per-device state by its wslot. */
1694 devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child));
1695 hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn));
/* Let the parent pcib compose the baseline MSI address/data first. */
1699 ret = PCIB_MAP_MSI(device_get_parent(pcib), child, irq,
/*
 * If this IRQ was mapped before, free the stale interrupt
 * descriptor(s) so the old host-side mapping is torn down.
 */
1704 TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) {
1705 if (hid->irq == irq) {
1706 TAILQ_REMOVE(&hpdev->irq_desc_list, hid, link);
1707 hv_int_desc_free(hpdev, hid);
/*
 * Decode the parent-composed message: the destination APIC ID sits in
 * the MSI_INTEL_ADDR_DEST field (bits 19:12 of the address), the
 * vector in the low data bits; the APIC ID is then translated to the
 * VMBus vCPU ID the host understands.
 */
1712 cpu = (v_addr & MSI_INTEL_ADDR_DEST) >> 12;
1713 vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu);
1714 vector = v_data & MSI_INTEL_DATA_INTVEC;
/* Completion is signalled by hv_pci_compose_compl() on host reply. */
1716 init_completion(&comp.comp_pkt.host_event);
1718 memset(&ctxt, 0, sizeof(ctxt));
1719 ctxt.pkt.completion_func = hv_pci_compose_compl;
/* NOTE(review): "∁" below is a mangled "&comp;" from extraction. */
1720 ctxt.pkt.compl_ctxt = ∁
/* Build the PCI_CREATE_INTERRUPT_MESSAGE request for the host. */
1722 int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
1723 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1724 int_pkt->wslot.val = hpdev->desc.wslot.val;
1725 int_pkt->int_desc.vector = vector;
1726 int_pkt->int_desc.vector_count = 1;
1727 int_pkt->int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED;
1728 int_pkt->int_desc.cpu_mask = 1ULL << vcpu_id;
/* Send with &ctxt.pkt as transaction ID so the reply finds our context. */
1730 ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND,
1731 VMBUS_CHANPKT_FLAG_RC, int_pkt, sizeof(*int_pkt),
1732 (uint64_t)(uintptr_t)&ctxt.pkt);
/* On send failure the completion is freed without being waited on. */
1734 free_completion(&comp.comp_pkt.host_event);
/* Block until the host answers with the composed interrupt. */
1738 wait_for_completion(&comp.comp_pkt.host_event);
1739 free_completion(&comp.comp_pkt.host_event);
1741 if (comp.comp_pkt.completion_status < 0)
/* Hand the host-provided MSI address/data back to the caller. */
1744 *addr = comp.int_desc.address;
1745 *data = comp.int_desc.data;
/* Record the mapping so it can be freed on remap or device teardown. */
1747 hid = malloc(sizeof(struct hv_irq_desc), M_DEVBUF, M_WAITOK | M_ZERO);
1749 hid->desc = comp.int_desc;
1750 TAILQ_INSERT_TAIL(&hpdev->irq_desc_list, hid, link);
/*
 * Newbus dispatch table for the Hyper-V synthetic PCI bridge: standard
 * device lifecycle entry points, bus resource/ivar methods, and the
 * pcib config-space/MSI interface implemented above.
 */
1755 static device_method_t vmbus_pcib_methods[] = {
1756 /* Device interface */
1757 DEVMETHOD(device_probe, vmbus_pcib_probe),
1758 DEVMETHOD(device_attach, vmbus_pcib_attach),
1759 DEVMETHOD(device_detach, vmbus_pcib_detach),
1760 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1761 DEVMETHOD(device_suspend, bus_generic_suspend),
1762 DEVMETHOD(device_resume, bus_generic_resume),
/* Bus interface */
1765 DEVMETHOD(bus_read_ivar, vmbus_pcib_read_ivar),
1766 DEVMETHOD(bus_write_ivar, vmbus_pcib_write_ivar),
1767 DEVMETHOD(bus_alloc_resource, vmbus_pcib_alloc_resource),
1768 DEVMETHOD(bus_release_resource, vmbus_pcib_release_resource),
1769 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
1770 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
1771 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
1772 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
1773 #if __FreeBSD_version >= 1100000
1774 DEVMETHOD(bus_get_cpus, vmbus_pcib_get_cpus),
1777 /* pcib interface */
1778 DEVMETHOD(pcib_maxslots, pcib_maxslots),
1779 DEVMETHOD(pcib_read_config, vmbus_pcib_read_config),
1780 DEVMETHOD(pcib_write_config, vmbus_pcib_write_config),
1781 DEVMETHOD(pcib_route_interrupt, vmbus_pcib_route_intr),
1782 DEVMETHOD(pcib_alloc_msi, vmbus_pcib_alloc_msi),
1783 DEVMETHOD(pcib_release_msi, vmbus_pcib_release_msi),
1784 DEVMETHOD(pcib_alloc_msix, vmbus_pcib_alloc_msix),
1785 DEVMETHOD(pcib_release_msix, vmbus_pcib_release_msix),
1786 DEVMETHOD(pcib_map_msi, vmbus_pcib_map_msi),
1787 DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),
/*
 * Register under the generic "pcib" class, attached to the vmbus, and
 * declare module dependencies on vmbus and pci.
 */
1792 static devclass_t pcib_devclass;
1794 DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods,
1795 sizeof(struct vmbus_pcib_softc));
1796 DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, pcib_devclass, 0, 0);
1797 MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1);
1798 MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1);
1800 #endif /* NEW_PCIB */