2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
35 #include "hv_vmbus_priv.h"
/* Handler invoked for one incoming VMBUS channel protocol message. */
37 typedef void (*hv_pfn_channel_msg_handler)(hv_vmbus_channel_msg_header* msg);
/*
 * One entry of the channel-message dispatch table: a message type plus the
 * handler to run for it (NULL means the message is silently ignored).
 */
39 typedef struct hv_vmbus_channel_msg_table_entry {
40 hv_vmbus_channel_msg_type messageType;	/* message identifier */
41 hv_pfn_channel_msg_handler messageHandler;	/* handler; NULL = ignore */
42 } hv_vmbus_channel_msg_table_entry;
/*
 * Forward declarations for the per-message handlers wired into
 * g_channel_message_table below, plus the offer work-item callback.
 */
48 static void vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr);
49 static void vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr);
50 static void vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr);
51 static void vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr);
52 static void vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr);
53 static void vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr);
54 static void vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr);
55 static void vmbus_channel_process_offer(void *context);
/*
 * Select the best outgoing (sub-)channel for the current CPU; see the
 * definition later in this file.  Parameter name fixed to match the
 * definition ("promary" -> "primary").
 */
56 struct hv_vmbus_channel*
57 vmbus_select_outgoing_channel(struct hv_vmbus_channel *primary);
60 * Channel message dispatch table
/*
 * Indexed directly by hv_vmbus_channel_msg_type in
 * hv_vmbus_on_channel_message(), so entry order must match the enum.
 * A NULL handler means the message type is received but ignored.
 */
62 hv_vmbus_channel_msg_table_entry
63 g_channel_message_table[HV_CHANNEL_MESSAGE_COUNT] = {
64 { HV_CHANNEL_MESSAGE_INVALID, NULL },
65 { HV_CHANNEL_MESSAGE_OFFER_CHANNEL, vmbus_channel_on_offer },
66 { HV_CHANNEL_MESSAGE_RESCIND_CHANNEL_OFFER,
67 vmbus_channel_on_offer_rescind },
68 { HV_CHANNEL_MESSAGE_REQUEST_OFFERS, NULL },
69 { HV_CHANNEL_MESSAGE_ALL_OFFERS_DELIVERED,
70 vmbus_channel_on_offers_delivered },
71 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL, NULL },
72 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL_RESULT,
73 vmbus_channel_on_open_result },
74 { HV_CHANNEL_MESSAGE_CLOSE_CHANNEL, NULL },
75 { HV_CHANNEL_MESSAGEL_GPADL_HEADER, NULL },
76 { HV_CHANNEL_MESSAGE_GPADL_BODY, NULL },
77 { HV_CHANNEL_MESSAGE_GPADL_CREATED,
78 vmbus_channel_on_gpadl_created },
79 { HV_CHANNEL_MESSAGE_GPADL_TEARDOWN, NULL },
80 { HV_CHANNEL_MESSAGE_GPADL_TORNDOWN,
81 vmbus_channel_on_gpadl_torndown },
82 { HV_CHANNEL_MESSAGE_REL_ID_RELEASED, NULL },
83 { HV_CHANNEL_MESSAGE_INITIATED_CONTACT, NULL },
84 { HV_CHANNEL_MESSAGE_VERSION_RESPONSE,
85 vmbus_channel_on_version_response },
86 { HV_CHANNEL_MESSAGE_UNLOAD, NULL }
91 * Implementation of the work abstraction.
/*
 * Taskqueue trampoline: runs one queued hv_work_item's callback.
 * 'pending' is required by the taskqueue(9) callback signature but is
 * not used here.
 */
94 work_item_callback(void *work, int pending)
96 struct hv_work_item *w = (struct hv_work_item *)work;
99 * Serialize work execution.
/* Optional semaphore ensures at most one item of this queue runs at a time. */
101 if (w->wq->work_sema != NULL) {
102 sema_wait(w->wq->work_sema);
105 w->callback(w->context);
107 if (w->wq->work_sema != NULL) {
108 sema_post(w->wq->work_sema);
/*
 * Create a named work queue backed by a single-threaded taskqueue(9).
 * Returns the new queue, or NULL on failure (taskqueue allocation or
 * thread start failure).  'name' selects the serialization semaphore:
 * any name other than "vmbusQ" uses the global control semaphore.
 */
114 struct hv_work_queue*
115 hv_work_queue_create(char* name)
117 static unsigned int qid = 0;	/* monotonically increasing queue id for unique names */
120 struct hv_work_queue* wq;
122 wq = malloc(sizeof(struct hv_work_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
123 KASSERT(wq != NULL, ("Error VMBUS: Failed to allocate work_queue\n"));
128 * We use work abstraction to handle messages
129 * coming from the host and these are typically offers.
130 * Some FreeBSD drivers appear to have a concurrency issue
131 * where probe/attach needs to be serialized. We ensure that
132 * by having only one thread process work elements in a
133 * specific queue by serializing work execution.
136 if (strcmp(name, "vmbusQ") == 0) {
138 } else { /* control */
141 * Initialize semaphore for this queue by pointing
142 * to the global semaphore used for synchronizing all
145 wq->work_sema = &hv_vmbus_g_connection.control_sema;
148 sprintf(qname, "hv_%s_%u", name, qid);
151 * Fixme: FreeBSD 8.2 has a different prototype for
152 * taskqueue_create(), and for certain other taskqueue functions.
153 * We need to research the implications of these changes.
154 * Fixme: Not sure when the changes were introduced.
156 wq->queue = taskqueue_create(qname, M_NOWAIT, taskqueue_thread_enqueue,
158 #if __FreeBSD_version < 800000
163 if (wq->queue == NULL) {
/* One service thread per queue keeps item execution strictly ordered. */
168 if (taskqueue_start_threads(&wq->queue, 1, pri, "%s taskq", qname)) {
169 taskqueue_free(wq->queue);
/*
 * Tear down a work queue created by hv_work_queue_create().
 * NOTE(review): pending items are not drained first — see the KYS
 * comments below; confirm whether callers guarantee an empty queue.
 */
180 hv_work_queue_close(struct hv_work_queue *wq)
183 * KYS: Need to drain the taskqueue
184 * before we close the hv_work_queue.
186 /*KYS: taskqueue_drain(wq->tq, ); */
187 taskqueue_free(wq->queue);
192 * @brief Create work item
/*
 * Allocate a work item wrapping (callback, context) and enqueue it on
 * 'wq'.  The item is freed by the work machinery after it runs (the
 * free is outside this view — confirm in work_item_callback).
 * Returns the taskqueue_enqueue() result (0 on success).
 */
196 struct hv_work_queue *wq,
197 void (*callback)(void *), void *context)
199 struct hv_work_item *w = malloc(sizeof(struct hv_work_item),
200 M_DEVBUF, M_NOWAIT | M_ZERO);
201 KASSERT(w != NULL, ("Error VMBUS: Failed to allocate WorkItem\n"));
205 w->callback = callback;
206 w->context = context;
209 TASK_INIT(&w->work, 0, work_item_callback, w);
211 return (taskqueue_enqueue(wq->queue, &w->work));
215 * @brief Rescind the offer by initiating a device removal
/*
 * Work-item callback: 'context' is the hv_vmbus_channel whose offer the
 * host rescinded; unregister its child device to trigger detach.
 */
218 vmbus_channel_process_rescind_offer(void *context)
220 hv_vmbus_channel* channel = (hv_vmbus_channel*) context;
221 hv_vmbus_child_device_unregister(channel->device);
225 * @brief Allocate and initialize a vmbus channel object
/*
 * Returns a zero/partially-initialized channel with its locks, sub-channel
 * list, and per-channel control work queue set up, or NULL if the work
 * queue cannot be created (allocation failures assert via KASSERT).
 */
228 hv_vmbus_allocate_channel(void)
230 hv_vmbus_channel* channel;
232 channel = (hv_vmbus_channel*) malloc(
233 sizeof(hv_vmbus_channel),
236 KASSERT(channel != NULL, ("Error VMBUS: Failed to allocate channel!"));
240 mtx_init(&channel->inbound_lock, "channel inbound", NULL, MTX_DEF);
241 mtx_init(&channel->sc_lock, "vmbus multi channel", NULL, MTX_DEF);
243 TAILQ_INIT(&channel->sc_list_anchor);
245 channel->control_work_queue = hv_work_queue_create("control");
/* On work-queue failure undo the partial init and report failure. */
247 if (channel->control_work_queue == NULL) {
248 mtx_destroy(&channel->inbound_lock);
249 free(channel, M_DEVBUF);
257 * @brief Release the vmbus channel object itself
/*
 * Work-item callback: closes the channel's control work queue and frees
 * the channel.  Runs on the vmbus work queue so a channel never destroys
 * its own worker thread (see hv_vmbus_free_vmbus_channel).
 */
260 ReleaseVmbusChannel(void *context)
262 hv_vmbus_channel* channel = (hv_vmbus_channel*) context;
263 hv_work_queue_close(channel->control_work_queue);
264 free(channel, M_DEVBUF);
268 * @brief Release the resources used by the vmbus channel object
/*
 * Destroys the channel's locks, then defers the final free to the global
 * vmbus work queue via ReleaseVmbusChannel.
 */
271 hv_vmbus_free_vmbus_channel(hv_vmbus_channel* channel)
273 mtx_destroy(&channel->sc_lock);
274 mtx_destroy(&channel->inbound_lock);
276 * We have to release the channel's workqueue/thread in
277 * the vmbus's workqueue/thread context
278 * ie we can't destroy ourselves
280 hv_queue_work_item(hv_vmbus_g_connection.work_queue,
281 ReleaseVmbusChannel, (void *) channel);
285 * @brief Process the offer by creating a channel/device
286 * associated with this offer
/*
 * Work-item callback ('context' is the newly offered channel).  Checks
 * for a duplicate offer (same interface type + instance GUID); duplicates
 * become sub-channels of the existing primary.  New primary offers are
 * registered on the bus, which kicks off driver probe/attach; on failure
 * the channel is removed from the global list and freed.
 */
289 vmbus_channel_process_offer(void *context)
291 hv_vmbus_channel* new_channel;
293 hv_vmbus_channel* channel;
296 new_channel = (hv_vmbus_channel*) context;
301 * Make sure this is a new offer
303 mtx_lock(&hv_vmbus_g_connection.channel_lock);
305 TAILQ_FOREACH(channel, &hv_vmbus_g_connection.channel_anchor,
/* Duplicate test: both interface type and instance GUIDs must match. */
308 if (memcmp(&channel->offer_msg.offer.interface_type,
309 &new_channel->offer_msg.offer.interface_type,
310 sizeof(hv_guid)) == 0 &&
311 memcmp(&channel->offer_msg.offer.interface_instance,
312 &new_channel->offer_msg.offer.interface_instance,
313 sizeof(hv_guid)) == 0) {
322 &hv_vmbus_g_connection.channel_anchor,
326 mtx_unlock(&hv_vmbus_g_connection.channel_lock);
328 /*XXX add new channel to percpu_list */
332 * Check if this is a sub channel.
334 if (new_channel->offer_msg.offer.sub_channel_index != 0) {
336 * It is a sub channel offer, process it.
/* 'channel' (the matched duplicate) becomes this sub-channel's primary. */
338 new_channel->primary_channel = channel;
339 mtx_lock(&channel->sc_lock);
341 &channel->sc_list_anchor,
344 mtx_unlock(&channel->sc_lock);
346 /* Insert new channel into channel_anchor. */
347 printf("Storvsc get multi-channel offer, rel=%u.\n",
348 new_channel->offer_msg.child_rel_id);
349 mtx_lock(&hv_vmbus_g_connection.channel_lock);
350 TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_anchor,
351 new_channel, list_entry);
352 mtx_unlock(&hv_vmbus_g_connection.channel_lock);
355 printf("VMBUS: new multi-channel offer <%p>.\n",
358 /*XXX add it to percpu_list */
360 new_channel->state = HV_CHANNEL_OPEN_STATE;
/* Notify the primary's owner that a sub-channel arrived, if it asked. */
361 if (channel->sc_creation_callback != NULL) {
362 channel->sc_creation_callback(new_channel);
367 hv_vmbus_free_vmbus_channel(new_channel);
371 new_channel->state = HV_CHANNEL_OPEN_STATE;
374 * Start the process of binding this offer to the driver
375 * (We need to set the device field before calling
376 * hv_vmbus_child_device_add())
378 new_channel->device = hv_vmbus_child_device_create(
379 new_channel->offer_msg.offer.interface_type,
380 new_channel->offer_msg.offer.interface_instance, new_channel);
383 * Add the new device to the bus. This will kick off device-driver
384 * binding which eventually invokes the device driver's AddDevice()
387 ret = hv_vmbus_child_device_register(new_channel->device);
/* Registration failed: roll back the global-list insertion and free. */
389 mtx_lock(&hv_vmbus_g_connection.channel_lock);
391 &hv_vmbus_g_connection.channel_anchor,
394 mtx_unlock(&hv_vmbus_g_connection.channel_lock);
395 hv_vmbus_free_vmbus_channel(new_channel);
400 * Array of device guids that are performance critical. We try to distribute
401 * the interrupt load for these devices across all online cpus.
403 static const hv_guid high_perf_devices[] = {
417 * We use this static number to distribute the channel interrupt load.
/* Round-robin counter; advanced in vmbus_channel_select_cpu(). */
419 static uint32_t next_vcpu;
422 * Starting with Win8, we can statically distribute the incoming
423 * channel interrupt load by binding a channel to VCPU. We
424 * implement here a simple round robin scheme for distributing
425 * the interrupt load.
426 * We will bind channels that are not performance critical to cpu 0 and
427 * performance critical channels (IDE, SCSI and Network) will be uniformly
428 * distributed across all available CPUs.
/*
 * Sets channel->target_cpu (guest view) and channel->target_vcpu (host
 * view) based on 'guid' and the negotiated protocol version.
 */
431 vmbus_channel_select_cpu(hv_vmbus_channel *channel, hv_guid *guid)
433 uint32_t current_cpu;
435 boolean_t is_perf_channel = FALSE;
/* Is this offer's interface GUID in the high-performance device table? */
437 for (i = PERF_CHN_NIC; i < MAX_PERF_CHN; i++) {
438 if (memcmp(guid->data, high_perf_devices[i].data,
439 sizeof(hv_guid)) == 0) {
440 is_perf_channel = TRUE;
/* Pre-Win8 hosts and non-perf channels are pinned to cpu/vcpu 0. */
445 if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) ||
446 (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) ||
447 (!is_perf_channel)) {
448 /* Host's view of guest cpu */
449 channel->target_vcpu = 0;
450 /* Guest's own view of cpu */
451 channel->target_cpu = 0;
454 /* mp_ncpus should have the number cpus currently online */
455 current_cpu = (++next_vcpu % mp_ncpus);
456 channel->target_cpu = current_cpu;
457 channel->target_vcpu =
458 hv_vmbus_g_context.hv_vcpu_index[current_cpu];
460 printf("VMBUS: Total online cpus %d, assign perf channel %d "
461 "to vcpu %d, cpu %d\n", mp_ncpus, i, channel->target_vcpu,
466 * @brief Handler for channel offers from Hyper-V/Azure
468 * Handler for channel offers from vmbus in parent partition. We ignore
469 * all offers except network and storage offers. For each network and storage
470 * offers, we create a channel object and queue a work item to the channel
471 * object to process the offer synchronously
474 vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr)
476 hv_vmbus_channel_offer_channel* offer;
477 hv_vmbus_channel* new_channel;
479 offer = (hv_vmbus_channel_offer_channel*) hdr;
482 hv_guid *guidInstance;
484 guidType = &offer->offer.interface_type;
485 guidInstance = &offer->offer.interface_instance;
487 /* Allocate the channel object and save this offer */
488 new_channel = hv_vmbus_allocate_channel();
489 if (new_channel == NULL)
493 * By default we setup state to enable batched
494 * reading. A specific service can choose to
495 * disable this prior to opening the channel.
497 new_channel->batched_reading = TRUE;
/* Hypercall input must be suitably aligned within the channel object. */
499 new_channel->signal_event_param =
500 (hv_vmbus_input_signal_event *)
501 (HV_ALIGN_UP((unsigned long)
502 &new_channel->signal_event_buffer,
503 HV_HYPERCALL_PARAM_ALIGN));
505 new_channel->signal_event_param->connection_id.as_uint32_t = 0;
506 new_channel->signal_event_param->connection_id.u.id =
507 HV_VMBUS_EVENT_CONNECTION_ID;
508 new_channel->signal_event_param->flag_number = 0;
509 new_channel->signal_event_param->rsvd_z = 0;
/* Post-WS2008 hosts supply a per-channel connection id in the offer. */
511 if (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) {
512 new_channel->is_dedicated_interrupt =
513 (offer->is_dedicated_interrupt != 0);
514 new_channel->signal_event_param->connection_id.u.id =
515 offer->connection_id;
519 * Bind the channel to a chosen cpu.
521 vmbus_channel_select_cpu(new_channel,
522 &offer->offer.interface_type);
/* Stash the full offer; monitor id maps to a (group, bit) pair. */
524 memcpy(&new_channel->offer_msg, offer,
525 sizeof(hv_vmbus_channel_offer_channel));
526 new_channel->monitor_group = (uint8_t) offer->monitor_id / 32;
527 new_channel->monitor_bit = (uint8_t) offer->monitor_id % 32;
529 /* TODO: Make sure the offer comes from our parent partition */
/* Defer the heavy lifting to the channel's control work queue. */
531 new_channel->control_work_queue,
532 vmbus_channel_process_offer,
537 * @brief Rescind offer handler.
539 * We queue a work item to process this offer
/*
 * Looks up the channel by the rescinded child relation id and queues
 * vmbus_channel_process_rescind_offer on that channel's work queue.
 */
543 vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr)
545 hv_vmbus_channel_rescind_offer* rescind;
546 hv_vmbus_channel* channel;
548 rescind = (hv_vmbus_channel_rescind_offer*) hdr;
550 channel = hv_vmbus_get_channel_from_rel_id(rescind->child_rel_id);
554 hv_queue_work_item(channel->control_work_queue,
555 vmbus_channel_process_rescind_offer, channel);
560 * @brief Invoked when all offers have been delivered.
/* Intentionally a no-op; presence in the dispatch table acknowledges the message. */
563 vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr)
568 * @brief Open result handler.
570 * This is invoked when we received a response
571 * to our channel open request. Find the matching request, copy the
572 * response and signal the requesting thread.
575 vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr)
577 hv_vmbus_channel_open_result* result;
578 hv_vmbus_channel_msg_info* msg_info;
579 hv_vmbus_channel_msg_header* requestHeader;
580 hv_vmbus_channel_open_channel* openMsg;
582 result = (hv_vmbus_channel_open_result*) hdr;
585 * Find the open msg, copy the result and signal/unblock the wait event
587 mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
589 TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
591 requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
593 if (requestHeader->message_type ==
594 HV_CHANNEL_MESSAGE_OPEN_CHANNEL) {
595 openMsg = (hv_vmbus_channel_open_channel*) msg_info->msg;
/* Match on both the channel relation id and the open transaction id. */
596 if (openMsg->child_rel_id == result->child_rel_id
597 && openMsg->open_id == result->open_id) {
598 memcpy(&msg_info->response.open_result, result,
599 sizeof(hv_vmbus_channel_open_result));
/* Wake the thread blocked in the open path waiting for this reply. */
600 sema_post(&msg_info->wait_sema);
605 mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
610 * @brief GPADL created handler.
612 * This is invoked when we received a response
613 * to our gpadl create request. Find the matching request, copy the
614 * response and signal the requesting thread.
617 vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr)
619 hv_vmbus_channel_gpadl_created* gpadl_created;
620 hv_vmbus_channel_msg_info* msg_info;
621 hv_vmbus_channel_msg_header* request_header;
622 hv_vmbus_channel_gpadl_header* gpadl_header;
624 gpadl_created = (hv_vmbus_channel_gpadl_created*) hdr;
626 /* Find the establish msg, copy the result and signal/unblock
629 mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
630 TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
632 request_header = (hv_vmbus_channel_msg_header*) msg_info->msg;
633 if (request_header->message_type ==
634 HV_CHANNEL_MESSAGEL_GPADL_HEADER) {
636 (hv_vmbus_channel_gpadl_header*) request_header;
/* Match on both the channel relation id and the GPADL handle. */
638 if ((gpadl_created->child_rel_id == gpadl_header->child_rel_id)
639 && (gpadl_created->gpadl == gpadl_header->gpadl)) {
640 memcpy(&msg_info->response.gpadl_created,
642 sizeof(hv_vmbus_channel_gpadl_created));
/* Wake the thread waiting for the GPADL-create completion. */
643 sema_post(&msg_info->wait_sema);
648 mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
652 * @brief GPADL torndown handler.
654 * This is invoked when we received a response
655 * to our gpadl teardown request. Find the matching request, copy the
656 * response and signal the requesting thread
659 vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr)
661 hv_vmbus_channel_gpadl_torndown* gpadl_torndown;
662 hv_vmbus_channel_msg_info* msg_info;
663 hv_vmbus_channel_msg_header* requestHeader;
664 hv_vmbus_channel_gpadl_teardown* gpadlTeardown;
666 gpadl_torndown = (hv_vmbus_channel_gpadl_torndown*)hdr;
669 * Find the open msg, copy the result and signal/unblock the
673 mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
675 TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
677 requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
679 if (requestHeader->message_type
680 == HV_CHANNEL_MESSAGE_GPADL_TEARDOWN) {
682 (hv_vmbus_channel_gpadl_teardown*) requestHeader;
/* Match the teardown request by GPADL handle only. */
684 if (gpadl_torndown->gpadl == gpadlTeardown->gpadl) {
685 memcpy(&msg_info->response.gpadl_torndown,
687 sizeof(hv_vmbus_channel_gpadl_torndown));
/* Wake the thread waiting for the teardown completion. */
688 sema_post(&msg_info->wait_sema);
693 mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
697 * @brief Version response handler.
699 * This is invoked when we received a response
700 * to our initiate contact request. Find the matching request, copy the
701 * response and signal the requesting thread.
704 vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr)
706 hv_vmbus_channel_msg_info* msg_info;
707 hv_vmbus_channel_msg_header* requestHeader;
708 hv_vmbus_channel_initiate_contact* initiate;
709 hv_vmbus_channel_version_response* versionResponse;
711 versionResponse = (hv_vmbus_channel_version_response*)hdr;
713 mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
714 TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
716 requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
/* Any pending INITIATED_CONTACT request matches; there is at most one. */
717 if (requestHeader->message_type
718 == HV_CHANNEL_MESSAGE_INITIATED_CONTACT) {
720 (hv_vmbus_channel_initiate_contact*) requestHeader;
721 memcpy(&msg_info->response.version_response,
723 sizeof(hv_vmbus_channel_version_response));
/* Wake the thread waiting in the connect/negotiate path. */
724 sema_post(&msg_info->wait_sema);
727 mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
732 * @brief Handler for channel protocol messages.
734 * This is invoked in the vmbus worker thread context.
/*
 * 'context' is an hv_vmbus_message; dispatches its payload through
 * g_channel_message_table after bounds-checking the message type.
 */
737 hv_vmbus_on_channel_message(void *context)
739 hv_vmbus_message* msg;
740 hv_vmbus_channel_msg_header* hdr;
743 msg = (hv_vmbus_message*) context;
744 hdr = (hv_vmbus_channel_msg_header*) msg->u.payload;
745 size = msg->header.payload_size;
/* Reject out-of-range types before indexing the dispatch table. */
747 if (hdr->message_type >= HV_CHANNEL_MESSAGE_COUNT) {
752 if (g_channel_message_table[hdr->message_type].messageHandler) {
753 g_channel_message_table[hdr->message_type].messageHandler(hdr);
756 /* Free the msg that was allocated in VmbusOnMsgDPC() */
761 * @brief Send a request to get all our pending offers.
/*
 * Posts a REQUEST_OFFERS message to the host; the offers themselves
 * arrive asynchronously via vmbus_channel_on_offer().  Returns the
 * hv_vmbus_post_message() status.
 */
764 hv_vmbus_request_channel_offers(void)
767 hv_vmbus_channel_msg_header* msg;
768 hv_vmbus_channel_msg_info* msg_info;
/* Info struct carries the header in its trailing msg[] storage. */
770 msg_info = (hv_vmbus_channel_msg_info *)
771 malloc(sizeof(hv_vmbus_channel_msg_info)
772 + sizeof(hv_vmbus_channel_msg_header), M_DEVBUF, M_NOWAIT);
774 if (msg_info == NULL) {
776 printf("Error VMBUS: malloc failed for Request Offers\n");
780 msg = (hv_vmbus_channel_msg_header*) msg_info->msg;
781 msg->message_type = HV_CHANNEL_MESSAGE_REQUEST_OFFERS;
783 ret = hv_vmbus_post_message(msg, sizeof(hv_vmbus_channel_msg_header));
786 free(msg_info, M_DEVBUF);
792 * @brief Release channels that are unattached/unconnected (i.e., no drivers associated)
/*
 * Drains the global channel list, unregistering each channel's child
 * device and freeing the channel.  Called with no channels expected to
 * be in active use (e.g. at unload) — confirm at call sites.
 */
795 hv_vmbus_release_unattached_channels(void)
797 hv_vmbus_channel *channel;
799 mtx_lock(&hv_vmbus_g_connection.channel_lock);
801 while (!TAILQ_EMPTY(&hv_vmbus_g_connection.channel_anchor)) {
802 channel = TAILQ_FIRST(&hv_vmbus_g_connection.channel_anchor);
803 TAILQ_REMOVE(&hv_vmbus_g_connection.channel_anchor,
804 channel, list_entry);
806 hv_vmbus_child_device_unregister(channel->device);
807 hv_vmbus_free_vmbus_channel(channel);
809 mtx_unlock(&hv_vmbus_g_connection.channel_lock);
813 * @brief Select the best outgoing channel
815 * The channel whose vcpu binding is closest to the current vcpu will
817 * If no multi-channel, always select primary channel
819 * @param primary - primary channel
821 struct hv_vmbus_channel *
822 vmbus_select_outgoing_channel(struct hv_vmbus_channel *primary)
824 hv_vmbus_channel *new_channel = NULL;
825 hv_vmbus_channel *outgoing_channel = primary;
826 int old_cpu_distance = 0;
827 int new_cpu_distance = 0;
829 int smp_pro_id = PCPU_GET(cpuid);
/* No sub-channels: the primary is the only choice. */
831 if (TAILQ_EMPTY(&primary->sc_list_anchor)) {
832 return outgoing_channel;
/* Guard against a cpu id beyond the vcpu index table. */
835 if (smp_pro_id >= MAXCPU) {
836 return outgoing_channel;
839 cur_vcpu = hv_vmbus_g_context.hv_vcpu_index[smp_pro_id];
/* Scan opened sub-channels for the vcpu binding nearest to cur_vcpu. */
841 TAILQ_FOREACH(new_channel, &primary->sc_list_anchor, sc_list_entry) {
842 if (new_channel->state != HV_CHANNEL_OPENED_STATE){
/* Exact vcpu match cannot be beaten. */
846 if (new_channel->target_vcpu == cur_vcpu){
/* Distances computed as absolute differences without abs() on unsigned. */
850 old_cpu_distance = ((outgoing_channel->target_vcpu > cur_vcpu) ?
851 (outgoing_channel->target_vcpu - cur_vcpu) :
852 (cur_vcpu - outgoing_channel->target_vcpu));
854 new_cpu_distance = ((new_channel->target_vcpu > cur_vcpu) ?
855 (new_channel->target_vcpu - cur_vcpu) :
856 (cur_vcpu - new_channel->target_vcpu));
858 if (old_cpu_distance < new_cpu_distance) {
862 outgoing_channel = new_channel;
865 return(outgoing_channel);