2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
35 #include "hv_vmbus_priv.h"
/*
 * Forward declarations of the vmbus channel-message handlers installed in
 * g_channel_message_table below.  The "*_internal" variants run later in
 * work-item (taskqueue) context, queued by their non-internal counterparts.
 * NOTE(review): this chunk retains the original file's line numbers at the
 * start of each line and some intervening lines are not visible here.
 */
41 static void vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr);
42 static void vmbus_channel_on_offer_internal(void* context);
43 static void vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr);
44 static void vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr);
45 static void vmbus_channel_on_offer_rescind_internal(void* context);
46 static void vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr);
47 static void vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr);
48 static void vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr);
49 static void vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr);
/*
 * Dispatch table mapping each vmbus channel message type to its handler.
 * Entry order appears to mirror the message-type enum, presumably so the
 * message type value can be used to index the table -- TODO confirm
 * against the HV_CHANNEL_MESSAGE_* enum definition.
 * NOTE(review): entries whose handler field is not visible here have
 * their second initializer on lines elided from this chunk (in upstream
 * FreeBSD those are NULL for messages the guest only sends or ignores).
 */
52 * Channel message dispatch table
54 hv_vmbus_channel_msg_table_entry
55 g_channel_message_table[HV_CHANNEL_MESSAGE_COUNT] = {
56 { HV_CHANNEL_MESSAGE_INVALID,
58 { HV_CHANNEL_MESSAGE_OFFER_CHANNEL,
59 vmbus_channel_on_offer },
60 { HV_CHANNEL_MESSAGE_RESCIND_CHANNEL_OFFER,
61 vmbus_channel_on_offer_rescind },
62 { HV_CHANNEL_MESSAGE_REQUEST_OFFERS,
64 { HV_CHANNEL_MESSAGE_ALL_OFFERS_DELIVERED,
65 vmbus_channel_on_offers_delivered },
66 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL,
68 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL_RESULT,
69 vmbus_channel_on_open_result },
70 { HV_CHANNEL_MESSAGE_CLOSE_CHANNEL,
/* NOTE(review): "MESSAGEL" below is a long-standing upstream typo in the
 * constant's name, not a transcription error -- do not "fix" it here. */
72 { HV_CHANNEL_MESSAGEL_GPADL_HEADER,
74 { HV_CHANNEL_MESSAGE_GPADL_BODY,
76 { HV_CHANNEL_MESSAGE_GPADL_CREATED,
77 vmbus_channel_on_gpadl_created },
78 { HV_CHANNEL_MESSAGE_GPADL_TEARDOWN,
80 { HV_CHANNEL_MESSAGE_GPADL_TORNDOWN,
81 vmbus_channel_on_gpadl_torndown },
82 { HV_CHANNEL_MESSAGE_REL_ID_RELEASED,
84 { HV_CHANNEL_MESSAGE_INITIATED_CONTACT,
86 { HV_CHANNEL_MESSAGE_VERSION_RESPONSE,
87 vmbus_channel_on_version_response },
88 { HV_CHANNEL_MESSAGE_UNLOAD,
/*
 * A deferred-work wrapper: pairs a callback with its context so it can be
 * run later on a taskqueue (see work_item_callback / hv_queue_work_item).
 * NOTE(review): the remaining members (the embedded struct task, the
 * context pointer, and the closing of the typedef) are on lines not
 * visible in this chunk.
 */
92 typedef struct hv_work_item {
94 	void (*callback)(void *);
/*
 * Taskqueue trampoline: recovers the hv_work_item from the opaque task
 * argument and invokes the stored callback with its saved context.
 * The `pending` count is part of the standard taskqueue handler
 * signature; it is not used in the lines visible here.
 * NOTE(review): upstream also frees the work item after the callback
 * returns -- that line is not visible in this chunk; confirm ownership.
 */
99 * Implementation of the work abstraction.
102 work_item_callback(void *work, int pending)
104 	struct hv_work_item *w = (struct hv_work_item *)work;
106 	w->callback(w->context);
/*
 * @brief Create work item
 *
 * Allocates an hv_work_item recording (callback, context), initializes
 * its task to run work_item_callback, and enqueues it on the system
 * taskqueue_thread.  Returns the taskqueue_enqueue() result.
 * NOTE(review): the malloc flags are on an elided line; the KASSERT
 * implies allocation failure is treated as a panic-worthy bug rather
 * than handled -- confirm M_WAITOK vs M_NOWAIT in the full source.
 */
112 * @brief Create work item
116 void (*callback)(void *), void *context)
118 	struct hv_work_item *w = malloc(sizeof(struct hv_work_item),
120 	KASSERT(w != NULL, ("Error VMBUS: Failed to allocate WorkItem\n"));
124 	w->callback = callback;
125 	w->context = context;
127 	TASK_INIT(&w->work, 0, work_item_callback, w);
129 	return (taskqueue_enqueue(taskqueue_thread, &w->work));
/*
 * @brief Allocate and initialize a vmbus channel object
 *
 * Allocates a channel, initializes the sub-channel ("sc") mutex and the
 * sub-channel list head.  Pairs with hv_vmbus_free_vmbus_channel().
 * NOTE(review): the malloc flags, the other lock/list initializers and
 * the return statement are on lines elided from this chunk.
 */
134 * @brief Allocate and initialize a vmbus channel object
137 hv_vmbus_allocate_channel(void)
139 	hv_vmbus_channel* channel;
141 	channel = (hv_vmbus_channel*) malloc(
142 	    sizeof(hv_vmbus_channel),
146 	mtx_init(&channel->sc_lock, "vmbus multi channel", NULL, MTX_DEF);
147 	TAILQ_INIT(&channel->sc_list_anchor);
/*
 * @brief Release the resources used by the vmbus channel object
 *
 * Destroys the sub-channel mutex created in hv_vmbus_allocate_channel()
 * and frees the channel itself.  Caller must guarantee no further use.
 */
153 * @brief Release the resources used by the vmbus channel object
156 hv_vmbus_free_vmbus_channel(hv_vmbus_channel* channel)
158 	mtx_destroy(&channel->sc_lock);
159 	free(channel, M_DEVBUF);
/*
 * @brief Process the offer by creating a channel/device associated with
 * this offer (runs in work-item context, via vmbus_channel_on_offer_internal).
 *
 * Visible flow:
 *  1. Record new_channel in the connection's channels[] slot for its relid.
 *  2. Under channel_lock, scan channel_anchor for an existing channel with
 *     the same interface type AND instance GUIDs (i.e. the primary channel
 *     of a multi-channel device).
 *  3. If this offer has sub_channel_index != 0 it is a sub-channel: link it
 *     to the matched primary, reuse the primary's device, insert it on both
 *     the primary's sc_list and the global channel_anchor, mark it
 *     HV_CHANNEL_OPEN_STATE and notify the primary's sc_creation_callback.
 *  4. Otherwise it is a new primary: mark it open, create a child device
 *     from the offer's GUIDs and register it on the bus; on registration
 *     failure, unlink under channel_lock and free the channel.
 * NOTE(review): several lines (duplicate-offer bailout, TAILQ insertions'
 * first arguments, early returns, closing braces) are elided from this
 * chunk, so the exact branch structure should be confirmed upstream.
 */
163 * @brief Process the offer by creating a channel/device
164 * associated with this offer
167 vmbus_channel_process_offer(hv_vmbus_channel *new_channel)
170 	hv_vmbus_channel*	channel;
176 	relid = new_channel->offer_msg.child_rel_id;
178 	* Make sure this is a new offer
180 	mtx_lock(&hv_vmbus_g_connection.channel_lock);
181 	hv_vmbus_g_connection.channels[relid] = new_channel;
183 	TAILQ_FOREACH(channel, &hv_vmbus_g_connection.channel_anchor,
186 	if (memcmp(&channel->offer_msg.offer.interface_type,
187 	    &new_channel->offer_msg.offer.interface_type,
188 	    sizeof(hv_guid)) == 0 &&
189 	    memcmp(&channel->offer_msg.offer.interface_instance,
190 	    &new_channel->offer_msg.offer.interface_instance,
191 	    sizeof(hv_guid)) == 0) {
200 	    &hv_vmbus_g_connection.channel_anchor,
204 	mtx_unlock(&hv_vmbus_g_connection.channel_lock);
206 	/*XXX add new channel to percpu_list */
210 	* Check if this is a sub channel.
212 	if (new_channel->offer_msg.offer.sub_channel_index != 0) {
214 	* It is a sub channel offer, process it.
216 	new_channel->primary_channel = channel;
/* Sub-channels share the primary's hv_device; only the primary owns it
 * (see hv_vmbus_release_unattached_channels). */
217 	new_channel->device = channel->device;
218 	mtx_lock(&channel->sc_lock);
220 	    &channel->sc_list_anchor,
223 	mtx_unlock(&channel->sc_lock);
225 	/* Insert new channel into channel_anchor. */
226 	printf("VMBUS get multi-channel offer, rel=%u,sub=%u\n",
227 	    new_channel->offer_msg.child_rel_id,
228 	    new_channel->offer_msg.offer.sub_channel_index);
229 	mtx_lock(&hv_vmbus_g_connection.channel_lock);
230 	TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_anchor,
231 	    new_channel, list_entry);
232 	mtx_unlock(&hv_vmbus_g_connection.channel_lock);
235 	printf("VMBUS: new multi-channel offer <%p>, "
236 	    "its primary channel is <%p>.\n",
237 	    new_channel, new_channel->primary_channel);
239 	/*XXX add it to percpu_list */
241 	new_channel->state = HV_CHANNEL_OPEN_STATE;
242 	if (channel->sc_creation_callback != NULL) {
243 	channel->sc_creation_callback(new_channel);
248 	hv_vmbus_free_vmbus_channel(new_channel);
252 	new_channel->state = HV_CHANNEL_OPEN_STATE;
255 	* Start the process of binding this offer to the driver
256 	* (We need to set the device field before calling
257 	* hv_vmbus_child_device_add())
259 	new_channel->device = hv_vmbus_child_device_create(
260 	    new_channel->offer_msg.offer.interface_type,
261 	    new_channel->offer_msg.offer.interface_instance, new_channel);
264 	* Add the new device to the bus. This will kick off device-driver
265 	* binding which eventually invokes the device driver's AddDevice()
268 	ret = hv_vmbus_child_device_register(new_channel->device);
/* Registration failed: undo step 1/insertion and release the channel. */
270 	mtx_lock(&hv_vmbus_g_connection.channel_lock);
272 	    &hv_vmbus_g_connection.channel_anchor,
275 	mtx_unlock(&hv_vmbus_g_connection.channel_lock);
276 	hv_vmbus_free_vmbus_channel(new_channel);
/*
 * Bind a channel's interrupt delivery to the given host CPU.
 *
 * Records both the FreeBSD cpu id (target_cpu) and the hypervisor's
 * virtual-processor index (target_vcpu, looked up via hv_vcpu_index[]).
 * On pre-Win8 host protocol versions (WS2008/WIN7) only cpu0 is
 * supported, so the requested cpu is not honored on that branch.
 * NOTE(review): "protocal" is the existing upstream spelling of the
 * global's name -- left as-is.  The printf looks debug/bootverbose
 * gated in the full source; the guard lines are elided here.
 */
281 vmbus_channel_cpu_set(struct hv_vmbus_channel *chan, int cpu)
283 	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));
285 	if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 ||
286 	    hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) {
287 	/* Only cpu0 is supported */
291 	chan->target_cpu = cpu;
292 	chan->target_vcpu = hv_vmbus_g_context.hv_vcpu_index[cpu];
295 	printf("vmbus_chan%u: assigned to cpu%u [vcpu%u]\n",
296 	    chan->offer_msg.child_rel_id,
297 	    chan->target_cpu, chan->target_vcpu);
/*
 * GUIDs of performance-critical device classes (NIC/IDE/SCSI per the
 * comment below) whose channel interrupts are spread across all online
 * CPUs by vmbus_channel_select_defcpu(); everything else stays on cpu0.
 * next_vcpu is the round-robin cursor for that distribution.
 * NOTE(review): the GUID initializers are on lines elided from this
 * chunk; next_vcpu is updated without atomics -- presumably offers are
 * processed serially on one taskqueue, so no race.  Confirm.
 */
302 * Array of device guids that are performance critical. We try to distribute
303 * the interrupt load for these devices across all online cpus.
305 static const hv_guid high_perf_devices[] = {
319 * We use this static number to distribute the channel interrupt load.
321 static uint32_t next_vcpu;
/*
 * Pick the default CPU binding for a freshly-offered channel.
 *
 * Compares the offer's interface-type GUID against high_perf_devices[];
 * non-performance-critical channels are pinned to cpu0, while
 * performance-critical ones are assigned round-robin over mp_ncpus via
 * the static next_vcpu counter.  Delegates the actual binding (and the
 * pre-Win8 cpu0-only restriction) to vmbus_channel_cpu_set().
 */
324 * Starting with Win8, we can statically distribute the incoming
325 * channel interrupt load by binding a channel to VCPU. We
326 * implement here a simple round robin scheme for distributing
327 * the interrupt load.
328 * We will bind channels that are not performance critical to cpu 0 and
329 * performance critical channels (IDE, SCSI and Network) will be uniformly
330 * distributed across all available CPUs.
333 vmbus_channel_select_defcpu(struct hv_vmbus_channel *channel)
335 	uint32_t current_cpu;
337 	boolean_t is_perf_channel = FALSE;
338 	const hv_guid *guid = &channel->offer_msg.offer.interface_type;
340 	for (i = PERF_CHN_NIC; i < MAX_PERF_CHN; i++) {
341 	if (memcmp(guid->data, high_perf_devices[i].data,
342 	    sizeof(hv_guid)) == 0) {
343 	is_perf_channel = TRUE;
348 	if (!is_perf_channel) {
/* Not performance critical: keep it on the boot CPU. */
350 	vmbus_channel_cpu_set(channel, 0);
353 	/* mp_ncpus should have the number cpus currently online */
354 	current_cpu = (++next_vcpu % mp_ncpus);
355 	vmbus_channel_cpu_set(channel, current_cpu);
/*
 * @brief Handler for channel offers from Hyper-V/Azure
 *
 * Runs in message-dispatch context, so it must not block: it copies the
 * offer message into a private M_NOWAIT allocation and queues
 * vmbus_channel_on_offer_internal() to do the real work; the work item
 * owns (and frees) the copy.  On allocation failure the offer is
 * dropped with only a printf -- NOTE(review): that means the device
 * behind the offer is silently lost; confirm this is acceptable.
 * guidType/guidInstance are computed here but their use (likely debug
 * logging) is on lines elided from this chunk.
 */
359 * @brief Handler for channel offers from Hyper-V/Azure
361 * Handler for channel offers from vmbus in parent partition. We ignore
362 * all offers except network and storage offers. For each network and storage
363 * offers, we create a channel object and queue a work item to the channel
364 * object to process the offer synchronously
367 vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr)
369 	hv_vmbus_channel_offer_channel* offer;
370 	hv_vmbus_channel_offer_channel* copied;
372 	offer = (hv_vmbus_channel_offer_channel*) hdr;
375 	hv_guid *guidInstance;
377 	guidType = &offer->offer.interface_type;
378 	guidInstance = &offer->offer.interface_instance;
381 	copied = malloc(sizeof(*copied), M_DEVBUF, M_NOWAIT);
382 	if (copied == NULL) {
383 	printf("fail to allocate memory\n");
387 	memcpy(copied, hdr, sizeof(*copied));
388 	hv_queue_work_item(vmbus_channel_on_offer_internal, copied);
/*
 * Work-item half of offer handling (context = the offer copy made by
 * vmbus_channel_on_offer(), freed here at the end).
 *
 * Allocates the channel object, enables batched ring-buffer reading by
 * default, and carves an HV_HYPERCALL_PARAM_ALIGN-aligned
 * hv_vmbus_input_signal_event out of signal_event_buffer (the buffer is
 * presumably oversized to guarantee the aligned pointer fits -- TODO
 * confirm against the struct definition).  The event's connection id
 * defaults to HV_VMBUS_EVENT_CONNECTION_ID; on post-WS2008 hosts it is
 * overridden with the per-channel connection_id from the offer.  Then
 * saves the offer, derives monitor_group/monitor_bit from monitor_id
 * (32 monitor bits per group), picks a default CPU and hands off to
 * vmbus_channel_process_offer().
 */
392 vmbus_channel_on_offer_internal(void* context)
394 	hv_vmbus_channel* new_channel;
396 	hv_vmbus_channel_offer_channel* offer = (hv_vmbus_channel_offer_channel*)context;
397 	/* Allocate the channel object and save this offer */
398 	new_channel = hv_vmbus_allocate_channel();
401 	* By default we setup state to enable batched
402 	* reading. A specific service can choose to
403 	* disable this prior to opening the channel.
405 	new_channel->batched_reading = TRUE;
407 	new_channel->signal_event_param =
408 	    (hv_vmbus_input_signal_event *)
409 	    (HV_ALIGN_UP((unsigned long)
410 	    &new_channel->signal_event_buffer,
411 	    HV_HYPERCALL_PARAM_ALIGN));
413 	new_channel->signal_event_param->connection_id.as_uint32_t = 0;
414 	new_channel->signal_event_param->connection_id.u.id =
415 	    HV_VMBUS_EVENT_CONNECTION_ID;
416 	new_channel->signal_event_param->flag_number = 0;
417 	new_channel->signal_event_param->rsvd_z = 0;
419 	if (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) {
420 	new_channel->is_dedicated_interrupt =
421 	    (offer->is_dedicated_interrupt != 0);
422 	new_channel->signal_event_param->connection_id.u.id =
423 	    offer->connection_id;
426 	memcpy(&new_channel->offer_msg, offer,
427 	    sizeof(hv_vmbus_channel_offer_channel));
428 	new_channel->monitor_group = (uint8_t) offer->monitor_id / 32;
429 	new_channel->monitor_bit = (uint8_t) offer->monitor_id % 32;
431 	/* Select default cpu for this channel. */
432 	vmbus_channel_select_defcpu(new_channel);
434 	vmbus_channel_process_offer(new_channel);
/* The offer copy was allocated by vmbus_channel_on_offer(); release it. */
436 	free(offer, M_DEVBUF);
/*
 * @brief Rescind offer handler.
 *
 * Looks up the channel by the rescinded child_rel_id, queues the
 * teardown work (vmbus_channel_on_offer_rescind_internal) and clears
 * the channels[] slot so no further messages are routed to it.
 * NOTE(review): a NULL check on the looked-up channel is present in the
 * upstream source but on a line elided from this chunk -- without it
 * this would queue work for a nonexistent channel.
 */
440 * @brief Rescind offer handler.
442 * We queue a work item to process this offer
446 vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr)
448 	hv_vmbus_channel_rescind_offer* rescind;
449 	hv_vmbus_channel* channel;
451 	rescind = (hv_vmbus_channel_rescind_offer*) hdr;
453 	channel = hv_vmbus_g_connection.channels[rescind->child_rel_id];
457 	hv_queue_work_item(vmbus_channel_on_offer_rescind_internal, channel);
458 	hv_vmbus_g_connection.channels[rescind->child_rel_id] = NULL;
/*
 * Work-item half of rescind handling (context = the rescinded channel).
 * Only a primary channel owns its hv_device, so only primaries trigger
 * child-device unregistration; sub-channel cleanup (if any) is on lines
 * not visible in this chunk.
 */
462 vmbus_channel_on_offer_rescind_internal(void *context)
464 	hv_vmbus_channel* channel;
466 	channel = (hv_vmbus_channel*)context;
467 	if (HV_VMBUS_CHAN_ISPRIMARY(channel)) {
468 	/* Only primary channel owns the hv_device */
469 	hv_vmbus_child_device_unregister(channel->device);
/*
 * @brief Invoked when all offers have been delivered.
 * No body is visible in this chunk; in upstream FreeBSD this handler is
 * intentionally empty (the message is informational) -- TODO confirm.
 */
475 * @brief Invoked when all offers have been delivered.
478 vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr)
/*
 * @brief Open result handler.
 *
 * Walks the pending-request list (channel_msg_anchor) under
 * channel_msg_lock looking for the OPEN_CHANNEL request whose
 * (child_rel_id, open_id) matches this response; copies the result into
 * the waiter's response buffer and posts its semaphore to unblock the
 * thread sleeping in the open path.  No match means the response is
 * silently ignored (stale/unsolicited).
 */
483 * @brief Open result handler.
485 * This is invoked when we received a response
486 * to our channel open request. Find the matching request, copy the
487 * response and signal the requesting thread.
490 vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr)
492 	hv_vmbus_channel_open_result* result;
493 	hv_vmbus_channel_msg_info* msg_info;
494 	hv_vmbus_channel_msg_header* requestHeader;
495 	hv_vmbus_channel_open_channel* openMsg;
497 	result = (hv_vmbus_channel_open_result*) hdr;
500 	* Find the open msg, copy the result and signal/unblock the wait event
502 	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
504 	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
506 	requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
508 	if (requestHeader->message_type ==
509 	    HV_CHANNEL_MESSAGE_OPEN_CHANNEL) {
510 	openMsg = (hv_vmbus_channel_open_channel*) msg_info->msg;
511 	if (openMsg->child_rel_id == result->child_rel_id
512 	    && openMsg->open_id == result->open_id) {
513 	memcpy(&msg_info->response.open_result, result,
514 	    sizeof(hv_vmbus_channel_open_result));
515 	sema_post(&msg_info->wait_sema);
520 	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
/*
 * @brief GPADL created handler.
 *
 * Same request/response rendezvous pattern as the open-result handler:
 * under channel_msg_lock, find the pending GPADL_HEADER request whose
 * (child_rel_id, gpadl) matches this response, copy the response into
 * the waiter's buffer and post its semaphore.
 * (GPADL = Guest Physical Address Descriptor List, the shared-memory
 * handle negotiated with the host.)
 */
525 * @brief GPADL created handler.
527 * This is invoked when we received a response
528 * to our gpadl create request. Find the matching request, copy the
529 * response and signal the requesting thread.
532 vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr)
534 	hv_vmbus_channel_gpadl_created* gpadl_created;
535 	hv_vmbus_channel_msg_info* msg_info;
536 	hv_vmbus_channel_msg_header* request_header;
537 	hv_vmbus_channel_gpadl_header* gpadl_header;
539 	gpadl_created = (hv_vmbus_channel_gpadl_created*) hdr;
541 	/* Find the establish msg, copy the result and signal/unblock
544 	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
545 	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
547 	request_header = (hv_vmbus_channel_msg_header*) msg_info->msg;
548 	if (request_header->message_type ==
549 	    HV_CHANNEL_MESSAGEL_GPADL_HEADER) {
551 	    (hv_vmbus_channel_gpadl_header*) request_header;
553 	if ((gpadl_created->child_rel_id == gpadl_header->child_rel_id)
554 	    && (gpadl_created->gpadl == gpadl_header->gpadl)) {
555 	memcpy(&msg_info->response.gpadl_created,
557 	    sizeof(hv_vmbus_channel_gpadl_created));
558 	sema_post(&msg_info->wait_sema);
563 	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
/*
 * @brief GPADL torndown handler.
 *
 * Rendezvous with a pending GPADL_TEARDOWN request: matched by gpadl
 * handle only (teardown is per-GPADL, not per-channel in this lookup).
 * Copies the response to the waiter and posts its semaphore, all under
 * channel_msg_lock.
 */
567 * @brief GPADL torndown handler.
569 * This is invoked when we received a respons
570 * to our gpadl teardown request. Find the matching request, copy the
571 * response and signal the requesting thread
574 vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr)
576 	hv_vmbus_channel_gpadl_torndown* gpadl_torndown;
577 	hv_vmbus_channel_msg_info* msg_info;
578 	hv_vmbus_channel_msg_header* requestHeader;
579 	hv_vmbus_channel_gpadl_teardown* gpadlTeardown;
581 	gpadl_torndown = (hv_vmbus_channel_gpadl_torndown*)hdr;
584 	* Find the open msg, copy the result and signal/unblock the
588 	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
590 	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
592 	requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
594 	if (requestHeader->message_type
595 	    == HV_CHANNEL_MESSAGE_GPADL_TEARDOWN) {
597 	    (hv_vmbus_channel_gpadl_teardown*) requestHeader;
599 	if (gpadl_torndown->gpadl == gpadlTeardown->gpadl) {
600 	memcpy(&msg_info->response.gpadl_torndown,
602 	    sizeof(hv_vmbus_channel_gpadl_torndown));
603 	sema_post(&msg_info->wait_sema);
608 	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
/*
 * @brief Version response handler.
 *
 * Matches any pending INITIATED_CONTACT request (no per-request key is
 * compared in the visible lines -- presumably at most one version
 * negotiation is outstanding at a time; confirm), copies the version
 * response to the waiter and posts its semaphore.  `initiate` is
 * assigned on an elided line (551-area pattern); its use is not visible
 * in this chunk.
 */
612 * @brief Version response handler.
614 * This is invoked when we received a response
615 * to our initiate contact request. Find the matching request, copy th
616 * response and signal the requesting thread.
619 vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr)
621 	hv_vmbus_channel_msg_info* msg_info;
622 	hv_vmbus_channel_msg_header* requestHeader;
623 	hv_vmbus_channel_initiate_contact* initiate;
624 	hv_vmbus_channel_version_response* versionResponse;
626 	versionResponse = (hv_vmbus_channel_version_response*)hdr;
628 	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
629 	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
631 	requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
632 	if (requestHeader->message_type
633 	    == HV_CHANNEL_MESSAGE_INITIATED_CONTACT) {
635 	    (hv_vmbus_channel_initiate_contact*) requestHeader;
636 	memcpy(&msg_info->response.version_response,
638 	    sizeof(hv_vmbus_channel_version_response));
639 	sema_post(&msg_info->wait_sema);
642 	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
/*
 * @brief Send a request to get all our pending offers.
 *
 * Allocates a msg_info large enough to also hold the message header in
 * its trailing msg[] area, stamps it REQUEST_OFFERS and posts it to the
 * host; the responses arrive later as OFFER_CHANNEL messages handled by
 * vmbus_channel_on_offer().  The msg_info is freed before returning --
 * unlike the open/gpadl paths, no semaphore wait is involved here.
 * Allocation failure returns an error (return statement elided).
 */
647 * @brief Send a request to get all our pending offers.
650 hv_vmbus_request_channel_offers(void)
653 	hv_vmbus_channel_msg_header* msg;
654 	hv_vmbus_channel_msg_info* msg_info;
656 	msg_info = (hv_vmbus_channel_msg_info *)
657 	    malloc(sizeof(hv_vmbus_channel_msg_info)
658 	    + sizeof(hv_vmbus_channel_msg_header), M_DEVBUF, M_NOWAIT);
660 	if (msg_info == NULL) {
662 	printf("Error VMBUS: malloc failed for Request Offers\n");
666 	msg = (hv_vmbus_channel_msg_header*) msg_info->msg;
667 	msg->message_type = HV_CHANNEL_MESSAGE_REQUEST_OFFERS;
669 	ret = hv_vmbus_post_message(msg, sizeof(hv_vmbus_channel_msg_header));
671 	free(msg_info, M_DEVBUF);
/*
 * @brief Release channels that are unattached/unconnected (i.e., no
 * drivers associated) -- used at vmbus teardown.
 *
 * Under channel_lock, drains the global channel list: unregisters the
 * hv_device for primary channels only (sub-channels share the primary's
 * device and must not unregister it twice) and frees every channel.
 * Finally zeroes the relid->channel lookup table so stale relids cannot
 * be dereferenced.
 */
677 * @brief Release channels that are unattached/unconnected (i.e., no drivers associated)
680 hv_vmbus_release_unattached_channels(void)
682 	hv_vmbus_channel *channel;
684 	mtx_lock(&hv_vmbus_g_connection.channel_lock);
686 	while (!TAILQ_EMPTY(&hv_vmbus_g_connection.channel_anchor)) {
687 	channel = TAILQ_FIRST(&hv_vmbus_g_connection.channel_anchor);
688 	TAILQ_REMOVE(&hv_vmbus_g_connection.channel_anchor,
689 	    channel, list_entry);
691 	if (HV_VMBUS_CHAN_ISPRIMARY(channel)) {
692 	/* Only primary channel owns the hv_device */
693 	hv_vmbus_child_device_unregister(channel->device);
695 	hv_vmbus_free_vmbus_channel(channel);
697 	bzero(hv_vmbus_g_connection.channels,
698 	    sizeof(hv_vmbus_channel*) * HV_CHANNEL_MAX_COUNT);
699 	mtx_unlock(&hv_vmbus_g_connection.channel_lock);
/*
 * @brief Select the best outgoing channel
 *
 * Given a primary channel, choose the channel (primary or one of its
 * opened sub-channels) whose bound vcpu is numerically closest to the
 * vcpu of the CPU we are currently running on -- an exact vcpu match is
 * taken immediately (the early-return is on an elided line).  Falls
 * back to the primary when there are no sub-channels or when the
 * current cpuid is out of the hv_vcpu_index[] range (>= MAXCPU).
 * NOTE(review): the per-iteration `continue` for non-opened
 * sub-channels and the final `if` body assigning outgoing_channel are
 * on elided lines; target_vcpu values are read without locking --
 * presumably they are stable after channel setup.  Confirm upstream.
 */
703 * @brief Select the best outgoing channel
705 * The channel whose vcpu binding is closest to the currect vcpu will
707 * If no multi-channel, always select primary channel
709 * @param primary - primary channel
711 struct hv_vmbus_channel *
712 vmbus_select_outgoing_channel(struct hv_vmbus_channel *primary)
714 	hv_vmbus_channel *new_channel = NULL;
715 	hv_vmbus_channel *outgoing_channel = primary;
716 	int old_cpu_distance = 0;
717 	int new_cpu_distance = 0;
719 	int smp_pro_id = PCPU_GET(cpuid);
721 	if (TAILQ_EMPTY(&primary->sc_list_anchor)) {
722 	return outgoing_channel;
725 	if (smp_pro_id >= MAXCPU) {
726 	return outgoing_channel;
729 	cur_vcpu = hv_vmbus_g_context.hv_vcpu_index[smp_pro_id];
731 	TAILQ_FOREACH(new_channel, &primary->sc_list_anchor, sc_list_entry) {
732 	if (new_channel->state != HV_CHANNEL_OPENED_STATE){
736 	if (new_channel->target_vcpu == cur_vcpu){
/* Absolute distance computed branch-wise since target_vcpu/cur_vcpu are
 * unsigned-ish quantities; avoids relying on abs() of a difference. */
740 	old_cpu_distance = ((outgoing_channel->target_vcpu > cur_vcpu) ?
741 	    (outgoing_channel->target_vcpu - cur_vcpu) :
742 	    (cur_vcpu - outgoing_channel->target_vcpu));
744 	new_cpu_distance = ((new_channel->target_vcpu > cur_vcpu) ?
745 	    (new_channel->target_vcpu - cur_vcpu) :
746 	    (cur_vcpu - new_channel->target_vcpu));
748 	if (old_cpu_distance < new_cpu_distance) {
752 	outgoing_channel = new_channel;
755 	return(outgoing_channel);