/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "hv_vmbus_priv.h"

static int      vmbus_channel_create_gpadl_header(
                        /* must be phys and virt contiguous */
                        void*                           contig_buffer,
                        /* page-size multiple */
                        uint32_t                        size,
                        hv_vmbus_channel_msg_info**     msg_info,
                        uint32_t*                       message_count);

static void     vmbus_channel_set_event(hv_vmbus_channel* channel);
static void     VmbusProcessChannelEvent(void* channel, int pending);

/**
 *  @brief Trigger an event notification on the specified channel
 */
static void
vmbus_channel_set_event(hv_vmbus_channel *channel)
{
        hv_vmbus_monitor_page *monitor_page;

        if (channel->offer_msg.monitor_allocated) {
                /* Each uint32_t represents 32 channels */
                synch_set_bit((channel->offer_msg.child_rel_id & 31),
                        ((uint32_t *)hv_vmbus_g_connection.send_interrupt_page
                                + ((channel->offer_msg.child_rel_id >> 5))));

                monitor_page = (hv_vmbus_monitor_page *)
                        hv_vmbus_g_connection.monitor_page_2;

                synch_set_bit(channel->monitor_bit,
                        (uint32_t *)&monitor_page->
                                trigger_group[channel->monitor_group].u.pending);
        } else {
                hv_vmbus_set_event(channel);
        }
}

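/*
 * Illustrative note (not from the original source): the interrupt page
 * indexing above packs one pending bit per channel into an array of
 * uint32_t words.  For a hypothetical child_rel_id of 37, the code sets
 * bit 37 & 31 == 5 in word 37 >> 5 == 1 of the send interrupt page;
 * the monitor trigger group bit is set the same way for monitored
 * channels.
 */
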
/**
 * @brief Open the specified channel
 */
int
hv_vmbus_channel_open(
        hv_vmbus_channel*               new_channel,
        uint32_t                        send_ring_buffer_size,
        uint32_t                        recv_ring_buffer_size,
        void*                           user_data,
        uint32_t                        user_data_len,
        hv_vmbus_pfn_channel_callback   pfn_on_channel_callback,
        void*                           context)
{
        int ret = 0;
        void *in, *out;
        hv_vmbus_channel_open_channel*  open_msg;
        hv_vmbus_channel_msg_info*      open_info;

        mtx_lock(&new_channel->sc_lock);
        if (new_channel->state == HV_CHANNEL_OPEN_STATE) {
            new_channel->state = HV_CHANNEL_OPENING_STATE;
        } else {
            mtx_unlock(&new_channel->sc_lock);
            if (bootverbose)
                printf("VMBUS: Trying to open channel <%p> which is in "
                    "%d state.\n", new_channel, new_channel->state);
            return (EINVAL);
        }
        mtx_unlock(&new_channel->sc_lock);

        new_channel->on_channel_callback = pfn_on_channel_callback;
        new_channel->channel_callback_context = context;

        new_channel->rxq = hv_vmbus_g_context.hv_event_queue[new_channel->target_cpu];
        TASK_INIT(&new_channel->channel_task, 0, VmbusProcessChannelEvent, new_channel);

        /* Allocate the ring buffer */
        out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size),
            M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
        KASSERT(out != NULL,
            ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!"));
        if (out == NULL)
                return (ENOMEM);

        in = ((uint8_t *) out + send_ring_buffer_size);

        new_channel->ring_buffer_pages = out;
        new_channel->ring_buffer_page_count = (send_ring_buffer_size +
            recv_ring_buffer_size) >> PAGE_SHIFT;
        new_channel->ring_buffer_size = send_ring_buffer_size +
            recv_ring_buffer_size;

        hv_vmbus_ring_buffer_init(
                &new_channel->outbound,
                out,
                send_ring_buffer_size);

        hv_vmbus_ring_buffer_init(
                &new_channel->inbound,
                in,
                recv_ring_buffer_size);

        /*
         * Establish the gpadl for the ring buffer
         */
        new_channel->ring_buffer_gpadl_handle = 0;

        ret = hv_vmbus_channel_establish_gpadl(new_channel,
                new_channel->outbound.ring_buffer,
                send_ring_buffer_size + recv_ring_buffer_size,
                &new_channel->ring_buffer_gpadl_handle);

        /*
         * Create and init the channel open message
         */
        open_info = (hv_vmbus_channel_msg_info*) malloc(
                sizeof(hv_vmbus_channel_msg_info) +
                        sizeof(hv_vmbus_channel_open_channel),
                M_DEVBUF,
                M_NOWAIT);
        KASSERT(open_info != NULL,
            ("Error VMBUS: malloc failed to allocate Open Channel message!"));

        if (open_info == NULL)
                return (ENOMEM);

        sema_init(&open_info->wait_sema, 0, "Open Info Sema");

        open_msg = (hv_vmbus_channel_open_channel*) open_info->msg;
        open_msg->header.message_type = HV_CHANNEL_MESSAGE_OPEN_CHANNEL;
        open_msg->open_id = new_channel->offer_msg.child_rel_id;
        open_msg->child_rel_id = new_channel->offer_msg.child_rel_id;
        open_msg->ring_buffer_gpadl_handle =
                new_channel->ring_buffer_gpadl_handle;
        open_msg->downstream_ring_buffer_page_offset = send_ring_buffer_size
                >> PAGE_SHIFT;
        open_msg->target_vcpu = new_channel->target_vcpu;

        if (user_data_len)
                memcpy(open_msg->user_data, user_data, user_data_len);

        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_INSERT_TAIL(
                &hv_vmbus_g_connection.channel_msg_anchor,
                open_info,
                msg_list_entry);
        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

        ret = hv_vmbus_post_message(
                open_msg, sizeof(hv_vmbus_channel_open_channel));

        if (ret != 0)
            goto cleanup;

        ret = sema_timedwait(&open_info->wait_sema, 5 * hz); /* KYS 5 seconds */

        if (ret) {
            if (bootverbose)
                printf("VMBUS: channel <%p> open timeout.\n", new_channel);
            goto cleanup;
        }

        if (open_info->response.open_result.status == 0) {
            new_channel->state = HV_CHANNEL_OPENED_STATE;
            if (bootverbose)
                printf("VMBUS: channel <%p> open success.\n", new_channel);
        } else {
            if (bootverbose)
                printf("Error VMBUS: channel <%p> open failed - %d!\n",
                        new_channel, open_info->response.open_result.status);
        }

cleanup:
        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_REMOVE(
                &hv_vmbus_g_connection.channel_msg_anchor,
                open_info,
                msg_list_entry);
        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        sema_destroy(&open_info->wait_sema);
        free(open_info, M_DEVBUF);

        return (ret);
}

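/*
 * Illustrative usage sketch (not part of the original file): a VMBus
 * device driver would typically open its channel from its attach path
 * roughly as follows, passing page-multiple send/recv ring sizes, no
 * user data, and a receive callback plus context.  The names
 * "my_driver_callback", "sc" and "dev" are hypothetical:
 *
 *      ret = hv_vmbus_channel_open(channel, 3 * PAGE_SIZE, 3 * PAGE_SIZE,
 *          NULL, 0, my_driver_callback, sc);
 *      if (ret != 0)
 *              device_printf(dev, "channel open failed: %d\n", ret);
 *
 * The callback then runs from the channel's per-CPU taskqueue and is
 * expected to drain the inbound ring (see hv_vmbus_channel_recv_packet
 * below).
 */
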
/**
 * @brief Create a gpadl for the specified buffer
 */
static int
vmbus_channel_create_gpadl_header(
        void*                           contig_buffer,
        uint32_t                        size,   /* page-size multiple */
        hv_vmbus_channel_msg_info**     msg_info,
        uint32_t*                       message_count)
{
        int                             i;
        int                             page_count;
        unsigned long long              pfn;
        uint32_t                        msg_size;
        hv_vmbus_channel_gpadl_header*  gpa_header;
        hv_vmbus_channel_gpadl_body*    gpadl_body;
        hv_vmbus_channel_msg_info*      msg_header;
        hv_vmbus_channel_msg_info*      msg_body;

        int pfnSum, pfnCount, pfnLeft, pfnCurr, pfnSize;

        page_count = size >> PAGE_SHIFT;
        pfn = hv_get_phys_addr(contig_buffer) >> PAGE_SHIFT;

        /* do we need a gpadl body msg? */
        pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
            - sizeof(hv_vmbus_channel_gpadl_header)
            - sizeof(hv_gpa_range);
        pfnCount = pfnSize / sizeof(uint64_t);

        if (page_count > pfnCount) { /* we need a gpadl body */
            /* fill in the header */
            msg_size = sizeof(hv_vmbus_channel_msg_info)
                + sizeof(hv_vmbus_channel_gpadl_header)
                + sizeof(hv_gpa_range)
                + pfnCount * sizeof(uint64_t);
            msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
            KASSERT(
                msg_header != NULL,
                ("Error VMBUS: malloc failed to allocate Gpadl Message!"));
            if (msg_header == NULL)
                return (ENOMEM);

            TAILQ_INIT(&msg_header->sub_msg_list_anchor);
            msg_header->message_size = msg_size;

            gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
            gpa_header->range_count = 1;
            gpa_header->range_buf_len = sizeof(hv_gpa_range)
                + page_count * sizeof(uint64_t);
            gpa_header->range[0].byte_offset = 0;
            gpa_header->range[0].byte_count = size;
            for (i = 0; i < pfnCount; i++) {
                gpa_header->range[0].pfn_array[i] = pfn + i;
            }
            *msg_info = msg_header;
            *message_count = 1;

            pfnSum = pfnCount;
            pfnLeft = page_count - pfnCount;

            /*
             * figure out how many pfns we can fit
             */
            pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
                - sizeof(hv_vmbus_channel_gpadl_body);
            pfnCount = pfnSize / sizeof(uint64_t);

            /*
             * fill in the body
             */
            while (pfnLeft) {
                if (pfnLeft > pfnCount) {
                    pfnCurr = pfnCount;
                } else {
                    pfnCurr = pfnLeft;
                }

                msg_size = sizeof(hv_vmbus_channel_msg_info) +
                    sizeof(hv_vmbus_channel_gpadl_body) +
                    pfnCurr * sizeof(uint64_t);
                msg_body = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
                KASSERT(
                    msg_body != NULL,
                    ("Error VMBUS: malloc failed to allocate Gpadl msg_body!"));
                if (msg_body == NULL)
                    return (ENOMEM);

                msg_body->message_size = msg_size;
                (*message_count)++;
                gpadl_body =
                    (hv_vmbus_channel_gpadl_body*) msg_body->msg;
                /*
                 * gpadl_body->gpadl = kbuffer;
                 */
                for (i = 0; i < pfnCurr; i++) {
                    gpadl_body->pfn[i] = pfn + pfnSum + i;
                }

                TAILQ_INSERT_TAIL(
                    &msg_header->sub_msg_list_anchor,
                    msg_body,
                    msg_list_entry);
                pfnSum += pfnCurr;
                pfnLeft -= pfnCurr;
            }
        } else { /* else everything fits in a header */

            msg_size = sizeof(hv_vmbus_channel_msg_info) +
                sizeof(hv_vmbus_channel_gpadl_header) +
                sizeof(hv_gpa_range) +
                page_count * sizeof(uint64_t);
            msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
            KASSERT(
                msg_header != NULL,
                ("Error VMBUS: malloc failed to allocate Gpadl Message!"));
            if (msg_header == NULL)
                return (ENOMEM);

            msg_header->message_size = msg_size;

            gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
            gpa_header->range_count = 1;
            gpa_header->range_buf_len = sizeof(hv_gpa_range) +
                page_count * sizeof(uint64_t);
            gpa_header->range[0].byte_offset = 0;
            gpa_header->range[0].byte_count = size;
            for (i = 0; i < page_count; i++) {
                gpa_header->range[0].pfn_array[i] = pfn + i;
            }

            *msg_info = msg_header;
            *message_count = 1;
        }

        return (0);
}

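/*
 * Illustrative sizing note (not from the original source): the header
 * message can only carry as many PFNs as fit in one host message, i.e.
 *
 *      pfnCount = (HV_MAX_SIZE_CHANNEL_MESSAGE
 *          - sizeof(hv_vmbus_channel_gpadl_header)
 *          - sizeof(hv_gpa_range)) / sizeof(uint64_t),
 *
 * roughly a couple dozen pages.  A small buffer, say 8 pages (32 KiB
 * with 4 KiB pages), therefore fits entirely in the header and
 * *message_count stays 1.  A larger buffer, say 256 pages (1 MiB),
 * overflows the header, and the remaining page_count - pfnCount PFNs
 * are spread over gpadl body messages, each holding
 * (HV_MAX_SIZE_CHANNEL_MESSAGE - sizeof(hv_vmbus_channel_gpadl_body))
 * / sizeof(uint64_t) PFNs.
 */
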
/**
 * @brief Establish a GPADL for the specified buffer
 */
int
hv_vmbus_channel_establish_gpadl(
        hv_vmbus_channel*       channel,
        void*                   contig_buffer,
        uint32_t                size, /* page-size multiple */
        uint32_t*               gpadl_handle)
{
        int ret = 0;
        hv_vmbus_channel_gpadl_header*  gpadl_msg;
        hv_vmbus_channel_gpadl_body*    gpadl_body;
        hv_vmbus_channel_msg_info*      msg_info;
        hv_vmbus_channel_msg_info*      sub_msg_info;
        uint32_t                        msg_count;
        hv_vmbus_channel_msg_info*      curr;
        uint32_t                        next_gpadl_handle;

        next_gpadl_handle = atomic_fetchadd_int(
            &hv_vmbus_g_connection.next_gpadl_handle, 1);

        ret = vmbus_channel_create_gpadl_header(
                contig_buffer, size, &msg_info, &msg_count);

        if (ret != 0) {
                /*
                 * XXX
                 * We cannot even revert the above increment: if multiple
                 * GPADL establishments are running in parallel, decrementing
                 * the global next_gpadl_handle would invite _big_ trouble.
                 * A better solution is a 0-based GPADL id bitmap ...
                 */
                return (ret);
        }

        sema_init(&msg_info->wait_sema, 0, "Open Info Sema");
        gpadl_msg = (hv_vmbus_channel_gpadl_header*) msg_info->msg;
        gpadl_msg->header.message_type = HV_CHANNEL_MESSAGEL_GPADL_HEADER;
        gpadl_msg->child_rel_id = channel->offer_msg.child_rel_id;
        gpadl_msg->gpadl = next_gpadl_handle;

        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_INSERT_TAIL(
                &hv_vmbus_g_connection.channel_msg_anchor,
                msg_info,
                msg_list_entry);

        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

        ret = hv_vmbus_post_message(
                gpadl_msg,
                msg_info->message_size -
                    (uint32_t) sizeof(hv_vmbus_channel_msg_info));

        if (ret != 0)
            goto cleanup;

        if (msg_count > 1) {
            TAILQ_FOREACH(curr,
                    &msg_info->sub_msg_list_anchor, msg_list_entry) {
                sub_msg_info = curr;
                gpadl_body =
                    (hv_vmbus_channel_gpadl_body*) sub_msg_info->msg;

                gpadl_body->header.message_type =
                    HV_CHANNEL_MESSAGE_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = hv_vmbus_post_message(
                        gpadl_body,
                        sub_msg_info->message_size
                            - (uint32_t) sizeof(hv_vmbus_channel_msg_info));
                /* if the post message failed, give up and clean up */
                if (ret != 0)
                    goto cleanup;
            }
        }

        ret = sema_timedwait(&msg_info->wait_sema, 5 * hz); /* KYS 5 seconds */
        if (ret != 0)
            goto cleanup;

        *gpadl_handle = gpadl_msg->gpadl;

cleanup:

        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
                msg_info, msg_list_entry);
        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

        sema_destroy(&msg_info->wait_sema);
        free(msg_info, M_DEVBUF);

        return (ret);
}

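/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that wants the host to address a private, physically contiguous
 * buffer could pair the establish/teardown calls as below.  "buf",
 * "BUF_SIZE" and "gpadl" are hypothetical, and BUF_SIZE must be a
 * multiple of PAGE_SIZE.  Note the teardown routine below really is
 * named with the "gpdal" spelling:
 *
 *      uint32_t gpadl = 0;
 *      void *buf = contigmalloc(BUF_SIZE, M_DEVBUF, M_ZERO,
 *          0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
 *
 *      if (buf != NULL && hv_vmbus_channel_establish_gpadl(channel,
 *              buf, BUF_SIZE, &gpadl) == 0) {
 *              ... hand "gpadl" to the host in a channel packet ...
 *              hv_vmbus_channel_teardown_gpdal(channel, gpadl);
 *      }
 *      if (buf != NULL)
 *              contigfree(buf, BUF_SIZE, M_DEVBUF);
 */
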
/**
 * @brief Teardown the specified GPADL handle
 */
int
hv_vmbus_channel_teardown_gpdal(
        hv_vmbus_channel*       channel,
        uint32_t                gpadl_handle)
{
        int                                     ret = 0;
        hv_vmbus_channel_gpadl_teardown*        msg;
        hv_vmbus_channel_msg_info*              info;

        info = (hv_vmbus_channel_msg_info *)
                malloc(sizeof(hv_vmbus_channel_msg_info) +
                        sizeof(hv_vmbus_channel_gpadl_teardown),
                                M_DEVBUF, M_NOWAIT);
        KASSERT(info != NULL,
            ("Error VMBUS: malloc failed to allocate Gpadl Teardown Msg!"));
        if (info == NULL)
            return (ENOMEM);

        sema_init(&info->wait_sema, 0, "Open Info Sema");

        msg = (hv_vmbus_channel_gpadl_teardown*) info->msg;

        msg->header.message_type = HV_CHANNEL_MESSAGE_GPADL_TEARDOWN;
        msg->child_rel_id = channel->offer_msg.child_rel_id;
        msg->gpadl = gpadl_handle;

        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_msg_anchor,
                        info, msg_list_entry);
        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);

        ret = hv_vmbus_post_message(msg,
                        sizeof(hv_vmbus_channel_gpadl_teardown));
        if (ret != 0)
            goto cleanup;

        ret = sema_timedwait(&info->wait_sema, 5 * hz); /* KYS 5 seconds */

cleanup:
        /*
         * Received a teardown response, or timed out
         */
        mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
                        info, msg_list_entry);
        mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock);
        sema_destroy(&info->wait_sema);
        free(info, M_DEVBUF);

        return (ret);
}

static void
hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
{
        int ret = 0;
        struct taskqueue *rxq = channel->rxq;
        hv_vmbus_channel_close_channel* msg;
        hv_vmbus_channel_msg_info* info;

        channel->state = HV_CHANNEL_OPEN_STATE;
        channel->sc_creation_callback = NULL;

        /*
         * Set rxq to NULL to prevent further requests from being scheduled.
         */
        channel->rxq = NULL;
        taskqueue_drain(rxq, &channel->channel_task);
        /*
         * Grab the lock to prevent a race condition between a packet
         * being received and the driver being unloaded.
         */
        mtx_lock(&channel->inbound_lock);
        channel->on_channel_callback = NULL;
        mtx_unlock(&channel->inbound_lock);

        /*
         * Send a closing message
         */
        info = (hv_vmbus_channel_msg_info *)
                malloc(sizeof(hv_vmbus_channel_msg_info) +
                        sizeof(hv_vmbus_channel_close_channel),
                                M_DEVBUF, M_NOWAIT);
        KASSERT(info != NULL, ("VMBUS: malloc failed hv_vmbus_channel_close!"));
        if (info == NULL)
            return;

        msg = (hv_vmbus_channel_close_channel*) info->msg;
        msg->header.message_type = HV_CHANNEL_MESSAGE_CLOSE_CHANNEL;
        msg->child_rel_id = channel->offer_msg.child_rel_id;

        ret = hv_vmbus_post_message(
                msg, sizeof(hv_vmbus_channel_close_channel));

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ring_buffer_gpadl_handle) {
                hv_vmbus_channel_teardown_gpdal(channel,
                        channel->ring_buffer_gpadl_handle);
        }

        /* TODO: Send a msg to release the childRelId */

        /* cleanup the ring buffers for this channel */
        hv_ring_buffer_cleanup(&channel->outbound);
        hv_ring_buffer_cleanup(&channel->inbound);

        contigfree(channel->ring_buffer_pages, channel->ring_buffer_size,
            M_DEVBUF);

        free(info, M_DEVBUF);
}

/**
 * @brief Close the specified channel
 */
void
hv_vmbus_channel_close(hv_vmbus_channel *channel)
{
        hv_vmbus_channel*       sub_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We only close multi-channels when the primary is
                 * closed.
                 */
                return;
        }

        /*
         * Close all multi-channels first.
         */
        TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor,
            sc_list_entry) {
                if (sub_channel->state != HV_CHANNEL_OPENED_STATE)
                        continue;
                hv_vmbus_channel_close_internal(sub_channel);
        }
        /*
         * Then close the primary channel.
         */
        hv_vmbus_channel_close_internal(channel);
}

/**
 * @brief Send the specified buffer on the given channel
 */
int
hv_vmbus_channel_send_packet(
        hv_vmbus_channel*       channel,
        void*                   buffer,
        uint32_t                buffer_len,
        uint64_t                request_id,
        hv_vmbus_packet_type    type,
        uint32_t                flags)
{
        int                     ret = 0;
        hv_vm_packet_descriptor desc;
        uint32_t                packet_len;
        uint64_t                aligned_data;
        uint32_t                packet_len_aligned;
        boolean_t               need_sig;
        hv_vmbus_sg_buffer_list buffer_list[3];

        packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len;
        packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
        aligned_data = 0;

        /* Setup the descriptor */
        desc.type = type;   /* HV_VMBUS_PACKET_TYPE_DATA_IN_BAND */
        desc.flags = flags; /* HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
                            /* in 8-byte granularity */
        desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3;
        desc.length8 = (uint16_t) (packet_len_aligned >> 3);
        desc.transaction_id = request_id;

        buffer_list[0].data = &desc;
        buffer_list[0].length = sizeof(hv_vm_packet_descriptor);

        buffer_list[1].data = buffer;
        buffer_list[1].length = buffer_len;

        buffer_list[2].data = &aligned_data;
        buffer_list[2].length = packet_len_aligned - packet_len;

        ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
            &need_sig);

        /* TODO: We should determine if this is optional */
        if (ret == 0 && need_sig) {
                vmbus_channel_set_event(channel);
        }

        return (ret);
}

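/*
 * Illustrative sizing example (not from the original source), assuming
 * a 16-byte hv_vm_packet_descriptor: a 10-byte payload gives
 * packet_len = 16 + 10 = 26, which HV_ALIGN_UP rounds to
 * packet_len_aligned = 32.  The descriptor then carries
 * data_offset8 = 16 >> 3 = 2 and length8 = 32 >> 3 = 4, and the third
 * scatter element supplies 32 - 26 = 6 bytes of zero padding so the
 * ring write stays 8-byte aligned.
 */
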
/**
 * @brief Send a range of single-page buffer packets using
 * a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_pagebuffer(
        hv_vmbus_channel*       channel,
        hv_vmbus_page_buffer    page_buffers[],
        uint32_t                page_count,
        void*                   buffer,
        uint32_t                buffer_len,
        uint64_t                request_id)
{
        int                                     ret = 0;
        boolean_t                               need_sig;
        uint32_t                                packet_len;
        uint32_t                                page_buflen;
        uint32_t                                packetLen_aligned;
        hv_vmbus_sg_buffer_list                 buffer_list[4];
        hv_vmbus_channel_packet_page_buffer     desc;
        uint32_t                                descSize;
        uint64_t                                alignedData = 0;

        if (page_count > HV_MAX_PAGE_BUFFER_COUNT)
                return (EINVAL);

        /*
         * Adjust the size down since hv_vmbus_channel_packet_page_buffer
         * is the largest size we support
         */
        descSize = __offsetof(hv_vmbus_channel_packet_page_buffer, range);
        page_buflen = sizeof(hv_vmbus_page_buffer) * page_count;
        packet_len = descSize + page_buflen + buffer_len;
        packetLen_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

        /* Setup the descriptor */
        desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
        desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        /* in 8-byte granularity */
        desc.data_offset8 = (descSize + page_buflen) >> 3;
        desc.length8 = (uint16_t) (packetLen_aligned >> 3);
        desc.transaction_id = request_id;
        desc.range_count = page_count;

        buffer_list[0].data = &desc;
        buffer_list[0].length = descSize;

        buffer_list[1].data = page_buffers;
        buffer_list[1].length = page_buflen;

        buffer_list[2].data = buffer;
        buffer_list[2].length = buffer_len;

        buffer_list[3].data = &alignedData;
        buffer_list[3].length = packetLen_aligned - packet_len;

        ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 4,
            &need_sig);

        /* TODO: We should determine if this is optional */
        if (ret == 0 && need_sig) {
                vmbus_channel_set_event(channel);
        }

        return (ret);
}

/**
 * @brief Send a multi-page buffer packet using a GPADL Direct packet type
 */
int
hv_vmbus_channel_send_packet_multipagebuffer(
        hv_vmbus_channel*               channel,
        hv_vmbus_multipage_buffer*      multi_page_buffer,
        void*                           buffer,
        uint32_t                        buffer_len,
        uint64_t                        request_id)
{
        int                     ret = 0;
        uint32_t                desc_size;
        boolean_t               need_sig;
        uint32_t                packet_len;
        uint32_t                packet_len_aligned;
        uint32_t                pfn_count;
        uint64_t                aligned_data = 0;
        hv_vmbus_sg_buffer_list buffer_list[3];
        hv_vmbus_channel_packet_multipage_buffer desc;

        pfn_count =
            HV_NUM_PAGES_SPANNED(
                    multi_page_buffer->offset,
                    multi_page_buffer->length);

        if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT))
            return (EINVAL);

        /*
         * Adjust the size down since hv_vmbus_channel_packet_multipage_buffer
         * is the largest size we support
         */
        desc_size =
            sizeof(hv_vmbus_channel_packet_multipage_buffer) -
                    ((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) *
                        sizeof(uint64_t));
        packet_len = desc_size + buffer_len;
        packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));

        /*
         * Setup the descriptor
         */
        desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
        desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.data_offset8 = desc_size >> 3; /* in 8-byte granularity */
        desc.length8 = (uint16_t) (packet_len_aligned >> 3);
        desc.transaction_id = request_id;
        desc.range_count = 1;

        desc.range.length = multi_page_buffer->length;
        desc.range.offset = multi_page_buffer->offset;

        memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array,
                pfn_count * sizeof(uint64_t));

        buffer_list[0].data = &desc;
        buffer_list[0].length = desc_size;

        buffer_list[1].data = buffer;
        buffer_list[1].length = buffer_len;

        buffer_list[2].data = &aligned_data;
        buffer_list[2].length = packet_len_aligned - packet_len;

        ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
            &need_sig);

        /* TODO: We should determine if this is optional */
        if (ret == 0 && need_sig) {
            vmbus_channel_set_event(channel);
        }

        return (ret);
}

/**
 * @brief Retrieve the user packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet(
        hv_vmbus_channel*       channel,
        void*                   Buffer,
        uint32_t                buffer_len,
        uint32_t*               buffer_actual_len,
        uint64_t*               request_id)
{
        int                     ret;
        uint32_t                user_len;
        uint32_t                packet_len;
        hv_vm_packet_descriptor desc;

        *buffer_actual_len = 0;
        *request_id = 0;

        ret = hv_ring_buffer_peek(&channel->inbound, &desc,
                sizeof(hv_vm_packet_descriptor));
        if (ret != 0)
                return (0);

        packet_len = desc.length8 << 3;
        user_len = packet_len - (desc.data_offset8 << 3);

        *buffer_actual_len = user_len;

        if (user_len > buffer_len)
                return (EINVAL);

        *request_id = desc.transaction_id;

        /* Copy over the packet to the user buffer */
        ret = hv_ring_buffer_read(&channel->inbound, Buffer, user_len,
                (desc.data_offset8 << 3));

        return (0);
}

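/*
 * Illustrative usage sketch (not part of the original file): a typical
 * channel callback drains the inbound ring with this routine until it
 * reports an empty ring (*buffer_actual_len == 0).  "sc->rx_buf" and
 * MY_RX_BUF_SIZE are hypothetical driver-side names:
 *
 *      uint32_t len;
 *      uint64_t xact_id;
 *
 *      for (;;) {
 *              if (hv_vmbus_channel_recv_packet(channel, sc->rx_buf,
 *                      MY_RX_BUF_SIZE, &len, &xact_id) != 0 || len == 0)
 *                      break;
 *              ... process "len" bytes from sc->rx_buf, keyed by xact_id ...
 *      }
 */
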
/**
 * @brief Retrieve the raw packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet_raw(
        hv_vmbus_channel*       channel,
        void*                   buffer,
        uint32_t                buffer_len,
        uint32_t*               buffer_actual_len,
        uint64_t*               request_id)
{
        int             ret;
        uint32_t        packetLen;
        uint32_t        userLen;
        hv_vm_packet_descriptor desc;

        *buffer_actual_len = 0;
        *request_id = 0;

        ret = hv_ring_buffer_peek(
                &channel->inbound, &desc,
                sizeof(hv_vm_packet_descriptor));

        if (ret != 0)
            return (0);

        packetLen = desc.length8 << 3;
        userLen = packetLen - (desc.data_offset8 << 3);

        *buffer_actual_len = packetLen;

        if (packetLen > buffer_len)
            return (ENOBUFS);

        *request_id = desc.transaction_id;

        /* Copy over the entire packet to the user buffer */
        ret = hv_ring_buffer_read(&channel->inbound, buffer, packetLen, 0);

        return (0);
}

/**
 * Process a channel event notification
 */
static void
VmbusProcessChannelEvent(void* context, int pending)
{
        void* arg;
        uint32_t bytes_to_read;
        hv_vmbus_channel* channel = (hv_vmbus_channel*)context;
        boolean_t is_batched_reading;

        /*
         * Find the channel based on this relid and invoke
         * the channel callback to process the event
         */

        if (channel == NULL) {
                return;
        }
        /*
         * To deal with the race condition where we might
         * receive a packet while the relevant driver is
         * being unloaded, dispatch the callback while
         * holding the channel lock. The unloading driver
         * will acquire the same channel lock to set the
         * callback to NULL. This closes the window.
         */

        /*
         * Disable the lock due to the newly added WITNESS check in r277723.
         * Will seek another way to avoid the race condition.
         * -- whu
         */
        // mtx_lock(&channel->inbound_lock);
        if (channel->on_channel_callback != NULL) {
                arg = channel->channel_callback_context;
                is_batched_reading = channel->batched_reading;
                /*
                 * Optimize host to guest signaling by ensuring:
                 * 1. While reading the channel, we disable interrupts from
                 *    the host.
                 * 2. Ensure that we process all posted messages from the host
                 *    before returning from this callback.
                 * 3. Once we return, enable signaling from the host. Once this
                 *    state is set we check to see if additional packets are
                 *    available to read. In this case we repeat the process.
                 */
                do {
                        if (is_batched_reading)
                                hv_ring_buffer_read_begin(&channel->inbound);

                        channel->on_channel_callback(arg);

                        if (is_batched_reading)
                                bytes_to_read =
                                    hv_ring_buffer_read_end(&channel->inbound);
                        else
                                bytes_to_read = 0;
                } while (is_batched_reading && (bytes_to_read != 0));
        }
        // mtx_unlock(&channel->inbound_lock);
}
956 }