sys/dev/hyperv/vmbus/hv_channel.c
1 /*-
2  * Copyright (c) 2009-2012,2016 Microsoft Corp.
3  * Copyright (c) 2012 NetApp Inc.
4  * Copyright (c) 2012 Citrix Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/systm.h>
36 #include <sys/mbuf.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <machine/bus.h>
41 #include <vm/vm.h>
42 #include <vm/vm_param.h>
43 #include <vm/pmap.h>
44
45 #include <dev/hyperv/vmbus/hv_vmbus_priv.h>
46 #include <dev/hyperv/vmbus/vmbus_var.h>
47
48 static int      vmbus_channel_create_gpadl_header(
49                         /* must be phys and virt contiguous*/
50                         void*                           contig_buffer,
51                         /* page-size multiple */
52                         uint32_t                        size,
53                         hv_vmbus_channel_msg_info**     msg_info,
54                         uint32_t*                       message_count);
55
56 static void     vmbus_channel_set_event(hv_vmbus_channel* channel);
57 static void     VmbusProcessChannelEvent(void* channel, int pending);
58
59 /**
60  *  @brief Trigger an event notification on the specified channel
61  */
62 static void
63 vmbus_channel_set_event(hv_vmbus_channel *channel)
64 {
65         hv_vmbus_monitor_page *monitor_page;
66
67         if (channel->offer_msg.monitor_allocated) {
68                 /* Each uint32_t represents 32 channels */
69                 synch_set_bit((channel->offer_msg.child_rel_id & 31),
70                         ((uint32_t *)hv_vmbus_g_connection.send_interrupt_page
71                                 + ((channel->offer_msg.child_rel_id >> 5))));
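                /*
                 * Worked example with a hypothetical relid: child_rel_id 70
                 * selects uint32_t index 70 >> 5 = 2 of the send interrupt
                 * page and bit 70 & 31 = 6 within that word.
                 */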
72
73                 monitor_page = (hv_vmbus_monitor_page *)
74                         hv_vmbus_g_connection.monitor_page_2;
75
76                 synch_set_bit(channel->monitor_bit,
77                         (uint32_t *)&monitor_page->
78                                 trigger_group[channel->monitor_group].u.pending);
79         } else {
80                 hv_vmbus_set_event(channel);
81         }
82
83 }
84
85 static int
86 vmbus_channel_sysctl_monalloc(SYSCTL_HANDLER_ARGS)
87 {
88         struct hv_vmbus_channel *chan = arg1;
89         int alloc = 0;
90
91         if (chan->offer_msg.monitor_allocated)
92                 alloc = 1;
93         return sysctl_handle_int(oidp, &alloc, 0, req);
94 }
95
96 static void
97 vmbus_channel_sysctl_create(hv_vmbus_channel* channel)
98 {
99         device_t dev;
100         struct sysctl_oid *devch_sysctl;
101         struct sysctl_oid *devch_id_sysctl, *devch_sub_sysctl;
102         struct sysctl_oid *devch_id_in_sysctl, *devch_id_out_sysctl;
103         struct sysctl_ctx_list *ctx;
104         uint32_t ch_id;
105         uint16_t sub_ch_id;
106         char name[16];
107         
108         hv_vmbus_channel* primary_ch = channel->primary_channel;
109
110         if (primary_ch == NULL) {
111                 dev = channel->device->device;
112                 ch_id = channel->offer_msg.child_rel_id;
113         } else {
114                 dev = primary_ch->device->device;
115                 ch_id = primary_ch->offer_msg.child_rel_id;
116                 sub_ch_id = channel->offer_msg.offer.sub_channel_index;
117         }
118         ctx = device_get_sysctl_ctx(dev);
119         /* This creates dev.DEVNAME.DEVUNIT.channel tree */
120         devch_sysctl = SYSCTL_ADD_NODE(ctx,
121                     SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
122                     OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
123         /* This creates dev.DEVNAME.DEVUNIT.channel.CHANID tree */
124         snprintf(name, sizeof(name), "%d", ch_id);
125         devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
126                     SYSCTL_CHILDREN(devch_sysctl),
127                     OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
128
129         if (primary_ch != NULL) {
130                 devch_sub_sysctl = SYSCTL_ADD_NODE(ctx,
131                         SYSCTL_CHILDREN(devch_id_sysctl),
132                         OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
133                 snprintf(name, sizeof(name), "%d", sub_ch_id);
134                 devch_id_sysctl = SYSCTL_ADD_NODE(ctx,
135                         SYSCTL_CHILDREN(devch_sub_sysctl),
136                         OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
137
138                 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl),
139                     OID_AUTO, "chanid", CTLFLAG_RD,
140                     &channel->offer_msg.child_rel_id, 0, "channel id");
141         }
142         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
143             "cpu", CTLFLAG_RD, &channel->target_cpu, 0, "owner CPU id");
144         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO,
145             "monitor_allocated", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
146             channel, 0, vmbus_channel_sysctl_monalloc, "I",
147             "is monitor allocated to this channel");
148
149         devch_id_in_sysctl = SYSCTL_ADD_NODE(ctx,
150                     SYSCTL_CHILDREN(devch_id_sysctl),
151                     OID_AUTO,
152                     "in",
153                     CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
154         devch_id_out_sysctl = SYSCTL_ADD_NODE(ctx,
155                     SYSCTL_CHILDREN(devch_id_sysctl),
156                     OID_AUTO,
157                     "out",
158                     CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
159         hv_ring_buffer_stat(ctx,
160                 SYSCTL_CHILDREN(devch_id_in_sysctl),
161                 &(channel->inbound),
162                 "inbound ring buffer stats");
163         hv_ring_buffer_stat(ctx,
164                 SYSCTL_CHILDREN(devch_id_out_sysctl),
165                 &(channel->outbound),
166                 "outbound ring buffer stats");
167 }
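
/*
 * The resulting sysctl tree looks like the sketch below; the "hn" device
 * name and the channel ids are illustrative only:
 *
 *	dev.hn.0.channel.14.cpu
 *	dev.hn.0.channel.14.monitor_allocated
 *	dev.hn.0.channel.14.in.*    (inbound ring buffer stats)
 *	dev.hn.0.channel.14.out.*   (outbound ring buffer stats)
 *	dev.hn.0.channel.14.sub.1.chanid    (sub-channels only)
 */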
168
169 /**
170  * @brief Open the specified channel
171  */
172 int
173 hv_vmbus_channel_open(
174         hv_vmbus_channel*               new_channel,
175         uint32_t                        send_ring_buffer_size,
176         uint32_t                        recv_ring_buffer_size,
177         void*                           user_data,
178         uint32_t                        user_data_len,
179         hv_vmbus_pfn_channel_callback   pfn_on_channel_callback,
180         void*                           context)
181 {
182
183         int ret = 0;
184         void *in, *out;
185         hv_vmbus_channel_open_channel*  open_msg;
186         hv_vmbus_channel_msg_info*      open_info;
187
188         mtx_lock(&new_channel->sc_lock);
189         if (new_channel->state == HV_CHANNEL_OPEN_STATE) {
190             new_channel->state = HV_CHANNEL_OPENING_STATE;
191         } else {
192             mtx_unlock(&new_channel->sc_lock);
193             if(bootverbose)
194                 printf("VMBUS: Trying to open channel <%p> which is in "
195                     "state %d.\n", new_channel, new_channel->state);
196             return (EINVAL);
197         }
198         mtx_unlock(&new_channel->sc_lock);
199
200         new_channel->on_channel_callback = pfn_on_channel_callback;
201         new_channel->channel_callback_context = context;
202
203         vmbus_on_channel_open(new_channel);
204
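        /*
         * Channel events will be processed by VmbusProcessChannelEvent,
         * queued on the per-CPU event taskqueue of this channel's target
         * CPU.
         */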
205         new_channel->rxq = VMBUS_PCPU_GET(vmbus_get_softc(), event_tq,
206             new_channel->target_cpu);
207         TASK_INIT(&new_channel->channel_task, 0, VmbusProcessChannelEvent, new_channel);
208
209         /* Allocate the ring buffer */
210         out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size),
211             M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
212         KASSERT(out != NULL,
213             ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!"));
214         if (out == NULL)
215                 return (ENOMEM);
216
217         in = ((uint8_t *) out + send_ring_buffer_size);
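
        /*
         * The single contiguous allocation is split in two: the first
         * send_ring_buffer_size bytes back the outbound (transmit) ring and
         * the remainder backs the inbound (receive) ring, so the one GPADL
         * established below covers both rings.
         */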
218
219         new_channel->ring_buffer_pages = out;
220         new_channel->ring_buffer_page_count = (send_ring_buffer_size +
221             recv_ring_buffer_size) >> PAGE_SHIFT;
222         new_channel->ring_buffer_size = send_ring_buffer_size +
223             recv_ring_buffer_size;
224
225         hv_vmbus_ring_buffer_init(
226                 &new_channel->outbound,
227                 out,
228                 send_ring_buffer_size);
229
230         hv_vmbus_ring_buffer_init(
231                 &new_channel->inbound,
232                 in,
233                 recv_ring_buffer_size);
234
235         /* Create sysctl tree for this channel */
236         vmbus_channel_sysctl_create(new_channel);
237
238         /**
239          * Establish the gpadl for the ring buffer
240          */
241         new_channel->ring_buffer_gpadl_handle = 0;
242
243         ret = hv_vmbus_channel_establish_gpadl(new_channel,
244                 new_channel->outbound.ring_buffer,
245                 send_ring_buffer_size + recv_ring_buffer_size,
246                 &new_channel->ring_buffer_gpadl_handle);
247
248         /**
249          * Create and init the channel open message
250          */
251         open_info = (hv_vmbus_channel_msg_info*) malloc(
252                 sizeof(hv_vmbus_channel_msg_info) +
253                         sizeof(hv_vmbus_channel_open_channel),
254                 M_DEVBUF,
255                 M_NOWAIT);
256         KASSERT(open_info != NULL,
257             ("Error VMBUS: malloc failed to allocate Open Channel message!"));
258
259         if (open_info == NULL)
260                 return (ENOMEM);
261
262         sema_init(&open_info->wait_sema, 0, "Open Info Sema");
263
264         open_msg = (hv_vmbus_channel_open_channel*) open_info->msg;
265         open_msg->header.message_type = HV_CHANNEL_MESSAGE_OPEN_CHANNEL;
266         open_msg->open_id = new_channel->offer_msg.child_rel_id;
267         open_msg->child_rel_id = new_channel->offer_msg.child_rel_id;
268         open_msg->ring_buffer_gpadl_handle =
269                 new_channel->ring_buffer_gpadl_handle;
270         open_msg->downstream_ring_buffer_page_offset = send_ring_buffer_size
271                 >> PAGE_SHIFT;
272         open_msg->target_vcpu = new_channel->target_vcpu;
273
274         if (user_data_len)
275                 memcpy(open_msg->user_data, user_data, user_data_len);
276
277         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
278         TAILQ_INSERT_TAIL(
279                 &hv_vmbus_g_connection.channel_msg_anchor,
280                 open_info,
281                 msg_list_entry);
282         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
283
284         ret = hv_vmbus_post_message(
285                 open_msg, sizeof(hv_vmbus_channel_open_channel));
286
287         if (ret != 0)
288             goto cleanup;
289
290         ret = sema_timedwait(&open_info->wait_sema, 5 * hz); /* KYS 5 seconds */
291
292         if (ret) {
293             if(bootverbose)
294                 printf("VMBUS: channel <%p> open timeout.\n", new_channel);
295             goto cleanup;
296         }
297
298         if (open_info->response.open_result.status == 0) {
299             new_channel->state = HV_CHANNEL_OPENED_STATE;
300             if(bootverbose)
301                 printf("VMBUS: channel <%p> open success.\n", new_channel);
302         } else {
303             if(bootverbose)
304                 printf("Error VMBUS: channel <%p> open failed - %d!\n",
305                         new_channel, open_info->response.open_result.status);
306         }
307
308         cleanup:
309         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
310         TAILQ_REMOVE(
311                 &hv_vmbus_g_connection.channel_msg_anchor,
312                 open_info,
313                 msg_list_entry);
314         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
315         sema_destroy(&open_info->wait_sema);
316         free(open_info, M_DEVBUF);
317
318         return (ret);
319 }
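
/*
 * A minimal usage sketch; the ring sizes, callback and softc below are
 * illustrative only, not taken from any particular driver:
 *
 *	error = hv_vmbus_channel_open(chan,
 *	    PAGE_SIZE * 4,                     (send ring buffer size)
 *	    PAGE_SIZE * 4,                     (receive ring buffer size)
 *	    NULL, 0,                           (no channel-specific user data)
 *	    my_channel_callback, my_softc);
 *	if (error != 0)
 *		device_printf(dev, "channel open failed: %d\n", error);
 */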
320
321 /**
322  * @brief Create a gpadl for the specified buffer
323  */
324 static int
325 vmbus_channel_create_gpadl_header(
326         void*                           contig_buffer,
327         uint32_t                        size,   /* page-size multiple */
328         hv_vmbus_channel_msg_info**     msg_info,
329         uint32_t*                       message_count)
330 {
331         int                             i;
332         int                             page_count;
333         unsigned long long              pfn;
334         uint32_t                        msg_size;
335         hv_vmbus_channel_gpadl_header*  gpa_header;
336         hv_vmbus_channel_gpadl_body*    gpadl_body;
337         hv_vmbus_channel_msg_info*      msg_header;
338         hv_vmbus_channel_msg_info*      msg_body;
339
340         int pfnSum, pfnCount, pfnLeft, pfnCurr, pfnSize;
341
342         page_count = size >> PAGE_SHIFT;
343         pfn = hv_get_phys_addr(contig_buffer) >> PAGE_SHIFT;
344
345         /* Do we need a GPADL body message? */
346         pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
347             - sizeof(hv_vmbus_channel_gpadl_header)
348             - sizeof(hv_gpa_range);
349         pfnCount = pfnSize / sizeof(uint64_t);
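
        /*
         * If the PFN list does not fit into a single GPADL_HEADER message,
         * the remainder is carried by one or more GPADL_BODY messages, each
         * holding as many 64-bit PFNs as fit in one hypervisor message.
         */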
350
351         if (page_count > pfnCount) { /* we need a GPADL body */
352             /* fill in the header               */
353             msg_size = sizeof(hv_vmbus_channel_msg_info)
354                 + sizeof(hv_vmbus_channel_gpadl_header)
355                 + sizeof(hv_gpa_range)
356                 + pfnCount * sizeof(uint64_t);
357             msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
358             KASSERT(
359                 msg_header != NULL,
360                 ("Error VMBUS: malloc failed to allocate Gpadl Message!"));
361             if (msg_header == NULL)
362                 return (ENOMEM);
363
364             TAILQ_INIT(&msg_header->sub_msg_list_anchor);
365             msg_header->message_size = msg_size;
366
367             gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
368             gpa_header->range_count = 1;
369             gpa_header->range_buf_len = sizeof(hv_gpa_range)
370                 + page_count * sizeof(uint64_t);
371             gpa_header->range[0].byte_offset = 0;
372             gpa_header->range[0].byte_count = size;
373             for (i = 0; i < pfnCount; i++) {
374                 gpa_header->range[0].pfn_array[i] = pfn + i;
375             }
376             *msg_info = msg_header;
377             *message_count = 1;
378
379             pfnSum = pfnCount;
380             pfnLeft = page_count - pfnCount;
381
382             /*
383              *  figure out how many pfns we can fit
384              */
385             pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE
386                 - sizeof(hv_vmbus_channel_gpadl_body);
387             pfnCount = pfnSize / sizeof(uint64_t);
388
389             /*
390              * fill in the body
391              */
392             while (pfnLeft) {
393                 if (pfnLeft > pfnCount) {
394                     pfnCurr = pfnCount;
395                 } else {
396                     pfnCurr = pfnLeft;
397                 }
398
399                 msg_size = sizeof(hv_vmbus_channel_msg_info) +
400                     sizeof(hv_vmbus_channel_gpadl_body) +
401                     pfnCurr * sizeof(uint64_t);
402                 msg_body = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
403                 KASSERT(
404                     msg_body != NULL,
405                     ("Error VMBUS: malloc failed to allocate Gpadl msg_body!"));
406                 if (msg_body == NULL)
407                     return (ENOMEM);
408
409                 msg_body->message_size = msg_size;
410                 (*message_count)++;
411                 gpadl_body =
412                     (hv_vmbus_channel_gpadl_body*) msg_body->msg;
413                 /*
414                  * gpadl_body->gpadl = kbuffer;
415                  */
416                 for (i = 0; i < pfnCurr; i++) {
417                     gpadl_body->pfn[i] = pfn + pfnSum + i;
418                 }
419
420                 TAILQ_INSERT_TAIL(
421                     &msg_header->sub_msg_list_anchor,
422                     msg_body,
423                     msg_list_entry);
424                 pfnSum += pfnCurr;
425                 pfnLeft -= pfnCurr;
426             }
427         } else { /* else everything fits in a header */
428
429             msg_size = sizeof(hv_vmbus_channel_msg_info) +
430                 sizeof(hv_vmbus_channel_gpadl_header) +
431                 sizeof(hv_gpa_range) +
432                 page_count * sizeof(uint64_t);
433             msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO);
434             KASSERT(
435                 msg_header != NULL,
436                 ("Error VMBUS: malloc failed to allocate Gpadl Message!"));
437             if (msg_header == NULL)
438                 return (ENOMEM);
439
440             msg_header->message_size = msg_size;
441
442             gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg;
443             gpa_header->range_count = 1;
444             gpa_header->range_buf_len = sizeof(hv_gpa_range) +
445                 page_count * sizeof(uint64_t);
446             gpa_header->range[0].byte_offset = 0;
447             gpa_header->range[0].byte_count = size;
448             for (i = 0; i < page_count; i++) {
449                 gpa_header->range[0].pfn_array[i] = pfn + i;
450             }
451
452             *msg_info = msg_header;
453             *message_count = 1;
454         }
455
456         return (0);
457 }
458
459 /**
460  * @brief Establish a GPADL for the specified buffer
461  */
462 int
463 hv_vmbus_channel_establish_gpadl(
464         hv_vmbus_channel*       channel,
465         void*                   contig_buffer,
466         uint32_t                size, /* page-size multiple */
467         uint32_t*               gpadl_handle)
468
469 {
470         int ret = 0;
471         hv_vmbus_channel_gpadl_header*  gpadl_msg;
472         hv_vmbus_channel_gpadl_body*    gpadl_body;
473         hv_vmbus_channel_msg_info*      msg_info;
474         hv_vmbus_channel_msg_info*      sub_msg_info;
475         uint32_t                        msg_count;
476         hv_vmbus_channel_msg_info*      curr;
477         uint32_t                        next_gpadl_handle;
478
479         next_gpadl_handle = atomic_fetchadd_int(
480             &hv_vmbus_g_connection.next_gpadl_handle, 1);
481
482         ret = vmbus_channel_create_gpadl_header(
483                 contig_buffer, size, &msg_info, &msg_count);
484
485         if(ret != 0) {
486                 /*
487                  * XXX
488                  * We cannot even revert the above increment: if multiple
489                  * GPADL establishments are running in parallel,
490                  * decrementing the global next_gpadl_handle would cause
491                  * _big_ trouble.  A better solution would be a 0-based
492                  * GPADL id bitmap ...
493                  */
494                 return ret;
495         }
496
497         sema_init(&msg_info->wait_sema, 0, "Open Info Sema");
498         gpadl_msg = (hv_vmbus_channel_gpadl_header*) msg_info->msg;
499         gpadl_msg->header.message_type = HV_CHANNEL_MESSAGEL_GPADL_HEADER;
500         gpadl_msg->child_rel_id = channel->offer_msg.child_rel_id;
501         gpadl_msg->gpadl = next_gpadl_handle;
502
503         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
504         TAILQ_INSERT_TAIL(
505                 &hv_vmbus_g_connection.channel_msg_anchor,
506                 msg_info,
507                 msg_list_entry);
508
509         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
510
511         ret = hv_vmbus_post_message(
512                 gpadl_msg,
513                 msg_info->message_size -
514                     (uint32_t) sizeof(hv_vmbus_channel_msg_info));
515
516         if (ret != 0)
517             goto cleanup;
518
519         if (msg_count > 1) {
520             TAILQ_FOREACH(curr,
521                     &msg_info->sub_msg_list_anchor, msg_list_entry) {
522                 sub_msg_info = curr;
523                 gpadl_body =
524                     (hv_vmbus_channel_gpadl_body*) sub_msg_info->msg;
525
526                 gpadl_body->header.message_type =
527                     HV_CHANNEL_MESSAGE_GPADL_BODY;
528                 gpadl_body->gpadl = next_gpadl_handle;
529
530                 ret = hv_vmbus_post_message(
531                         gpadl_body,
532                         sub_msg_info->message_size
533                             - (uint32_t) sizeof(hv_vmbus_channel_msg_info));
534                 /* If the post message failed, give up and clean up */
535                 if(ret != 0)
536                     goto cleanup;
537             }
538         }
539
540         ret = sema_timedwait(&msg_info->wait_sema, 5 * hz); /* KYS 5 seconds*/
541         if (ret != 0)
542             goto cleanup;
543
544         *gpadl_handle = gpadl_msg->gpadl;
545
546 cleanup:
547
548         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
549         TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
550                 msg_info, msg_list_entry);
551         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
552
553         sema_destroy(&msg_info->wait_sema);
554         free(msg_info, M_DEVBUF);
555
556         return (ret);
557 }
558
559 /**
560  * @brief Teardown the specified GPADL handle
561  */
562 int
563 hv_vmbus_channel_teardown_gpdal(
564         hv_vmbus_channel*       channel,
565         uint32_t                gpadl_handle)
566 {
567         int                                     ret = 0;
568         hv_vmbus_channel_gpadl_teardown*        msg;
569         hv_vmbus_channel_msg_info*              info;
570
571         info = (hv_vmbus_channel_msg_info *)
572                 malloc( sizeof(hv_vmbus_channel_msg_info) +
573                         sizeof(hv_vmbus_channel_gpadl_teardown),
574                                 M_DEVBUF, M_NOWAIT);
575         KASSERT(info != NULL,
576             ("Error VMBUS: malloc failed to allocate Gpadl Teardown Msg!"));
577         if (info == NULL) {
578             ret = ENOMEM;
579             goto cleanup;
580         }
581
582         sema_init(&info->wait_sema, 0, "Open Info Sema");
583
584         msg = (hv_vmbus_channel_gpadl_teardown*) info->msg;
585
586         msg->header.message_type = HV_CHANNEL_MESSAGE_GPADL_TEARDOWN;
587         msg->child_rel_id = channel->offer_msg.child_rel_id;
588         msg->gpadl = gpadl_handle;
589
590         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
591         TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_msg_anchor,
592                         info, msg_list_entry);
593         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
594
595         ret = hv_vmbus_post_message(msg,
596                         sizeof(hv_vmbus_channel_gpadl_teardown));
597         if (ret != 0) 
598             goto cleanup;
599         
600         ret = sema_timedwait(&info->wait_sema, 5 * hz); /* KYS 5 seconds */
601
602 cleanup:
603         /*
604          * Received a teardown response
605          */
606         mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
607         TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor,
608                         info, msg_list_entry);
609         mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
610         sema_destroy(&info->wait_sema);
611         free(info, M_DEVBUF);
612
613         return (ret);
614 }
615
616 static void
617 hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
618 {
619         int ret = 0;
620         struct taskqueue *rxq = channel->rxq;
621         hv_vmbus_channel_close_channel* msg;
622         hv_vmbus_channel_msg_info* info;
623
624         channel->state = HV_CHANNEL_OPEN_STATE;
625
626         /*
627          * Set rxq to NULL to avoid more requests being scheduled
628          */
629         channel->rxq = NULL;
630         taskqueue_drain(rxq, &channel->channel_task);
631         channel->on_channel_callback = NULL;
632
633         /**
634          * Send a closing message
635          */
636         info = (hv_vmbus_channel_msg_info *)
637                 malloc( sizeof(hv_vmbus_channel_msg_info) +
638                         sizeof(hv_vmbus_channel_close_channel),
639                                 M_DEVBUF, M_NOWAIT);
640         KASSERT(info != NULL, ("VMBUS: malloc failed hv_vmbus_channel_close!"));
641         if(info == NULL)
642             return;
643
644         msg = (hv_vmbus_channel_close_channel*) info->msg;
645         msg->header.message_type = HV_CHANNEL_MESSAGE_CLOSE_CHANNEL;
646         msg->child_rel_id = channel->offer_msg.child_rel_id;
647
648         ret = hv_vmbus_post_message(
649                 msg, sizeof(hv_vmbus_channel_close_channel));
650
651         /* Tear down the gpadl for the channel's ring buffer */
652         if (channel->ring_buffer_gpadl_handle) {
653                 hv_vmbus_channel_teardown_gpdal(channel,
654                         channel->ring_buffer_gpadl_handle);
655         }
656
657         /* TODO: Send a msg to release the childRelId */
658
659         /* cleanup the ring buffers for this channel */
660         hv_ring_buffer_cleanup(&channel->outbound);
661         hv_ring_buffer_cleanup(&channel->inbound);
662
663         contigfree(channel->ring_buffer_pages, channel->ring_buffer_size,
664             M_DEVBUF);
665
666         free(info, M_DEVBUF);
667 }
668
669 /**
670  * @brief Close the specified channel
671  */
672 void
673 hv_vmbus_channel_close(hv_vmbus_channel *channel)
674 {
675         hv_vmbus_channel*       sub_channel;
676
677         if (channel->primary_channel != NULL) {
678                 /*
679                  * We only close multi-channels when the primary is
680                  * closed.
681                  */
682                 return;
683         }
684
685         /*
686          * Close all multi-channels first.
687          */
688         TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor,
689             sc_list_entry) {
690                 if (sub_channel->state != HV_CHANNEL_OPENED_STATE)
691                         continue;
692                 hv_vmbus_channel_close_internal(sub_channel);
693         }
694         /*
695          * Then close the primary channel.
696          */
697         hv_vmbus_channel_close_internal(channel);
698 }
699
700 /**
701  * @brief Send the specified buffer on the given channel
702  */
703 int
704 hv_vmbus_channel_send_packet(
705         hv_vmbus_channel*       channel,
706         void*                   buffer,
707         uint32_t                buffer_len,
708         uint64_t                request_id,
709         hv_vmbus_packet_type    type,
710         uint32_t                flags)
711 {
712         int                     ret = 0;
713         hv_vm_packet_descriptor desc;
714         uint32_t                packet_len;
715         uint64_t                aligned_data;
716         uint32_t                packet_len_aligned;
717         boolean_t               need_sig;
718         hv_vmbus_sg_buffer_list buffer_list[3];
719
720         packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len;
721         packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
722         aligned_data = 0;
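
        /*
         * Worked example with hypothetical sizes: assuming a 16-byte
         * hv_vm_packet_descriptor and buffer_len = 20, packet_len is 36 and
         * packet_len_aligned is 40, so 4 bytes of the zeroed aligned_data
         * pad the packet out to an 8-byte boundary.
         */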
723
724         /* Setup the descriptor */
725         desc.type = type;   /* HV_VMBUS_PACKET_TYPE_DATA_IN_BAND;             */
726         desc.flags = flags; /* HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
727                             /* in 8-bytes granularity */
728         desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3;
729         desc.length8 = (uint16_t) (packet_len_aligned >> 3);
730         desc.transaction_id = request_id;
731
732         buffer_list[0].data = &desc;
733         buffer_list[0].length = sizeof(hv_vm_packet_descriptor);
734
735         buffer_list[1].data = buffer;
736         buffer_list[1].length = buffer_len;
737
738         buffer_list[2].data = &aligned_data;
739         buffer_list[2].length = packet_len_aligned - packet_len;
740
741         ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
742             &need_sig);
743
744         /* TODO: We should determine if this is optional */
745         if (ret == 0 && need_sig) {
746                 vmbus_channel_set_event(channel);
747         }
748
749         return (ret);
750 }
751
752 /**
753  * @brief Send a range of single-page buffer packets using
754  * a GPADL Direct packet type
755  */
756 int
757 hv_vmbus_channel_send_packet_pagebuffer(
758         hv_vmbus_channel*       channel,
759         hv_vmbus_page_buffer    page_buffers[],
760         uint32_t                page_count,
761         void*                   buffer,
762         uint32_t                buffer_len,
763         uint64_t                request_id)
764 {
765
766         int                                     ret = 0;
767         boolean_t                               need_sig;
768         uint32_t                                packet_len;
769         uint32_t                                page_buflen;
770         uint32_t                                packetLen_aligned;
771         hv_vmbus_sg_buffer_list                 buffer_list[4];
772         hv_vmbus_channel_packet_page_buffer     desc;
773         uint32_t                                descSize;
774         uint64_t                                alignedData = 0;
775
776         if (page_count > HV_MAX_PAGE_BUFFER_COUNT)
777                 return (EINVAL);
778
779         /*
780          * Adjust the size down since hv_vmbus_channel_packet_page_buffer
781          *  is the largest size we support
782          */
783         descSize = __offsetof(hv_vmbus_channel_packet_page_buffer, range);
784         page_buflen = sizeof(hv_vmbus_page_buffer) * page_count;
785         packet_len = descSize + page_buflen + buffer_len;
786         packetLen_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
787
788         /* Setup the descriptor */
789         desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
790         desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
791         /* in 8-bytes granularity */
792         desc.data_offset8 = (descSize + page_buflen) >> 3;
793         desc.length8 = (uint16_t) (packetLen_aligned >> 3);
794         desc.transaction_id = request_id;
795         desc.range_count = page_count;
796
797         buffer_list[0].data = &desc;
798         buffer_list[0].length = descSize;
799
800         buffer_list[1].data = page_buffers;
801         buffer_list[1].length = page_buflen;
802
803         buffer_list[2].data = buffer;
804         buffer_list[2].length = buffer_len;
805
806         buffer_list[3].data = &alignedData;
807         buffer_list[3].length = packetLen_aligned - packet_len;
808
809         ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 4,
810             &need_sig);
811
812         /* TODO: We should determine if this is optional */
813         if (ret == 0 && need_sig) {
814                 vmbus_channel_set_event(channel);
815         }
816
817         return (ret);
818 }
819
820 /**
821  * @brief Send a multi-page buffer packet using a GPADL Direct packet type
822  */
823 int
824 hv_vmbus_channel_send_packet_multipagebuffer(
825         hv_vmbus_channel*               channel,
826         hv_vmbus_multipage_buffer*      multi_page_buffer,
827         void*                           buffer,
828         uint32_t                        buffer_len,
829         uint64_t                        request_id)
830 {
831
832         int                     ret = 0;
833         uint32_t                desc_size;
834         boolean_t               need_sig;
835         uint32_t                packet_len;
836         uint32_t                packet_len_aligned;
837         uint32_t                pfn_count;
838         uint64_t                aligned_data = 0;
839         hv_vmbus_sg_buffer_list buffer_list[3];
840         hv_vmbus_channel_packet_multipage_buffer desc;
841
842         pfn_count =
843             HV_NUM_PAGES_SPANNED(
844                     multi_page_buffer->offset,
845                     multi_page_buffer->length);
846
847         if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT))
848             return (EINVAL);
849         /*
850          * Adjust the size down since hv_vmbus_channel_packet_multipage_buffer
851          * is the largest size we support
852          */
853         desc_size =
854             sizeof(hv_vmbus_channel_packet_multipage_buffer) -
855                     ((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) *
856                         sizeof(uint64_t));
857         packet_len = desc_size + buffer_len;
858         packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t));
859
860         /*
861          * Setup the descriptor
862          */
863         desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
864         desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
865         desc.data_offset8 = desc_size >> 3; /* in 8-bytes granularity */
866         desc.length8 = (uint16_t) (packet_len_aligned >> 3);
867         desc.transaction_id = request_id;
868         desc.range_count = 1;
869
870         desc.range.length = multi_page_buffer->length;
871         desc.range.offset = multi_page_buffer->offset;
872
873         memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array,
874                 pfn_count * sizeof(uint64_t));
875
876         buffer_list[0].data = &desc;
877         buffer_list[0].length = desc_size;
878
879         buffer_list[1].data = buffer;
880         buffer_list[1].length = buffer_len;
881
882         buffer_list[2].data = &aligned_data;
883         buffer_list[2].length = packet_len_aligned - packet_len;
884
885         ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3,
886             &need_sig);
887
888         /* TODO: We should determine if this is optional */
889         if (ret == 0 && need_sig) {
890             vmbus_channel_set_event(channel);
891         }
892
893         return (ret);
894 }
895
896 /**
897  * @brief Retrieve the user packet on the specified channel
898  */
899 int
900 hv_vmbus_channel_recv_packet(
901         hv_vmbus_channel*       channel,
902         void*                   Buffer,
903         uint32_t                buffer_len,
904         uint32_t*               buffer_actual_len,
905         uint64_t*               request_id)
906 {
907         int                     ret;
908         uint32_t                user_len;
909         uint32_t                packet_len;
910         hv_vm_packet_descriptor desc;
911
912         *buffer_actual_len = 0;
913         *request_id = 0;
914
915         ret = hv_ring_buffer_peek(&channel->inbound, &desc,
916                 sizeof(hv_vm_packet_descriptor));
917         if (ret != 0)
918                 return (0);
919
920         packet_len = desc.length8 << 3;
921         user_len = packet_len - (desc.data_offset8 << 3);
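
        /*
         * length8 and data_offset8 are in 8-byte units; shifting left by 3
         * converts them to bytes.  user_len is the payload that starts at
         * the offset indicated by the descriptor.
         */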
922
923         *buffer_actual_len = user_len;
924
925         if (user_len > buffer_len)
926                 return (EINVAL);
927
928         *request_id = desc.transaction_id;
929
930         /* Copy over the packet to the user buffer */
931         ret = hv_ring_buffer_read(&channel->inbound, Buffer, user_len,
932                 (desc.data_offset8 << 3));
933
934         return (0);
935 }
936
937 /**
938  * @brief Retrieve the raw packet on the specified channel
939  */
940 int
941 hv_vmbus_channel_recv_packet_raw(
942         hv_vmbus_channel*       channel,
943         void*                   buffer,
944         uint32_t                buffer_len,
945         uint32_t*               buffer_actual_len,
946         uint64_t*               request_id)
947 {
948         int             ret;
949         uint32_t        packetLen;
950         hv_vm_packet_descriptor desc;
951
952         *buffer_actual_len = 0;
953         *request_id = 0;
954
955         ret = hv_ring_buffer_peek(
956                 &channel->inbound, &desc,
957                 sizeof(hv_vm_packet_descriptor));
958
959         if (ret != 0)
960             return (0);
961
962         packetLen = desc.length8 << 3;
963         *buffer_actual_len = packetLen;
964
965         if (packetLen > buffer_len)
966             return (ENOBUFS);
967
968         *request_id = desc.transaction_id;
969
970         /* Copy over the entire packet to the user buffer */
971         ret = hv_ring_buffer_read(&channel->inbound, buffer, packetLen, 0);
972
973         return (0);
974 }
975
976
977 /**
978  * Process a channel event notification
979  */
980 static void
981 VmbusProcessChannelEvent(void* context, int pending)
982 {
983         void* arg;
984         uint32_t bytes_to_read;
985         hv_vmbus_channel* channel = (hv_vmbus_channel*)context;
986         boolean_t is_batched_reading;
987
988         /**
989          * Find the channel based on this relid and invoke
990          * the channel callback to process the event.
991          */
992
993         if (channel == NULL) {
994                 return;
995         }
996         /**
997          * To deal with the race condition where we might
998          * receive a packet while the relevant driver is
999          * being unloaded, dispatch the callback while
1000          * holding the channel lock. The unloading driver
1001          * will acquire the same channel lock to set the
1002          * callback to NULL. This closes the window.
1003          */
1004
1005         if (channel->on_channel_callback != NULL) {
1006                 arg = channel->channel_callback_context;
1007                 is_batched_reading = channel->batched_reading;
1008                 /*
1009                  * Optimize host to guest signaling by ensuring:
1010                  * 1. While reading the channel, we disable interrupts from
1011                  *    host.
1012                  * 2. Ensure that we process all posted messages from the host
1013                  *    before returning from this callback.
1014                  * 3. Once we return, enable signaling from the host. Once this
1015                  *    state is set we check to see if additional packets are
1016                  *    available to read. In this case we repeat the process.
1017                  */
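                /*
                 * With batched reading, read_begin masks further host
                 * interrupts for this channel's inbound ring, and read_end
                 * unmasks them and returns the number of bytes still
                 * unread, so the callback keeps being re-invoked until the
                 * ring is drained.
                 */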
1018                 do {
1019                         if (is_batched_reading)
1020                                 hv_ring_buffer_read_begin(&channel->inbound);
1021
1022                         channel->on_channel_callback(arg);
1023
1024                         if (is_batched_reading)
1025                                 bytes_to_read =
1026                                     hv_ring_buffer_read_end(&channel->inbound);
1027                         else
1028                                 bytes_to_read = 0;
1029                 } while (is_batched_reading && (bytes_to_read != 0));
1030         }
1031 }