2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/systm.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
41 #include <machine/atomic.h>
42 #include <machine/bus.h>
45 #include <vm/vm_param.h>
48 #include <dev/hyperv/include/hyperv_busdma.h>
49 #include <dev/hyperv/vmbus/hv_vmbus_priv.h>
50 #include <dev/hyperv/vmbus/hyperv_var.h>
51 #include <dev/hyperv/vmbus/vmbus_reg.h>
52 #include <dev/hyperv/vmbus/vmbus_var.h>
/*
 * Forward declarations of file-local (static) helpers: TX signaling,
 * per-cpu event-flag count maintenance, taskqueue handlers for batched
 * and non-batched channel reads, detach work, and the CHOFFER/CHRESCIND
 * channel-message handlers.
 */
54 static void vmbus_chan_signal_tx(struct hv_vmbus_channel *chan);
55 static void vmbus_chan_update_evtflagcnt(struct vmbus_softc *,
56 const struct hv_vmbus_channel *);
58 static void vmbus_chan_task(void *, int);
59 static void vmbus_chan_task_nobatch(void *, int);
60 static void vmbus_chan_detach_task(void *, int);
62 static void vmbus_chan_msgproc_choffer(struct vmbus_softc *,
63 const struct vmbus_message *);
64 static void vmbus_chan_msgproc_chrescind(struct vmbus_softc *,
65 const struct vmbus_message *);
68 * Vmbus channel message processing.
/*
 * Dispatch table indexed by channel-message type: CHOFFER/CHRESCIND get
 * real handlers; the *_RESP entries only wake a sleeping requester.
 * NOTE(review): the table's closing brace is elided in this excerpt.
 */
70 static const vmbus_chanmsg_proc_t
71 vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = {
72 VMBUS_CHANMSG_PROC(CHOFFER, vmbus_chan_msgproc_choffer),
73 VMBUS_CHANMSG_PROC(CHRESCIND, vmbus_chan_msgproc_chrescind),
75 VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP),
76 VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP),
77 VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP)
81 * @brief Trigger an event notification on the specified channel
84 vmbus_chan_signal_tx(struct hv_vmbus_channel *chan)
86 struct vmbus_softc *sc = chan->vmbus_sc;
87 uint32_t chanid = chan->ch_id;
/* Mark this channel pending in the guest->host TX event-flag bitmap. */
89 atomic_set_long(&sc->vmbus_tx_evtflags[chanid >> VMBUS_EVTFLAG_SHIFT],
90 1UL << (chanid & VMBUS_EVTFLAG_MASK));
/*
 * With MNF, set this channel's bit in its monitor trigger group;
 * otherwise signal the host directly via hypercall using the monitor
 * parameter's physical address.  NOTE(review): the atomic call name on
 * the MNF path and the else branch are elided in this excerpt.
 */
92 if (chan->ch_flags & VMBUS_CHAN_FLAG_HASMNF) {
94 &sc->vmbus_mnf2->mnf_trigs[chan->ch_montrig_idx].mt_pending,
95 chan->ch_montrig_mask);
97 hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
/*
 * Sysctl handler reporting whether the channel has monitor notification
 * facilities (MNF).  NOTE(review): the line initializing 'mnf' is elided
 * in this excerpt; presumably mnf = 0/1 based on the flag — confirm.
 */
102 vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS)
104 struct hv_vmbus_channel *chan = arg1;
107 if (chan->ch_flags & VMBUS_CHAN_FLAG_HASMNF)
109 return sysctl_handle_int(oidp, &mnf, 0, req);
/*
 * Create this channel's sysctl subtree:
 *   dev.NAME.UNIT.channel.CHANID[.sub.SUBIDX].{chanid,cpu,mnf,in,out}
 * Nodes live in the channel's own sysctl ctx so they can be torn down
 * on channel close independently of device detach.
 */
113 vmbus_chan_sysctl_create(struct hv_vmbus_channel *chan)
115 struct sysctl_oid *ch_tree, *chid_tree, *br_tree;
116 struct sysctl_ctx_list *ctx;
121 * Add sysctl nodes related to this channel to this
122 * channel's sysctl ctx, so that they can be destroyed
123 * independently upon close of this channel, which can
124 * happen even if the device is not detached.
126 ctx = &chan->ch_sysctl_ctx;
127 sysctl_ctx_init(ctx);
130 * Create dev.NAME.UNIT.channel tree.
132 ch_tree = SYSCTL_ADD_NODE(ctx,
133 SYSCTL_CHILDREN(device_get_sysctl_tree(chan->ch_dev)),
134 OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
139 * Create dev.NAME.UNIT.channel.CHANID tree.
/* Sub-channels are filed under their primary channel's id. */
141 if (VMBUS_CHAN_ISPRIMARY(chan))
144 ch_id = chan->ch_prichan->ch_id;
145 snprintf(name, sizeof(name), "%d", ch_id);
146 chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
147 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
148 if (chid_tree == NULL)
151 if (!VMBUS_CHAN_ISPRIMARY(chan)) {
153 * Create dev.NAME.UNIT.channel.CHANID.sub tree.
155 ch_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree),
156 OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
161 * Create dev.NAME.UNIT.channel.CHANID.sub.SUBIDX tree.
164 * chid_tree is changed to this new sysctl tree.
166 snprintf(name, sizeof(name), "%d", chan->ch_subidx);
167 chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
168 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
169 if (chid_tree == NULL)
/* Per-channel leaves: channel id, owner cpu, MNF capability. */
172 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
173 "chanid", CTLFLAG_RD, &chan->ch_id, 0, "channel id");
176 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
177 "cpu", CTLFLAG_RD, &chan->ch_cpuid, 0, "owner CPU id");
178 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
179 "mnf", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
180 chan, 0, vmbus_chan_sysctl_mnf, "I",
181 "has monitor notification facilities");
184 * Create sysctl tree for RX bufring.
186 br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
187 "in", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
188 if (br_tree != NULL) {
189 hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(br_tree),
190 &chan->inbound, "inbound ring buffer stats");
194 * Create sysctl tree for TX bufring.
196 br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
197 "out", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
198 if (br_tree != NULL) {
199 hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(br_tree),
200 &chan->outbound, "outbound ring buffer stats");
/*
 * Open a vmbus channel: allocate and GPADL-connect the TX+RX bufrings,
 * install the RX callback/taskqueue, then issue CHOPEN and wait for the
 * host's response.  txbr_size/rxbr_size must be page multiples; udata
 * (udlen bytes, <= VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) is copied into the
 * open request.  Returns 0 on success; on failure the bufring GPADL and
 * DMA memory are released and the OPENED flag is cleared.
 * NOTE(review): several error-path lines (gotos/returns) are elided in
 * this excerpt.
 */
205 vmbus_chan_open(struct hv_vmbus_channel *chan, int txbr_size, int rxbr_size,
206 const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
208 struct vmbus_softc *sc = chan->vmbus_sc;
209 const struct vmbus_chanmsg_chopen_resp *resp;
210 const struct vmbus_message *msg;
211 struct vmbus_chanmsg_chopen *req;
212 struct vmbus_msghc *mh;
217 if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
218 device_printf(sc->vmbus_dev,
219 "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
222 KASSERT((txbr_size & PAGE_MASK) == 0,
223 ("send bufring size is not multiple page"));
224 KASSERT((rxbr_size & PAGE_MASK) == 0,
225 ("recv bufring size is not multiple page"));
/* Atomically claim the channel; a second open is a programming error. */
227 if (atomic_testandset_int(&chan->ch_stflags,
228 VMBUS_CHAN_ST_OPENED_SHIFT))
229 panic("double-open chan%u", chan->ch_id);
232 chan->ch_cbarg = cbarg;
234 vmbus_chan_update_evtflagcnt(sc, chan);
/* RX work runs on the event taskqueue of the channel's target cpu. */
236 chan->ch_tq = VMBUS_PCPU_GET(chan->vmbus_sc, event_tq, chan->ch_cpuid);
237 if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
238 TASK_INIT(&chan->ch_task, 0, vmbus_chan_task, chan);
240 TASK_INIT(&chan->ch_task, 0, vmbus_chan_task_nobatch, chan);
243 * Allocate the TX+RX bufrings.
244 * XXX should use ch_dev dtag
246 br = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
247 PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
248 BUS_DMA_WAITOK | BUS_DMA_ZERO);
250 device_printf(sc->vmbus_dev, "bufring allocation failed\n");
254 chan->ch_bufring = br;
256 /* TX bufring comes first */
257 hv_vmbus_ring_buffer_init(&chan->outbound, br, txbr_size);
258 /* RX bufring immediately follows TX bufring */
259 hv_vmbus_ring_buffer_init(&chan->inbound, br + txbr_size, rxbr_size);
261 /* Create sysctl tree for this channel */
262 vmbus_chan_sysctl_create(chan);
265 * Connect the bufrings, both RX and TX, to this channel.
267 error = vmbus_chan_gpadl_connect(chan, chan->ch_bufring_dma.hv_paddr,
268 txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
270 device_printf(sc->vmbus_dev,
271 "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
276 * Open channel w/ the bufring GPADL on the target CPU.
278 mh = vmbus_msghc_get(sc, sizeof(*req));
280 device_printf(sc->vmbus_dev,
281 "can not get msg hypercall for chopen(chan%u)\n",
/* Fill the CHOPEN request; openid reuses the channel id. */
287 req = vmbus_msghc_dataptr(mh);
288 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
289 req->chm_chanid = chan->ch_id;
290 req->chm_openid = chan->ch_id;
291 req->chm_gpadl = chan->ch_bufring_gpadl;
292 req->chm_vcpuid = chan->ch_vcpuid;
293 req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
295 memcpy(req->chm_udata, udata, udlen);
297 error = vmbus_msghc_exec(sc, mh);
299 device_printf(sc->vmbus_dev,
300 "chopen(chan%u) msg hypercall exec failed: %d\n",
302 vmbus_msghc_put(sc, mh);
/* Block until the host posts the CHOPEN response. */
306 msg = vmbus_msghc_wait_result(sc, mh);
307 resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
308 status = resp->chm_status;
310 vmbus_msghc_put(sc, mh);
314 device_printf(sc->vmbus_dev, "chan%u opened\n",
320 device_printf(sc->vmbus_dev, "failed to open chan%u\n", chan->ch_id);
/* Failure path: undo GPADL, free bufrings, clear OPENED flag. */
324 if (chan->ch_bufring_gpadl) {
325 vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
326 chan->ch_bufring_gpadl = 0;
328 if (chan->ch_bufring != NULL) {
329 hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
330 chan->ch_bufring = NULL;
332 atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
/*
 * Establish a GPADL (Guest Physical Address Descriptor List) for the
 * page-aligned region [paddr, paddr+size) and connect it to 'chan'.
 * The page list may not fit in a single hypercall message, so an
 * initial GPADL_CONN is followed by as many GPADL_SUBCONN messages as
 * needed; only the final message's response is awaited.  On success the
 * allocated gpadl handle is returned via *gpadl0.
 * NOTE(review): loop headers and some else branches are elided in this
 * excerpt; the subconn loop is driven by the remaining page_count.
 */
337 vmbus_chan_gpadl_connect(struct hv_vmbus_channel *chan, bus_addr_t paddr,
338 int size, uint32_t *gpadl0)
340 struct vmbus_softc *sc = chan->vmbus_sc;
341 struct vmbus_msghc *mh;
342 struct vmbus_chanmsg_gpadl_conn *req;
343 const struct vmbus_message *msg;
345 uint32_t gpadl, status;
346 int page_count, range_len, i, cnt, error;
350 * Preliminary checks.
353 KASSERT((size & PAGE_MASK) == 0,
354 ("invalid GPA size %d, not multiple page size", size));
355 page_count = size >> PAGE_SHIFT;
357 KASSERT((paddr & PAGE_MASK) == 0,
358 ("GPA is not page aligned %jx", (uintmax_t)paddr));
359 page_id = paddr >> PAGE_SHIFT;
361 range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
363 * We don't support multiple GPA ranges.
/* chm_range_len is 16 bits wide on the wire; reject larger ranges. */
365 if (range_len > UINT16_MAX) {
366 device_printf(sc->vmbus_dev, "GPA too large, %d pages\n",
374 gpadl = vmbus_gpadl_alloc(sc);
378 * Connect this GPADL to the target channel.
381 * Since each message can only hold small set of page
382 * addresses, several messages may be required to
383 * complete the connection.
385 if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
386 cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
/* Size the initial CONN request for the first 'cnt' pages. */
391 reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
392 chm_range.gpa_page[cnt]);
393 mh = vmbus_msghc_get(sc, reqsz);
395 device_printf(sc->vmbus_dev,
396 "can not get msg hypercall for gpadl->chan%u\n",
401 req = vmbus_msghc_dataptr(mh);
402 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
403 req->chm_chanid = chan->ch_id;
404 req->chm_gpadl = gpadl;
405 req->chm_range_len = range_len;
406 req->chm_range_cnt = 1;
407 req->chm_range.gpa_len = size;
408 req->chm_range.gpa_ofs = 0;
409 for (i = 0; i < cnt; ++i)
410 req->chm_range.gpa_page[i] = page_id++;
412 error = vmbus_msghc_exec(sc, mh);
414 device_printf(sc->vmbus_dev,
415 "gpadl->chan%u msg hypercall exec failed: %d\n",
417 vmbus_msghc_put(sc, mh);
/* Ship remaining pages in SUBCONN messages, reusing the same msghc. */
421 while (page_count > 0) {
422 struct vmbus_chanmsg_gpadl_subconn *subreq;
424 if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
425 cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
430 reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
432 vmbus_msghc_reset(mh, reqsz);
434 subreq = vmbus_msghc_dataptr(mh);
435 subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
436 subreq->chm_gpadl = gpadl;
437 for (i = 0; i < cnt; ++i)
438 subreq->chm_gpa_page[i] = page_id++;
/* Intermediate messages need no individual responses. */
440 vmbus_msghc_exec_noresult(mh);
442 KASSERT(page_count == 0, ("invalid page count %d", page_count));
444 msg = vmbus_msghc_wait_result(sc, mh);
445 status = ((const struct vmbus_chanmsg_gpadl_connresp *)
446 msg->msg_data)->chm_status;
448 vmbus_msghc_put(sc, mh);
451 device_printf(sc->vmbus_dev, "gpadl->chan%u failed: "
452 "status %u\n", chan->ch_id, status);
456 device_printf(sc->vmbus_dev, "gpadl->chan%u "
457 "succeeded\n", chan->ch_id);
464 * Disconnect the GPA from the target channel
/*
 * Tear down a previously connected GPADL: send GPADL_DISCONN and wait
 * for the host's acknowledgement, whose content carries no useful
 * information and is discarded.
 */
467 vmbus_chan_gpadl_disconnect(struct hv_vmbus_channel *chan, uint32_t gpadl)
469 struct vmbus_softc *sc = chan->vmbus_sc;
470 struct vmbus_msghc *mh;
471 struct vmbus_chanmsg_gpadl_disconn *req;
474 mh = vmbus_msghc_get(sc, sizeof(*req));
476 device_printf(sc->vmbus_dev,
477 "can not get msg hypercall for gpa x->chan%u\n",
482 req = vmbus_msghc_dataptr(mh);
483 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
484 req->chm_chanid = chan->ch_id;
485 req->chm_gpadl = gpadl;
487 error = vmbus_msghc_exec(sc, mh);
489 device_printf(sc->vmbus_dev,
490 "gpa x->chan%u msg hypercall exec failed: %d\n",
492 vmbus_msghc_put(sc, mh);
/* Wait for the ack, then release the message hypercall context. */
496 vmbus_msghc_wait_result(sc, mh);
497 /* Discard result; no useful information */
498 vmbus_msghc_put(sc, mh);
/*
 * Close a single channel: clear OPENED, free its sysctl subtree, drain
 * pending RX tasks, send CHCLOSE (fire-and-forget), disconnect the
 * bufring GPADL, and free the bufring memory.
 */
504 vmbus_chan_close_internal(struct hv_vmbus_channel *chan)
506 struct vmbus_softc *sc = chan->vmbus_sc;
507 struct vmbus_msghc *mh;
508 struct vmbus_chanmsg_chclose *req;
509 struct taskqueue *tq = chan->ch_tq;
512 /* TODO: stringent check */
513 atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
516 * Free this channel's sysctl tree attached to its device's
519 sysctl_ctx_free(&chan->ch_sysctl_ctx);
522 * Set ch_tq to NULL to avoid more requests be scheduled.
523 * XXX pretty broken; need rework.
526 taskqueue_drain(tq, &chan->ch_task);
530 * Close this channel.
532 mh = vmbus_msghc_get(sc, sizeof(*req));
534 device_printf(sc->vmbus_dev,
535 "can not get msg hypercall for chclose(chan%u)\n",
540 req = vmbus_msghc_dataptr(mh);
541 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
542 req->chm_chanid = chan->ch_id;
/* CHCLOSE has no response; exec without waiting for a result. */
544 error = vmbus_msghc_exec_noresult(mh);
545 vmbus_msghc_put(sc, mh);
548 device_printf(sc->vmbus_dev,
549 "chclose(chan%u) msg hypercall exec failed: %d\n",
552 } else if (bootverbose) {
553 device_printf(sc->vmbus_dev, "close chan%u\n", chan->ch_id);
557 * Disconnect the TX+RX bufrings from this channel.
559 if (chan->ch_bufring_gpadl) {
560 vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
561 chan->ch_bufring_gpadl = 0;
565 * Destroy the TX+RX bufrings.
567 hv_ring_buffer_cleanup(&chan->outbound);
568 hv_ring_buffer_cleanup(&chan->inbound);
569 if (chan->ch_bufring != NULL) {
570 hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
571 chan->ch_bufring = NULL;
576 * Caller should make sure that all sub-channels have
577 * been added to 'chan' and all to-be-closed channels
578 * are not being opened.
/*
 * Public close entry point.  For a primary channel, close all attached
 * sub-channels first, then the primary itself; closing a sub-channel
 * directly is a no-op (it is closed with its primary).
 */
581 vmbus_chan_close(struct hv_vmbus_channel *chan)
585 if (!VMBUS_CHAN_ISPRIMARY(chan)) {
587 * Sub-channel is closed when its primary channel
594 * Close all sub-channels, if any.
596 subchan_cnt = chan->ch_subchan_cnt;
597 if (subchan_cnt > 0) {
598 struct hv_vmbus_channel **subchan;
601 subchan = vmbus_subchan_get(chan, subchan_cnt);
602 for (i = 0; i < subchan_cnt; ++i)
603 vmbus_chan_close_internal(subchan[i]);
604 vmbus_subchan_rel(subchan, subchan_cnt);
607 /* Then close the primary channel. */
608 vmbus_chan_close_internal(chan);
/*
 * Send an inband packet: header + caller data + pad up to the 8-byte
 * aligned total length, written to the TX bufring as a 3-element iov.
 * Signals the host only when the ring-buffer write asks for it.
 * NOTE(review): the 'hlen' initialization line is elided in this
 * excerpt; presumably hlen = sizeof(pkt.cp_hdr) — confirm.
 */
612 vmbus_chan_send(struct hv_vmbus_channel *chan, uint16_t type, uint16_t flags,
613 void *data, int dlen, uint64_t xactid)
615 struct vmbus_chanpkt pkt;
616 int pktlen, pad_pktlen, hlen, error;
622 pktlen = hlen + dlen;
623 pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
625 pkt.cp_hdr.cph_type = type;
626 pkt.cp_hdr.cph_flags = flags;
627 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
628 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
629 pkt.cp_hdr.cph_xactid = xactid;
631 iov[0].iov_base = &pkt;
632 iov[0].iov_len = hlen;
633 iov[1].iov_base = data;
634 iov[1].iov_len = dlen;
635 iov[2].iov_base = &pad;
636 iov[2].iov_len = pad_pktlen - pktlen;
638 error = hv_ring_buffer_write(&chan->outbound, iov, 3, &send_evt);
639 if (!error && send_evt)
640 vmbus_chan_signal_tx(chan);
/*
 * Send a GPA (scatter/gather) packet: fixed header, sglen GPA entries,
 * caller data, and alignment pad — a 4-element iov into the TX bufring.
 * Completion is requested (FLAG_RC) so the host will post a response
 * carrying 'xactid'.
 */
645 vmbus_chan_send_sglist(struct hv_vmbus_channel *chan,
646 struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
648 struct vmbus_chanpkt_sglist pkt;
649 int pktlen, pad_pktlen, hlen, error;
654 KASSERT(sglen < VMBUS_CHAN_SGLIST_MAX,
655 ("invalid sglist len %d", sglen));
/* Header length covers the variable-size trailing GPA array. */
657 hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
658 pktlen = hlen + dlen;
659 pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
661 pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
662 pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
663 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
664 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
665 pkt.cp_hdr.cph_xactid = xactid;
667 pkt.cp_gpa_cnt = sglen;
669 iov[0].iov_base = &pkt;
670 iov[0].iov_len = sizeof(pkt);
671 iov[1].iov_base = sg;
672 iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
673 iov[2].iov_base = data;
674 iov[2].iov_len = dlen;
675 iov[3].iov_base = &pad;
676 iov[3].iov_len = pad_pktlen - pktlen;
678 error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
679 if (!error && send_evt)
680 vmbus_chan_signal_tx(chan);
/*
 * Send a GPA-range (PRP-list) packet: fixed header, a single
 * vmbus_gpa_range with prp_cnt pages, caller data, and alignment pad —
 * a 4-element iov into the TX bufring.  Completion is requested
 * (FLAG_RC); the response carries 'xactid'.
 */
685 vmbus_chan_send_prplist(struct hv_vmbus_channel *chan,
686 struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
689 struct vmbus_chanpkt_prplist pkt;
690 int pktlen, pad_pktlen, hlen, error;
695 KASSERT(prp_cnt < VMBUS_CHAN_PRPLIST_MAX,
696 ("invalid prplist entry count %d", prp_cnt));
/* Header length covers the single range plus its page array. */
698 hlen = __offsetof(struct vmbus_chanpkt_prplist,
699 cp_range[0].gpa_page[prp_cnt]);
700 pktlen = hlen + dlen;
701 pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
703 pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
704 pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
705 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
706 VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
707 pkt.cp_hdr.cph_xactid = xactid;
709 pkt.cp_range_cnt = 1;
711 iov[0].iov_base = &pkt;
712 iov[0].iov_len = sizeof(pkt);
713 iov[1].iov_base = prp;
714 iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
715 iov[2].iov_base = data;
716 iov[2].iov_len = dlen;
717 iov[3].iov_base = &pad;
718 iov[3].iov_len = pad_pktlen - pktlen;
720 error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
721 if (!error && send_evt)
722 vmbus_chan_signal_tx(chan);
/*
 * Receive one packet's payload (header stripped).  Peeks the header to
 * learn the data length, reports it back through *dlen0, and — when the
 * caller's buffer is big enough — reads the payload, skipping 'hlen'
 * header bytes.  NOTE(review): the short-buffer return path and the
 * *dlen0 assignment lines are elided in this excerpt.
 */
727 vmbus_chan_recv(struct hv_vmbus_channel *chan, void *data, int *dlen0,
730 struct vmbus_chanpkt_hdr pkt;
731 int error, dlen, hlen;
733 error = hv_ring_buffer_peek(&chan->inbound, &pkt, sizeof(pkt));
737 hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
738 dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
741 /* Return the size of this packet's data. */
746 *xactid = pkt.cph_xactid;
749 /* Skip packet header */
750 error = hv_ring_buffer_read(&chan->inbound, data, dlen, hlen);
751 KASSERT(!error, ("hv_ring_buffer_read failed"));
/*
 * Receive one whole packet including its header.  Peeks to learn the
 * total length, reports it via *pktlen0, and reads the full packet into
 * pkt0 when the caller's buffer is large enough.
 */
757 vmbus_chan_recv_pkt(struct hv_vmbus_channel *chan,
758 struct vmbus_chanpkt_hdr *pkt0, int *pktlen0)
760 struct vmbus_chanpkt_hdr pkt;
763 error = hv_ring_buffer_peek(&chan->inbound, &pkt, sizeof(pkt));
767 pktlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen);
768 if (*pktlen0 < pktlen) {
769 /* Return the size of this packet. */
775 /* Include packet header */
776 error = hv_ring_buffer_read(&chan->inbound, pkt0, pktlen, 0);
777 KASSERT(!error, ("hv_ring_buffer_read failed"));
/*
 * Taskqueue handler for batched-read channels: run the channel callback,
 * re-enable host signaling via read_end, and loop again if more data
 * arrived in the meantime (avoids lost-wakeup races).
 * NOTE(review): the loop construct and the callback invocation line are
 * elided in this excerpt.
 */
783 vmbus_chan_task(void *xchan, int pending __unused)
785 struct hv_vmbus_channel *chan = xchan;
786 vmbus_chan_callback_t cb = chan->ch_cb;
787 void *cbarg = chan->ch_cbarg;
790 * Optimize host to guest signaling by ensuring:
791 * 1. While reading the channel, we disable interrupts from
793 * 2. Ensure that we process all posted messages from the host
794 * before returning from this callback.
795 * 3. Once we return, enable signaling from the host. Once this
796 * state is set we check to see if additional packets are
797 * available to read. In this case we repeat the process.
799 * NOTE: Interrupt has been disabled in the ISR.
806 left = hv_ring_buffer_read_end(&chan->inbound);
808 /* No more data in RX bufring; done */
811 hv_ring_buffer_read_begin(&chan->inbound);
/*
 * Taskqueue handler for non-batched channels: just invoke the channel
 * callback once; no read_begin/read_end interrupt masking.
 */
816 vmbus_chan_task_nobatch(void *xchan, int pending __unused)
818 struct hv_vmbus_channel *chan = xchan;
820 chan->ch_cb(chan->ch_cbarg);
/*
 * Scan 'flag_cnt' longs of the RX event-flag bitmap; for each set bit,
 * atomically claim the word, map bit position to a channel id, and
 * enqueue that channel's RX task (after read_begin for batched readers).
 * Closed/closing channels (NULL map entry or NULL ch_tq) are skipped.
 */
824 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
829 for (f = 0; f < flag_cnt; ++f) {
834 if (event_flags[f] == 0)
/* Claim all pending bits of this word in one atomic swap. */
837 flags = atomic_swap_long(&event_flags[f], 0);
838 chid_base = f << VMBUS_EVTFLAG_SHIFT;
840 while ((chid_ofs = ffsl(flags)) != 0) {
841 struct hv_vmbus_channel *chan;
843 --chid_ofs; /* NOTE: ffsl is 1-based */
844 flags &= ~(1UL << chid_ofs);
846 chan = sc->vmbus_chmap[chid_base + chid_ofs];
848 /* if channel is closed or closing */
849 if (chan == NULL || chan->ch_tq == NULL)
852 if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
853 hv_ring_buffer_read_begin(&chan->inbound);
854 taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
/*
 * Per-cpu event processing (Win8+ hosts): the per-cpu event page holds
 * the channel pending bits directly; hand them to the flags scanner.
 */
860 vmbus_event_proc(struct vmbus_softc *sc, int cpu)
862 struct vmbus_evtflags *eventf;
865 * On Host with Win8 or above, the event page can be checked directly
866 * to get the id of the channel that has the pending interrupt.
868 eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
869 vmbus_event_flags_proc(sc, eventf->evt_flags,
870 VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
/*
 * Compatibility event processing (pre-Win8 hosts): bit 0 of the event
 * page is a summary flag; when it is set, scan the softc-global RX
 * event-flag array covering the compat channel-id range.
 */
874 vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
876 struct vmbus_evtflags *eventf;
878 eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
879 if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
880 vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
881 VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
/*
 * Raise the target cpu's event-flag word count so scans cover this
 * channel's id.  Lock-free: CAS loop that only ever increases the
 * count; bails out if the current value already covers the channel.
 * NOTE(review): the loop construct around the CAS is elided in this
 * excerpt.
 */
886 vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
887 const struct hv_vmbus_channel *chan)
889 volatile int *flag_cnt_ptr;
892 flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
893 flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);
898 old_flag_cnt = *flag_cnt_ptr;
899 if (old_flag_cnt >= flag_cnt)
901 if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
903 device_printf(sc->vmbus_dev,
904 "channel%u update cpu%d flag_cnt to %d\n",
905 chan->ch_id, chan->ch_cpuid, flag_cnt);
/*
 * Allocate and zero a channel object plus its DMA-able monitor
 * parameter; initialize the sub-channel lock/list and the detach task.
 * Returns NULL (after freeing the channel) if the monprm DMA allocation
 * fails.
 */
912 static struct hv_vmbus_channel *
913 vmbus_chan_alloc(struct vmbus_softc *sc)
915 struct hv_vmbus_channel *chan;
917 chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);
919 chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
920 HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
921 &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
922 if (chan->ch_monprm == NULL) {
923 device_printf(sc->vmbus_dev, "monprm alloc failed\n");
924 free(chan, M_DEVBUF);
929 mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
930 TAILQ_INIT(&chan->ch_subchans);
931 TASK_INIT(&chan->ch_detach_task, 0, vmbus_chan_detach_task, chan);
/*
 * Release a channel object: free the monitor parameter DMA memory,
 * destroy the sub-channel lock, and free the structure itself.
 */
937 vmbus_chan_free(struct hv_vmbus_channel *chan)
939 /* TODO: assert sub-channel list is empty */
940 /* TODO: assert no longer on the primary channel's sub-channel list */
941 /* TODO: assert no longer on the vmbus channel list */
942 hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
943 mtx_destroy(&chan->ch_subchan_lock);
944 free(chan, M_DEVBUF);
/*
 * Register a newly offered channel.  Rejects chan0 and out-of-range
 * ids, installs the channel in the id->channel map, then either adds it
 * to the primary-channel list (primary) or links it onto its primary's
 * sub-channel list (matched by type+instance GUID), bumping the count
 * and waking waiters.  Duplicated primaries and orphan sub-channels are
 * errors.  NOTE(review): several return statements are elided in this
 * excerpt.
 */
948 vmbus_chan_add(struct hv_vmbus_channel *newchan)
950 struct vmbus_softc *sc = newchan->vmbus_sc;
951 struct hv_vmbus_channel *prichan;
953 if (newchan->ch_id == 0) {
956 * Chan0 will neither be processed nor should be offered;
959 device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
961 } else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
962 device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
966 sc->vmbus_chmap[newchan->ch_id] = newchan;
969 device_printf(sc->vmbus_dev, "chan%u subidx%u offer\n",
970 newchan->ch_id, newchan->ch_subidx);
973 mtx_lock(&sc->vmbus_prichan_lock);
974 TAILQ_FOREACH(prichan, &sc->vmbus_prichans, ch_prilink) {
976 * Sub-channel will have the same type GUID and instance
977 * GUID as its primary channel.
979 if (memcmp(&prichan->ch_guid_type, &newchan->ch_guid_type,
980 sizeof(struct hyperv_guid)) == 0 &&
981 memcmp(&prichan->ch_guid_inst, &newchan->ch_guid_inst,
982 sizeof(struct hyperv_guid)) == 0)
985 if (VMBUS_CHAN_ISPRIMARY(newchan)) {
986 if (prichan == NULL) {
987 /* Install the new primary channel */
988 TAILQ_INSERT_TAIL(&sc->vmbus_prichans, newchan,
990 mtx_unlock(&sc->vmbus_prichan_lock);
993 mtx_unlock(&sc->vmbus_prichan_lock);
994 device_printf(sc->vmbus_dev, "duplicated primary "
995 "chan%u\n", newchan->ch_id);
998 } else { /* Sub-channel */
999 if (prichan == NULL) {
1000 mtx_unlock(&sc->vmbus_prichan_lock);
1001 device_printf(sc->vmbus_dev, "no primary chan for "
1002 "chan%u\n", newchan->ch_id);
1006 * Found the primary channel for this sub-channel and
1009 * XXX refcnt prichan
1012 mtx_unlock(&sc->vmbus_prichan_lock);
1015 * This is a sub-channel; link it with the primary channel.
1017 KASSERT(!VMBUS_CHAN_ISPRIMARY(newchan),
1018 ("new channel is not sub-channel"));
1019 KASSERT(prichan != NULL, ("no primary channel"));
/* The sub-channel shares its primary's device. */
1021 newchan->ch_prichan = prichan;
1022 newchan->ch_dev = prichan->ch_dev;
1024 mtx_lock(&prichan->ch_subchan_lock);
1025 TAILQ_INSERT_TAIL(&prichan->ch_subchans, newchan, ch_sublink);
1027 * Bump up sub-channel count and notify anyone that is
1028 * interested in this sub-channel, after this sub-channel
1031 prichan->ch_subchan_cnt++;
1032 mtx_unlock(&prichan->ch_subchan_lock);
/*
 * Bind the channel to 'cpu', recording both the host cpu id and the
 * corresponding hypervisor vcpu id.  Pre-Win8 hosts (WS2008/WIN7) only
 * support cpu0; NOTE(review): the forced cpu = 0 line for that case is
 * elided in this excerpt.
 */
1039 vmbus_chan_cpu_set(struct hv_vmbus_channel *chan, int cpu)
1041 KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));
1043 if (chan->vmbus_sc->vmbus_version == VMBUS_VERSION_WS2008 ||
1044 chan->vmbus_sc->vmbus_version == VMBUS_VERSION_WIN7) {
1045 /* Only cpu0 is supported */
1049 chan->ch_cpuid = cpu;
1050 chan->ch_vcpuid = VMBUS_PCPU_GET(chan->vmbus_sc, vcpuid, cpu);
1053 printf("vmbus_chan%u: assigned to cpu%u [vcpu%u]\n",
1054 chan->ch_id, chan->ch_cpuid, chan->ch_vcpuid);
/*
 * Round-robin cpu assignment: a shared atomic counter mod mp_ncpus
 * picks the next cpu for this channel.
 */
1059 vmbus_chan_cpu_rr(struct hv_vmbus_channel *chan)
1061 static uint32_t vmbus_chan_nextcpu;
1064 cpu = atomic_fetchadd_int(&vmbus_chan_nextcpu, 1) % mp_ncpus;
1065 vmbus_chan_cpu_set(chan, cpu);
/* Default cpu binding: pin to cpu0 unless the driver asks otherwise. */
1069 vmbus_chan_cpu_default(struct hv_vmbus_channel *chan)
1072 * By default, pin the channel to cpu0. Devices having
1073 * special channel-cpu mapping requirement should call
1074 * vmbus_chan_cpu_{set,rr}().
1076 vmbus_chan_cpu_set(chan, 0);
/*
 * CHOFFER handler: allocate a channel from the offer's id/subidx/GUIDs,
 * enable batched reading by default, set up the monitor connection id
 * (offer-supplied on post-WS2008 hosts) and MNF trigger index/mask,
 * bind the default cpu, register the channel, and — for a primary
 * channel — attach a child device.
 */
1080 vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
1081 const struct vmbus_message *msg)
1083 const struct vmbus_chanmsg_choffer *offer;
1084 struct hv_vmbus_channel *chan;
1087 offer = (const struct vmbus_chanmsg_choffer *)msg->msg_data;
1089 chan = vmbus_chan_alloc(sc);
1091 device_printf(sc->vmbus_dev, "allocate chan%u failed\n",
1096 chan->ch_id = offer->chm_chanid;
1097 chan->ch_subidx = offer->chm_subidx;
1098 chan->ch_guid_type = offer->chm_chtype;
1099 chan->ch_guid_inst = offer->chm_chinst;
1101 /* Batch reading is on by default */
1102 chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;
1104 chan->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
1105 if (sc->vmbus_version != VMBUS_VERSION_WS2008)
1106 chan->ch_monprm->mp_connid = offer->chm_connid;
1108 if (offer->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
/* Derive trigger group index and in-group bit mask from montrig. */
1112 chan->ch_flags |= VMBUS_CHAN_FLAG_HASMNF;
1113 chan->ch_montrig_idx = offer->chm_montrig / VMBUS_MONTRIG_LEN;
1114 if (chan->ch_montrig_idx >= VMBUS_MONTRIGS_MAX)
1115 panic("invalid monitor trigger %u", offer->chm_montrig);
1116 chan->ch_montrig_mask =
1117 1 << (offer->chm_montrig % VMBUS_MONTRIG_LEN);
1120 /* Select default cpu for this channel. */
1121 vmbus_chan_cpu_default(chan);
1123 error = vmbus_chan_add(chan);
1125 device_printf(sc->vmbus_dev, "add chan%u failed: %d\n",
1126 chan->ch_id, error);
1127 vmbus_chan_free(chan);
1131 if (VMBUS_CHAN_ISPRIMARY(chan)) {
1133 * Add device for this primary channel.
1136 * Error is ignored here; don't have much to do if error
1139 vmbus_add_child(chan);
1144 * XXX pretty broken; need rework.
/*
 * CHRESCIND handler: validate the rescinded channel id, remove the
 * channel from the id map, and defer the actual teardown to the detach
 * task on the generic thread taskqueue.  NOTE(review): the NULL-check
 * on the map entry is elided in this excerpt.
 */
1147 vmbus_chan_msgproc_chrescind(struct vmbus_softc *sc,
1148 const struct vmbus_message *msg)
1150 const struct vmbus_chanmsg_chrescind *note;
1151 struct hv_vmbus_channel *chan;
1153 note = (const struct vmbus_chanmsg_chrescind *)msg->msg_data;
1154 if (note->chm_chanid > VMBUS_CHAN_MAX) {
1155 device_printf(sc->vmbus_dev, "invalid rescinded chan%u\n",
1161 device_printf(sc->vmbus_dev, "chan%u rescinded\n",
1165 chan = sc->vmbus_chmap[note->chm_chanid];
1168 sc->vmbus_chmap[note->chm_chanid] = NULL;
1170 taskqueue_enqueue(taskqueue_thread, &chan->ch_detach_task);
/*
 * Deferred channel detach.  Primary channels: delete the child device
 * only (the channel object is intentionally kept).  Sub-channels: send
 * CHFREE to the host (best-effort, errors logged and ignored), unlink
 * from the primary's sub-channel list, decrement the count, and free
 * the channel object.
 */
1174 vmbus_chan_detach_task(void *xchan, int pending __unused)
1176 struct hv_vmbus_channel *chan = xchan;
1178 if (VMBUS_CHAN_ISPRIMARY(chan)) {
1179 /* Only primary channel owns the device */
1180 vmbus_delete_child(chan);
1181 /* NOTE: DO NOT free primary channel for now */
1183 struct vmbus_softc *sc = chan->vmbus_sc;
1184 struct hv_vmbus_channel *pri_chan = chan->ch_prichan;
1185 struct vmbus_chanmsg_chfree *req;
1186 struct vmbus_msghc *mh;
1189 mh = vmbus_msghc_get(sc, sizeof(*req));
1191 device_printf(sc->vmbus_dev,
1192 "can not get msg hypercall for chfree(chan%u)\n",
1197 req = vmbus_msghc_dataptr(mh);
1198 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHFREE;
1199 req->chm_chanid = chan->ch_id;
1201 error = vmbus_msghc_exec_noresult(mh);
1202 vmbus_msghc_put(sc, mh);
1205 device_printf(sc->vmbus_dev,
1206 "chfree(chan%u) failed: %d",
1207 chan->ch_id, error);
1208 /* NOTE: Move on! */
1211 device_printf(sc->vmbus_dev, "chan%u freed\n",
/* Unlink from the primary's sub-channel list under its lock. */
1216 mtx_lock(&pri_chan->ch_subchan_lock);
1217 TAILQ_REMOVE(&pri_chan->ch_subchans, chan, ch_sublink);
1218 KASSERT(pri_chan->ch_subchan_cnt > 0,
1219 ("invalid subchan_cnt %d", pri_chan->ch_subchan_cnt));
1220 pri_chan->ch_subchan_cnt--;
1221 mtx_unlock(&pri_chan->ch_subchan_lock);
1224 vmbus_chan_free(chan);
1229 * Detach all devices and destroy the corresponding primary channels.
/*
 * Tear down everything: pop each primary channel off the list (lock
 * dropped around the child-device deletion and free), then clear the
 * whole id->channel map.
 */
1232 vmbus_chan_destroy_all(struct vmbus_softc *sc)
1234 struct hv_vmbus_channel *chan;
1236 mtx_lock(&sc->vmbus_prichan_lock);
1237 while ((chan = TAILQ_FIRST(&sc->vmbus_prichans)) != NULL) {
1238 KASSERT(VMBUS_CHAN_ISPRIMARY(chan), ("not primary channel"));
1239 TAILQ_REMOVE(&sc->vmbus_prichans, chan, ch_prilink);
1240 mtx_unlock(&sc->vmbus_prichan_lock);
1242 vmbus_delete_child(chan);
1243 vmbus_chan_free(chan);
1245 mtx_lock(&sc->vmbus_prichan_lock);
1247 bzero(sc->vmbus_chmap,
1248 sizeof(struct hv_vmbus_channel *) * VMBUS_CHAN_MAX);
1249 mtx_unlock(&sc->vmbus_prichan_lock);
1253 * The channel whose vcpu binding is closest to the current vcpu will
1255 * If no multi-channel, always select primary channel.
/*
 * Pick the channel (primary or sub) whose vcpu binding is numerically
 * closest to the given cpu's vcpu id.  Starts with the primary as the
 * current selection, then scans opened sub-channels, returning early on
 * an exact vcpu match.  NOTE(review): the CHAN_SELECT macro body, the
 * return statements, and the 'sel' updates are elided in this excerpt.
 */
1257 struct hv_vmbus_channel *
1258 vmbus_chan_cpu2chan(struct hv_vmbus_channel *prichan, int cpu)
1260 struct hv_vmbus_channel *sel, *chan;
1261 uint32_t vcpu, sel_dist;
1263 KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpuid %d", cpu));
1264 if (TAILQ_EMPTY(&prichan->ch_subchans))
1267 vcpu = VMBUS_PCPU_GET(prichan->vmbus_sc, vcpuid, cpu);
1269 #define CHAN_VCPU_DIST(ch, vcpu) \
1270 (((ch)->ch_vcpuid > (vcpu)) ? \
1271 ((ch)->ch_vcpuid - (vcpu)) : ((vcpu) - (ch)->ch_vcpuid))
1273 #define CHAN_SELECT(ch) \
1276 sel_dist = CHAN_VCPU_DIST(ch, vcpu); \
1279 CHAN_SELECT(prichan);
1281 mtx_lock(&prichan->ch_subchan_lock);
1282 TAILQ_FOREACH(chan, &prichan->ch_subchans, ch_sublink) {
1285 KASSERT(chan->ch_stflags & VMBUS_CHAN_ST_OPENED,
1286 ("chan%u is not opened", chan->ch_id));
1288 if (chan->ch_vcpuid == vcpu) {
1289 /* Exact match; done */
1294 dist = CHAN_VCPU_DIST(chan, vcpu);
1295 if (sel_dist <= dist) {
1296 /* Far or same distance; skip */
1300 /* Select the closer channel. */
1303 mtx_unlock(&prichan->ch_subchan_lock);
1306 #undef CHAN_VCPU_DIST
/*
 * Return an M_WAITOK-allocated array of 'subchan_cnt' sub-channel
 * pointers, sleeping on the primary until that many sub-channels have
 * been offered.  Caller releases the array with vmbus_subchan_rel().
 * NOTE(review): the line storing each chan into ret[] is elided in this
 * excerpt.
 */
1311 struct hv_vmbus_channel **
1312 vmbus_subchan_get(struct hv_vmbus_channel *pri_chan, int subchan_cnt)
1314 struct hv_vmbus_channel **ret, *chan;
1317 ret = malloc(subchan_cnt * sizeof(struct hv_vmbus_channel *), M_TEMP,
1320 mtx_lock(&pri_chan->ch_subchan_lock);
/* Wait until enough sub-channels are linked onto the primary. */
1322 while (pri_chan->ch_subchan_cnt < subchan_cnt)
1323 mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "subch", 0);
1326 TAILQ_FOREACH(chan, &pri_chan->ch_subchans, ch_sublink) {
1327 /* TODO: refcnt chan */
1331 if (i == subchan_cnt)
1334 KASSERT(i == subchan_cnt, ("invalid subchan count %d, should be %d",
1335 pri_chan->ch_subchan_cnt, subchan_cnt));
1337 mtx_unlock(&pri_chan->ch_subchan_lock);
/* Release the pointer array from vmbus_subchan_get(); no per-channel
 * refcounting yet, so only the array itself is freed. */
1343 vmbus_subchan_rel(struct hv_vmbus_channel **subchan, int subchan_cnt __unused)
1346 free(subchan, M_TEMP);
/* Sleep until all of the primary's sub-channels have been detached
 * (ch_subchan_cnt drops to 0); wakeups come from the detach path. */
1350 vmbus_subchan_drain(struct hv_vmbus_channel *pri_chan)
1352 mtx_lock(&pri_chan->ch_subchan_lock);
1353 while (pri_chan->ch_subchan_cnt > 0)
1354 mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "dsubch", 0);
1355 mtx_unlock(&pri_chan->ch_subchan_lock);
/*
 * Dispatch a channel message to its handler via vmbus_chan_msgprocs;
 * message types without a registered handler are ignored.
 * NOTE(review): the msg_proc invocation line is elided in this excerpt.
 */
1359 vmbus_chan_msgproc(struct vmbus_softc *sc, const struct vmbus_message *msg)
1361 vmbus_chanmsg_proc_t msg_proc;
1364 msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
1365 KASSERT(msg_type < VMBUS_CHANMSG_TYPE_MAX,
1366 ("invalid message type %u", msg_type));
1368 msg_proc = vmbus_chan_msgprocs[msg_type];
1369 if (msg_proc != NULL)
1374 vmbus_chan_set_readbatch(struct hv_vmbus_channel *chan, bool on)
1377 chan->ch_flags &= ~VMBUS_CHAN_FLAG_BATCHREAD;
1379 chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;