/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/ioctl.h>
#include <machine/atomic.h>
#include <net/ethernet.h>

#ifndef NETMAP_WITH_LIBS
#define NETMAP_WITH_LIBS
#endif
#include <net/netmap_user.h>

#include <pthread_np.h>
#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256
/*
 * Host capabilities.  Note that we only offer a few of these.
 */
#define	VIRTIO_NET_F_CSUM	(1 <<  0) /* host handles partial cksum */
#define	VIRTIO_NET_F_GUEST_CSUM	(1 <<  1) /* guest handles partial cksum */
#define	VIRTIO_NET_F_MAC	(1 <<  5) /* host supplies MAC */
#define	VIRTIO_NET_F_GSO_DEPREC	(1 <<  6) /* deprecated: host handles GSO */
#define	VIRTIO_NET_F_GUEST_TSO4	(1 <<  7) /* guest can rcv TSOv4 */
#define	VIRTIO_NET_F_GUEST_TSO6	(1 <<  8) /* guest can rcv TSOv6 */
#define	VIRTIO_NET_F_GUEST_ECN	(1 <<  9) /* guest can rcv TSO with ECN */
#define	VIRTIO_NET_F_GUEST_UFO	(1 << 10) /* guest can rcv UFO */
#define	VIRTIO_NET_F_HOST_TSO4	(1 << 11) /* host can rcv TSOv4 */
#define	VIRTIO_NET_F_HOST_TSO6	(1 << 12) /* host can rcv TSOv6 */
#define	VIRTIO_NET_F_HOST_ECN	(1 << 13) /* host can rcv TSO with ECN */
#define	VIRTIO_NET_F_HOST_UFO	(1 << 14) /* host can rcv UFO */
#define	VIRTIO_NET_F_MRG_RXBUF	(1 << 15) /* host can merge RX buffers */
#define	VIRTIO_NET_F_STATUS	(1 << 16) /* config status field available */
#define	VIRTIO_NET_F_CTRL_VQ	(1 << 17) /* control channel available */
#define	VIRTIO_NET_F_CTRL_RX	(1 << 18) /* control channel RX mode support */
#define	VIRTIO_NET_F_CTRL_VLAN	(1 << 19) /* control channel VLAN filtering */
#define	VIRTIO_NET_F_GUEST_ANNOUNCE \
				(1 << 21) /* guest can send gratuitous pkts */

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)
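
/*
 * Illustrative sketch (not part of the original device model): feature
 * bits are consulted by masking the negotiated-feature word, exactly as
 * pci_vtnet_neg_features() does below for VIRTIO_NET_F_MRG_RXBUF.  The
 * helper name here is hypothetical.
 */
static __inline int
vtnet_feature_negotiated(uint64_t features, uint64_t bit)
{

	return ((features & bit) != 0);
}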
/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
} __packed;
/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3
/*
 * Fixed network header size
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;
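
/*
 * Layout sanity check (a sketch, assuming a C11 compiler): the full
 * header above is 12 bytes.  When VIRTIO_NET_F_MRG_RXBUF is not
 * negotiated, the guest-visible header omits vrh_bufs and is 2 bytes
 * shorter, which is why pci_vtnet_neg_features() shrinks rx_vhdrlen.
 */
_Static_assert(sizeof(struct virtio_net_rxhdr) == 12,
    "virtio_net_rxhdr must be 12 bytes");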
/*
 * Debug printf
 */
static int pci_vtnet_debug;
/* The do { } while (0) wrapper keeps the conditional safe in if/else bodies */
#define DPRINTF(params) do {			\
	if (pci_vtnet_debug)			\
		printf params;			\
} while (0)
#define WPRINTF(params) printf params
/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_tapfd;
	struct nm_desc	*vsc_nmd;

	int		vsc_rx_ready;
	volatile int	resetting;	/* set and checked outside lock */

	uint64_t	vsc_features;	/* negotiated features */

	struct virtio_net_config vsc_config;

	pthread_mutex_t	rx_mtx;
	int		rx_in_progress;
	int		rx_vhdrlen;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	void (*pci_vtnet_rx)(struct pci_vtnet_softc *sc);
	void (*pci_vtnet_tx)(struct pci_vtnet_softc *sc, struct iovec *iov,
			     int iovcnt, int len);
};
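
/*
 * The two function pointers above are the backend dispatch: they are set
 * once, to either the tap or the netmap implementations, in
 * pci_vtnet_tap_setup()/pci_vtnet_netmap_setup(), so the datapath simply
 * calls sc->pci_vtnet_rx(sc) without re-checking which backend is in use.
 */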
static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);
static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
};
/*
 * If the transmit thread is active then stall until it is done.
 */
static void
pci_vtnet_txwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
	pthread_mutex_unlock(&sc->tx_mtx);
}
/*
 * If the receive thread is active then stall until it is done.
 */
static void
pci_vtnet_rxwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->rx_mtx);
	while (sc->rx_in_progress) {
		pthread_mutex_unlock(&sc->rx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->rx_mtx);
	}
	pthread_mutex_unlock(&sc->rx_mtx);
}
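
/*
 * Note on the two wait loops above: the mutex is dropped and re-taken
 * around a short sleep so that the rx callback and the tx thread, which
 * hold the same locks while processing, can finish and clear their
 * *_in_progress flags.  This is a simple polling handshake rather than
 * a condition-variable wait.
 */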
static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !\n"));

	sc->resetting = 1;

	/*
	 * Wait for the transmit and receive threads to finish their
	 * tasks.
	 */
	pci_vtnet_txwait(sc);
	pci_vtnet_rxwait(sc);

	sc->vsc_rx_ready = 0;
	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);

	/* now reset rings, MSI-X vectors, and negotiated capabilities */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
}
/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		 int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}
/*
 * Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 * MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
 */
static uint8_t dummybuf[2048];
static __inline struct iovec *
rx_iov_trim(struct iovec *iov, int *niov, int tlen)
{
	struct iovec *riov;

	/* XXX short-cut: assume first segment is >= tlen */
	assert(iov[0].iov_len >= tlen);

	iov[0].iov_len -= tlen;
	if (iov[0].iov_len == 0) {
		assert(*niov > 1);
		*niov -= 1;
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen);
		riov = &iov[0];
	}

	return (riov);
}
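
/*
 * Worked example of the trim (illustrative sizes): with one 2048-byte
 * guest buffer and a 12-byte virtio header,
 *
 *	before:	iov[0] = { base, 2048 }
 *	after:	riov[0] = { base + 12, 2036 }
 *
 * so the readv() in the rx path deposits the frame immediately after
 * the header area that pci_vtnet_tap_rx() fills in afterwards.
 */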
static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = readv(sc->vsc_tapfd, riov, n);

		if (len < 0 && errno == EWOULDBLOCK) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
static int
pci_vtnet_netmap_writev(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int r, i;
	int len = 0;

	for (r = nmd->cur_tx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_TXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_tx_ring)
				r = nmd->first_tx_ring;
			/* wrap check must be against the starting tx ring */
			if (r == nmd->cur_tx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&buf[len], iov[i].iov_base, iov[i].iov_len);
			len += iov[i].iov_len;
		}
		ring->slot[cur].len = len;
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_tx_ring = r;
		ioctl(nmd->fd, NIOCTXSYNC, NULL);
		break;
	}

	return (len);
}
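
/*
 * The loop above walks the host tx rings round-robin starting at
 * cur_tx_ring and gives up after one full wrap (the frame is then
 * silently dropped and 0 is returned); on success a single NIOCTXSYNC
 * pushes the copied frame out through netmap.
 */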
static int
pci_vtnet_netmap_readv(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int len = 0;
	int i = 0;
	int r;

	for (r = nmd->cur_rx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_RXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;
		size_t left;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_rx_ring)
				r = nmd->first_rx_ring;
			if (r == nmd->cur_rx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);
		left = ring->slot[cur].len;

		for (i = 0; i < iovcnt && left > 0; i++) {
			if (iov[i].iov_len > left)
				iov[i].iov_len = left;
			memcpy(iov[i].iov_base, &buf[len], iov[i].iov_len);
			len += iov[i].iov_len;
			left -= iov[i].iov_len;
		}
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_rx_ring = r;
		ioctl(nmd->fd, NIOCRXSYNC, NULL);
		break;
	}
	for (; i < iovcnt; i++)
		iov[i].iov_len = 0;

	return (len);
}
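
/*
 * Note that pci_vtnet_netmap_readv() clamps the caller's iov_len fields
 * in place and zeroes any unused trailing entries, so the iovec must be
 * treated as consumed after the call; only the returned length remains
 * meaningful.
 */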
/*
 * Called to send a buffer chain out to the vale port
 */
static void
pci_vtnet_netmap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		    int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_nmd == NULL)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) pci_vtnet_netmap_writev(sc->vsc_nmd, iov, iovcnt);
}
static void
pci_vtnet_netmap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid netmap descriptor
	 */
	assert(sc->vsc_nmd != NULL);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = pci_vtnet_netmap_readv(sc->vsc_nmd, riov, n);

		if (len == 0) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	sc->rx_in_progress = 1;
	sc->pci_vtnet_rx(sc);
	sc->rx_in_progress = 0;
	pthread_mutex_unlock(&sc->rx_mtx);
}
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	}
}
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	int i, n;
	int plen, tlen;
	uint16_t idx;

	/*
	 * Obtain chain of descriptors.  The first one is
	 * really the header descriptor, so we need to sum
	 * up two lengths: packet length and transfer length.
	 */
	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);
	plen = 0;
	tlen = iov[0].iov_len;
	for (i = 1; i < n; i++) {
		plen += iov[i].iov_len;
		tlen += iov[i].iov_len;
	}

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n));
	sc->pci_vtnet_tx(sc, &iov[1], n - 1, plen);

	/* chain is processed, release it and set tlen */
	vq_relchain(vq, idx, tlen);
}
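
/*
 * Worked example (illustrative sizes): a chain made up of a 10-byte
 * virtio header descriptor plus payload descriptors of 1400 and 600
 * bytes yields plen = 2000 (handed to the backend tx routine) and
 * tlen = 2010 (reported back through the used ring by vq_relchain()).
 */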
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}
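
/*
 * Setting VRING_USED_F_NO_NOTIFY above suppresses further guest kicks
 * while the tx thread drains the ring; the thread clears the flag again
 * before it goes back to sleep in pci_vtnet_tx_thread().
 */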
/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq->vq_used->vu_flags &= ~VRING_USED_F_NO_NOTIFY;
			mb();
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * descriptor is found.
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, 1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}
#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!\n\r"));
}
#endif
static int
pci_vtnet_parsemac(char *mac_str, uint8_t *mac_addr)
{
	struct ether_addr *ea;
	char *tmpstr;
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	tmpstr = strsep(&mac_str, "=");

	if ((mac_str != NULL) && (!strcmp(tmpstr, "mac"))) {
		ea = ether_aton(mac_str);

		if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
		    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
			fprintf(stderr, "Invalid MAC %s\n", mac_str);
			return (EINVAL);
		} else
			memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
	}

	return (0);
}
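
/*
 * Example (typical bhyve command line, shown for illustration): with
 *	-s 2:0,virtio-net,tap0,mac=00:a0:98:12:34:56
 * the option string handed to this parser is the part after the device
 * name, so strsep() leaves tmpstr = "mac" and mac_str =
 * "00:a0:98:12:34:56" for ether_aton() to validate.
 */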
static void
pci_vtnet_tap_setup(struct pci_vtnet_softc *sc, char *devname)
{
	char tbuf[80];
	int opt = 1;

	strlcpy(tbuf, "/dev/", sizeof(tbuf));
	strlcat(tbuf, devname, sizeof(tbuf));

	sc->pci_vtnet_rx = pci_vtnet_tap_rx;
	sc->pci_vtnet_tx = pci_vtnet_tap_tx;

	sc->vsc_tapfd = open(tbuf, O_RDWR);
	if (sc->vsc_tapfd == -1) {
		WPRINTF(("open of tap device %s failed\n", tbuf));
		return;
	}

	/*
	 * Set non-blocking and register for read
	 * notifications with the event loop
	 */
	if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
		WPRINTF(("tap device O_NONBLOCK failed\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}
}
static void
pci_vtnet_netmap_setup(struct pci_vtnet_softc *sc, char *ifname)
{
	sc->pci_vtnet_rx = pci_vtnet_netmap_rx;
	sc->pci_vtnet_tx = pci_vtnet_netmap_tx;

	sc->vsc_nmd = nm_open(ifname, NULL, 0, 0);
	if (sc->vsc_nmd == NULL) {
		WPRINTF(("open of netmap device %s failed\n", ifname));
		return;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_nmd->fd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		nm_close(sc->vsc_nmd);
		sc->vsc_nmd = NULL;
	}
}
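
/*
 * Naming note (an assumption based on standard netmap/vale usage):
 * nm_open() accepts port names such as "vale0:vm1"; pci_vtnet_init()
 * routes any backend name beginning with "vale" to this setup path.
 */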
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	sc->vsc_nmd = NULL;
	if (opts != NULL) {
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		if (strncmp(devname, "vale", 4) == 0)
			pci_vtnet_netmap_setup(sc, devname);
		if ((strncmp(devname, "tap", 3) == 0) ||
		    (strncmp(devname, "vmnet", 5) == 0))
			pci_vtnet_tap_setup(sc, devname);

		free(devname);
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open tap device. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_tapfd >= 0);

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}
static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < 6) {
		assert(offset + size <= 6);
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d\n\r", offset));
	}

	return (0);
}
static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	uint8_t *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}
static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (!(sc->vsc_features & VIRTIO_NET_F_MRG_RXBUF)) {
		sc->rx_merge = 0;
		/* non-merge rx header is 2 bytes shorter */
		sc->rx_vhdrlen -= 2;
	}
}
struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read
};
PCI_EMUL_SET(pci_de_vnet);