/*-
 * Copyright (c) 2004-2006 Kip Macy
 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"
/* Features supported by all backends. TSO and LRO can be negotiated */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE	__CONST_RING_SIZE(netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE	__CONST_RING_SIZE(netif_rx, PAGE_SIZE)

#define NET_RX_SLOTS_MIN	(XEN_NETIF_NR_SLOTS_MIN + 1)
/*
 * Should the driver do LRO on the RX end
 * this can be toggled on the fly, but the
 * interface must be reset (down/up) for it
 * to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
/*
 * Number of pairs of queues.
 */
static unsigned long xn_num_queues = 4;
TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues);
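/*
 * Both knobs above are boot-time loader tunables; e.g., in
 * /boot/loader.conf:
 *
 *	hw.xn.enable_lro=0
 *	hw.xn.num_queues=2
 *
 * enable_lro is additionally exported read-write through the per-device
 * sysctl registered in netfront_attach() below, while num_queues is
 * read-only there.
 */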
/**
 * \brief The maximum allowed data fragments in a single transmit
 *	request.
 *
 * This limit is imposed by the backend driver. We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define MAX_TX_REQ_FRAGS	(65536 / PAGE_SIZE + 2)
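/*
 * With the usual 4 KiB pages this evaluates to 65536 / 4096 + 2 = 18
 * fragments: enough pages to cover a maximal 64 KiB TSO payload, plus two
 * extra slots, presumably to allow for data that starts and ends at
 * unaligned offsets within a page.
 */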
#define RX_COPY_THRESHOLD	256

#define net_ratelimit()		0
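/*
 * net_ratelimit() is a stub of the Linux helper of the same name; since it
 * is hard-wired to 0 here, the rate-limited warnings guarded by it (see
 * xn_get_responses()) are never printed.
 */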
struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_txq *);
static void xn_rxeof(struct netfront_rxq *);
static void xn_alloc_rx_buffers(struct netfront_rxq *);
static void xn_alloc_rx_buffers_callout(void *arg);

static void xn_release_rx_bufs(struct netfront_rxq *);
static void xn_release_tx_bufs(struct netfront_txq *);

static void xn_rxq_intr(struct netfront_rxq *);
static void xn_txq_intr(struct netfront_txq *);
static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
static int xn_txq_mq_start(struct ifnet *, struct mbuf *);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info,
    unsigned long num_queues);
static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static int xn_connect(struct netfront_info *);
static void xn_kick_rings(struct netfront_info *);

static int xn_get_responses(struct netfront_rxq *,
    struct netfront_rx_info *, RING_IDX, RING_IDX *,
    struct mbuf **list);
#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
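/*
 * Convert a kernel virtual address to a page frame number; the "mfn" name
 * reflects the macro's Xenolinux origin, while vtophys(9) supplies the
 * underlying physical address.
 */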
#define INVALID_P2M_ENTRY (~0UL)

#define XN_QUEUE_NAME_LEN  8	/* xn{t,r}x_%u, allow for two digits */
struct netfront_rxq {
	struct netfront_info	*info;
	u_int			id;
	char			name[XN_QUEUE_NAME_LEN];
	struct mtx		lock;

	grant_ref_t		ring_ref;
	netif_rx_front_ring_t	ring;
	xen_intr_handle_t	xen_intr_handle;

	grant_ref_t		gref_head;
	grant_ref_t		grant_ref[NET_RX_RING_SIZE + 1];

	struct mbuf		*mbufs[NET_RX_RING_SIZE + 1];

	struct lro_ctrl		lro;

	struct callout		rx_refill;
};
struct netfront_txq {
	struct netfront_info	*info;
	u_int			id;
	char			name[XN_QUEUE_NAME_LEN];
	struct mtx		lock;

	grant_ref_t		ring_ref;
	netif_tx_front_ring_t	ring;
	xen_intr_handle_t	xen_intr_handle;

	grant_ref_t		gref_head;
	grant_ref_t		grant_ref[NET_TX_RING_SIZE + 1];

	struct mbuf		*mbufs[NET_TX_RING_SIZE + 1];
	int			mbufs_cnt;
	struct buf_ring		*br;

	struct taskqueue	*tq;
	struct task		defrtask;

	bool			full;
};
struct netfront_info {
	struct ifnet		*xn_ifp;

	struct mtx		sc_lock;

	u_int			num_queues;
	struct netfront_rxq	*rxq;
	struct netfront_txq	*txq;

	device_t		xbdev;
	uint8_t			mac[ETHER_ADDR_LEN];
	int			xn_if_flags;

	int			carrier;
	int			maxfrags;

	struct ifmedia		sc_media;

	bool			xn_reset;
};
struct netfront_rx_info {
	struct netif_rx_response	rx;
	struct netif_extra_info		extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
#define XN_RX_LOCK(_q)		mtx_lock(&(_q)->lock)
#define XN_RX_UNLOCK(_q)	mtx_unlock(&(_q)->lock)

#define XN_TX_LOCK(_q)		mtx_lock(&(_q)->lock)
#define XN_TX_TRYLOCK(_q)	mtx_trylock(&(_q)->lock)
#define XN_TX_UNLOCK(_q)	mtx_unlock(&(_q)->lock)

#define XN_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_q)	mtx_assert(&(_q)->lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_q)	mtx_assert(&(_q)->lock, MA_OWNED);

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)
/* Access macros for acquiring and freeing slots in xn_free_{tx,rx}_idxs[]. */
static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{

	KASSERT(id != 0,
	    ("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0] = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
	    ("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}
static inline int
xn_rxidx(RING_IDX idx)
{

	return idx & (NET_RX_RING_SIZE - 1);
}
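/*
 * NET_RX_RING_SIZE is a power of two (it comes from __CONST_RING_SIZE), so
 * the mask above is equivalent to idx % NET_RX_RING_SIZE without a divide.
 */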
static inline struct mbuf *
xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
{
	int i = xn_rxidx(ri);
	struct mbuf *m;

	m = rxq->mbufs[i];
	rxq->mbufs[i] = NULL;
	return (m);
}
static inline grant_ref_t
xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
{
	int i = xn_rxidx(ri);
	grant_ref_t ref = rxq->grant_ref[i];

	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	rxq->grant_ref[i] = GRANT_REF_INVALID;
	return (ref);
}
#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
/*
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac array.
 * mac must be a preallocated array of length ETH_ALEN (as declared in
 * linux/if_ether.h). Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	char *s, *e, *macstr;
	const char *path;
	int error, i;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * set.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree. For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree. Fallback to this version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}
	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those. Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (xen_hvm_domain() && xen_disable_pv_nics != 0)
		return (ENXIO);

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}
static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err != 0) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");

	SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "num_queues", CTLFLAG_RD,
	    &xn_num_queues, "Number of pairs of queues");

	return (0);
}
static int
netfront_suspend(device_t dev)
{
	struct netfront_info *np = device_get_softc(dev);
	u_int i;

	for (i = 0; i < np->num_queues; i++) {
		XN_RX_LOCK(&np->rxq[i]);
		XN_TX_LOCK(&np->txq[i]);
	}
	netfront_carrier_off(np);
	for (i = 0; i < np->num_queues; i++) {
		XN_RX_UNLOCK(&np->rxq[i]);
		XN_TX_UNLOCK(&np->txq[i]);
	}
	return (0);
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);
	u_int i;

	if (xen_suspend_cancelled) {
		for (i = 0; i < info->num_queues; i++) {
			XN_RX_LOCK(&info->rxq[i]);
			XN_TX_LOCK(&info->txq[i]);
		}
		netfront_carrier_on(info);
		for (i = 0; i < info->num_queues; i++) {
			XN_RX_UNLOCK(&info->rxq[i]);
			XN_TX_UNLOCK(&info->txq[i]);
		}
		return (0);
	}

	netif_disconnect_backend(info);
	return (0);
}
static int
write_queue_xenstore_keys(device_t dev,
    struct netfront_rxq *rxq,
    struct netfront_txq *txq,
    struct xs_transaction *xst, bool hierarchy)
{
	int err;
	const char *message;
	const char *node = xenbus_get_node(dev);
	char *path;
	size_t path_size;

	KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
	/* Split event channel support is not yet there. */
	KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
	    ("Split event channels are not supported"));

	if (hierarchy) {
		path_size = strlen(node) + 10;
		path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
		snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
	} else {
		path_size = strlen(node) + 1;
		path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
		snprintf(path, path_size, "%s", node);
	}
	err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref);
	if (err != 0) {
		message = "writing tx ring-ref";
		goto error;
	}
	err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref);
	if (err != 0) {
		message = "writing rx ring-ref";
		goto error;
	}
	err = xs_printf(*xst, path, "event-channel", "%u",
	    xen_intr_port(rxq->xen_intr_handle));
	if (err != 0) {
		message = "writing event-channel";
		goto error;
	}

	free(path, M_DEVBUF);

	return (0);

error:
	free(path, M_DEVBUF);
	xenbus_dev_fatal(dev, err, "%s", message);

	return (err);
}
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;
	unsigned long num_queues, max_queues = 0;
	unsigned int i;

	err = xen_net_read_mac(dev, info->mac);
	if (err != 0) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		return (err);
	}

	err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
	    "multi-queue-max-queues", NULL, "%lu", &max_queues);
	if (err != 0)
		max_queues = 1;

	num_queues = xn_num_queues;
	if (num_queues > max_queues)
		num_queues = max_queues;

	err = setup_device(dev, info, num_queues);
	if (err != 0)
		return (err);

again:
	err = xs_transaction_start(&xst);
	if (err != 0) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return (err);
	}
	if (info->num_queues == 1) {
		err = write_queue_xenstore_keys(dev, &info->rxq[0],
		    &info->txq[0], &xst, false);
		if (err != 0)
			goto abort_transaction_no_def_error;
	} else {
		err = xs_printf(xst, node, "multi-queue-num-queues",
		    "%u", info->num_queues);
		if (err != 0) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction;
		}

		for (i = 0; i < info->num_queues; i++) {
			err = write_queue_xenstore_keys(dev, &info->rxq[i],
			    &info->txq[i], &xst, true);
			if (err != 0)
				goto abort_transaction_no_def_error;
		}
	}

	err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
	if (err != 0) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err != 0) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err != 0) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
	if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) {
		err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
		if (err != 0) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}
	}
	if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) {
		err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
		if (err != 0) {
			message = "writing feature-no-csum-offload";
			goto abort_transaction;
		}
	}
	err = xs_transaction_end(xst, 0);
	if (err != 0) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		return (err);
	}

	return (0);

abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_def_error:
	xs_transaction_end(xst, 1);

	return (err);
}
static void
xn_rxq_intr(struct netfront_rxq *rxq)
{

	XN_RX_LOCK(rxq);
	xn_rxeof(rxq);
	XN_RX_UNLOCK(rxq);
}
static void
xn_txq_start(struct netfront_txq *txq)
{
	struct netfront_info *np = txq->info;
	struct ifnet *ifp = np->xn_ifp;

	XN_TX_LOCK_ASSERT(txq);
	if (!drbr_empty(ifp, txq->br))
		xn_txq_mq_start_locked(txq, NULL);
}
static void
xn_txq_intr(struct netfront_txq *txq)
{

	XN_TX_LOCK(txq);
	if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
		xn_txeof(txq);
	XN_TX_UNLOCK(txq);

	xn_txq_start(txq);
}
static void
xn_txq_tq_deferred(void *xtxq, int pending)
{
	struct netfront_txq *txq = xtxq;

	XN_TX_LOCK(txq);
	xn_txq_start(txq);
	XN_TX_UNLOCK(txq);
}
static void
disconnect_rxq(struct netfront_rxq *rxq)
{

	xn_release_rx_bufs(rxq);
	gnttab_free_grant_references(rxq->gref_head);
	gnttab_end_foreign_access(rxq->ring_ref, NULL);
	/*
	 * No split event channel support at the moment, handle will
	 * be unbound in tx. So no need to call xen_intr_unbind here,
	 * but we do want to reset the handler to 0.
	 */
	rxq->xen_intr_handle = 0;
}
static void
destroy_rxq(struct netfront_rxq *rxq)
{

	callout_drain(&rxq->rx_refill);
	free(rxq->ring.sring, M_DEVBUF);
}
static void
destroy_rxqs(struct netfront_info *np)
{
	int i;

	for (i = 0; i < np->num_queues; i++)
		destroy_rxq(&np->rxq[i]);

	free(np->rxq, M_DEVBUF);
}
static int
setup_rxqs(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
	int q, i;
	int error;
	netif_rx_sring_t *rxs;
	struct netfront_rxq *rxq;

	info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (q = 0; q < num_queues; q++) {
		rxq = &info->rxq[q];

		rxq->id = q;
		rxq->info = info;
		rxq->ring_ref = GRANT_REF_INVALID;
		rxq->ring.sring = NULL;
		snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
		mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
		    MTX_DEF);

		for (i = 0; i <= NET_RX_RING_SIZE; i++) {
			rxq->mbufs[i] = NULL;
			rxq->grant_ref[i] = GRANT_REF_INVALID;
		}

		/* Start resources allocation */

		if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
		    &rxq->gref_head) != 0) {
			device_printf(dev, "allocating rx gref");
			error = ENOMEM;
			goto fail;
		}

		rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
		    M_WAITOK|M_ZERO);
		SHARED_RING_INIT(rxs);
		FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);

		error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
		    &rxq->ring_ref);
		if (error != 0) {
			device_printf(dev, "granting rx ring page");
			goto fail_grant_ring;
		}

		callout_init(&rxq->rx_refill, 1);
	}

	return (0);
fail_grant_ring:
	gnttab_free_grant_references(rxq->gref_head);
	free(rxq->ring.sring, M_DEVBUF);
fail:
	for (; q >= 0; q--) {
		disconnect_rxq(&info->rxq[q]);
		destroy_rxq(&info->rxq[q]);
	}

	free(info->rxq, M_DEVBUF);
	return (error);
}
static void
disconnect_txq(struct netfront_txq *txq)
{

	xn_release_tx_bufs(txq);
	gnttab_free_grant_references(txq->gref_head);
	gnttab_end_foreign_access(txq->ring_ref, NULL);
	xen_intr_unbind(&txq->xen_intr_handle);
}
static void
destroy_txq(struct netfront_txq *txq)
{

	free(txq->ring.sring, M_DEVBUF);
	buf_ring_free(txq->br, M_DEVBUF);
	taskqueue_drain_all(txq->tq);
	taskqueue_free(txq->tq);
}
static void
destroy_txqs(struct netfront_info *np)
{
	int i;

	for (i = 0; i < np->num_queues; i++)
		destroy_txq(&np->txq[i]);

	free(np->txq, M_DEVBUF);
}
static int
setup_txqs(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
	int q, i;
	int error;
	netif_tx_sring_t *txs;
	struct netfront_txq *txq;

	info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (q = 0; q < num_queues; q++) {
		txq = &info->txq[q];

		txq->id = q;
		txq->info = info;
		txq->ring_ref = GRANT_REF_INVALID;
		txq->ring.sring = NULL;

		snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);

		mtx_init(&txq->lock, txq->name, "netfront transmit lock",
		    MTX_DEF);

		for (i = 0; i <= NET_TX_RING_SIZE; i++) {
			txq->mbufs[i] = (void *) ((u_long) i+1);
			txq->grant_ref[i] = GRANT_REF_INVALID;
		}
		txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
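		/*
		 * The loop above seeds the transmit free list described
		 * near add_id_to_freelist(): slot i holds the fake pointer
		 * i + 1 (the next free index), and the final slot is
		 * terminated with 0.
		 */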
		/* Start resources allocation. */

		if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
		    &txq->gref_head) != 0) {
			device_printf(dev, "failed to allocate tx grant refs\n");
			error = ENOMEM;
			goto fail;
		}

		txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
		    M_WAITOK|M_ZERO);
		SHARED_RING_INIT(txs);
		FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);

		error = xenbus_grant_ring(dev, virt_to_mfn(txs),
		    &txq->ring_ref);
		if (error != 0) {
			device_printf(dev, "failed to grant tx ring\n");
			goto fail_grant_ring;
		}

		txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
		    M_WAITOK, &txq->lock);
		TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);

		txq->tq = taskqueue_create(txq->name, M_WAITOK,
		    taskqueue_thread_enqueue, &txq->tq);

		error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
		    "%s txq %d", device_get_nameunit(dev), txq->id);
		if (error != 0) {
			device_printf(dev, "failed to start tx taskq %d\n",
			    txq->id);
			goto fail_start_thread;
		}

		error = xen_intr_alloc_and_bind_local_port(dev,
		    xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr,
		    &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
		    &txq->xen_intr_handle);
		if (error != 0) {
			device_printf(dev,
			    "xen_intr_alloc_and_bind_local_port failed\n");
			goto fail_bind_port;
		}
	}

	return (0);

fail_bind_port:
	taskqueue_drain_all(txq->tq);
fail_start_thread:
	buf_ring_free(txq->br, M_DEVBUF);
	taskqueue_free(txq->tq);
	gnttab_end_foreign_access(txq->ring_ref, NULL);
fail_grant_ring:
	gnttab_free_grant_references(txq->gref_head);
	free(txq->ring.sring, M_DEVBUF);
fail:
	for (; q >= 0; q--) {
		disconnect_txq(&info->txq[q]);
		destroy_txq(&info->txq[q]);
	}

	free(info->txq, M_DEVBUF);
	return (error);
}
static int
setup_device(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
	int error;
	int q;

	info->num_queues = 0;

	error = setup_rxqs(dev, info, num_queues);
	if (error != 0)
		goto out;
	error = setup_txqs(dev, info, num_queues);
	if (error != 0)
		goto out;

	info->num_queues = num_queues;

	/* No split event channel at the moment. */
	for (q = 0; q < num_queues; q++)
		info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;

	return (0);

out:
	KASSERT(error != 0, ("Error path taken without providing an error code"));

	return (error);
}
/*
 * If this interface has an ipv4 address, send an arp for it. This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
			break;
		}
	}
}
/*
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (xn_connect(sc) != 0)
			break;
		/* Switch to connected state before kicking the rings. */
		xenbus_set_state(sc->xbdev, XenbusStateConnected);
		xn_kick_rings(sc);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateClosed:
		if (sc->xn_reset) {
			netif_disconnect_backend(sc);
			xenbus_set_state(dev, XenbusStateInitialising);
			sc->xn_reset = false;
		}
		break;
	case XenbusStateConnected:
		netfront_send_fake_arp(dev, sc);
		break;
	}
}
/**
 * \brief Verify that there is sufficient space in the Tx ring
 *	buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_txq *txq)
{

	return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
}
static void
xn_release_tx_bufs(struct netfront_txq *txq)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = txq->mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE. Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(txq->grant_ref[i]);
		gnttab_release_grant_reference(&txq->gref_head,
		    txq->grant_ref[i]);
		txq->grant_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(txq->mbufs, i);
		txq->mbufs_cnt--;
		if (txq->mbufs_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
		m_freem(m);
	}
}
static struct mbuf *
xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
{
	struct mbuf *m;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;

	return (m);
}
static void
xn_alloc_rx_buffers(struct netfront_rxq *rxq)
{
	RING_IDX req_prod;
	int notify;

	XN_RX_LOCK_ASSERT(rxq);

	if (__predict_false(rxq->info->carrier == 0))
		return;

	for (req_prod = rxq->ring.req_prod_pvt;
	     req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct mbuf *m;
		unsigned short id;
		grant_ref_t ref;
		struct netif_rx_request *req;
		unsigned long pfn;

		m = xn_alloc_one_rx_buffer(rxq);
		if (m == NULL)
			break;

		id = xn_rxidx(req_prod);

		KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
		rxq->mbufs[id] = m;

		ref = gnttab_claim_grant_reference(&rxq->gref_head);
		KASSERT(ref != GNTTAB_LIST_END,
		    ("reserved grant references exhausted"));
		rxq->grant_ref[id] = ref;

		pfn = atop(vtophys(mtod(m, vm_offset_t)));
		req = RING_GET_REQUEST(&rxq->ring, req_prod);

		gnttab_grant_foreign_access_ref(ref,
		    xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
		req->id = id;
		req->gref = ref;
	}

	rxq->ring.req_prod_pvt = req_prod;

	/* Not enough requests? Try again later. */
	if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
		callout_reset_curcpu(&rxq->rx_refill, hz/10,
		    xn_alloc_rx_buffers_callout, rxq);
		return;
	}
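	/*
	 * The early return above retries roughly 100 ms later (hz/10
	 * ticks): the ring could not be refilled past NET_RX_SLOTS_MIN,
	 * typically because mbuf cluster allocation failed, so poll until
	 * memory frees up.
	 */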
	wmb();		/* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
	if (notify)
		xen_intr_signal(rxq->xen_intr_handle);
}
static void
xn_alloc_rx_buffers_callout(void *arg)
{
	struct netfront_rxq *rxq;

	rxq = (struct netfront_rxq *)arg;
	XN_RX_LOCK(rxq);
	xn_alloc_rx_buffers(rxq);
	XN_RX_UNLOCK(rxq);
}
static void
xn_release_rx_bufs(struct netfront_rxq *rxq)
{
	int i, ref;
	struct mbuf *m;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		m = rxq->mbufs[i];

		if (m == NULL)
			continue;

		ref = rxq->grant_ref[i];
		if (ref == GRANT_REF_INVALID)
			continue;

		gnttab_end_foreign_access_ref(ref);
		gnttab_release_grant_reference(&rxq->gref_head, ref);
		rxq->mbufs[i] = NULL;
		rxq->grant_ref[i] = GRANT_REF_INVALID;
		m_freem(m);
	}
}
static void
xn_rxeof(struct netfront_rxq *rxq)
{
	struct ifnet *ifp;
	struct netfront_info *np = rxq->info;
#if (defined(INET) || defined(INET6))
	struct lro_ctrl *lro = &rxq->lro;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	struct mbuf *m;
	struct mbufq mbufq_rxq, mbufq_errq;
	int err, work_to_do;

	XN_RX_LOCK_ASSERT(rxq);

	if (!netfront_carrier_ok(np))
		return;

	/* XXX: there should be some sane limit. */
	mbufq_init(&mbufq_errq, INT_MAX);
	mbufq_init(&mbufq_rxq, INT_MAX);

	ifp = np->xn_ifp;

	do {
		rp = rxq->ring.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = rxq->ring.rsp_cons;
		while (i != rp) {
			memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xn_get_responses(rxq, &rinfo, rp, &i, &m);

			if (__predict_false(err)) {
				if (m != NULL)
					(void)mbufq_enqueue(&mbufq_errq, m);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/*
				 * According to mbuf(9) the correct way to tell
				 * the stack that the checksum of an inbound
				 * packet is correct, without it actually being
				 * present (because the underlying interface
				 * doesn't provide it), is to set the
				 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags,
				 * and the csum_data field to 0xffff.
				 */
				m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
				    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}
			if ((rx->flags & NETRXF_extra_info) != 0 &&
			    (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
			    XEN_NETIF_EXTRA_TYPE_GSO)) {
				m->m_pkthdr.tso_segsz =
				    extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
				m->m_pkthdr.csum_flags |= CSUM_TSO;
			}

			(void)mbufq_enqueue(&mbufq_rxq, m);
		}

		rxq->ring.rsp_cons = i;

		xn_alloc_rx_buffers(rxq);

		RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
	} while (work_to_do);
	mbufq_drain(&mbufq_errq);

	/*
	 * Process all the mbufs after the remapping is complete.
	 * Break the mbuf chain first though.
	 */
	while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#if (defined(INET) || defined(INET6))
		/* Use LRO if possible */
		if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
		    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
			/*
			 * If LRO fails, pass up to the stack
			 * directly.
			 */
			(*ifp->if_input)(ifp, m);
		}
#else
		(*ifp->if_input)(ifp, m);
#endif
	}

#if (defined(INET) || defined(INET6))
	/*
	 * Flush any outstanding LRO work
	 */
	tcp_lro_flush_all(lro);
#endif
}
static void
xn_txeof(struct netfront_txq *txq)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;
	struct netfront_info *np = txq->info;

	XN_TX_LOCK_ASSERT(txq);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = txq->ring.sring->rsp_prod;
		rmb();	/* Ensure we see responses up to 'prod'. */

		for (i = txq->ring.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&txq->ring, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = txq->mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));

			if (__predict_false(gnttab_query_foreign_access(
			    txq->grant_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(txq->grant_ref[id]);
			gnttab_release_grant_reference(
			    &txq->gref_head, txq->grant_ref[id]);
			txq->grant_ref[id] = GRANT_REF_INVALID;

			txq->mbufs[id] = NULL;
			add_id_to_freelist(txq->mbufs, id);
			txq->mbufs_cnt--;
			m_free(m);
			/* Only mark the txq active if we've freed up at least one slot to try */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		txq->ring.rsp_cons = prod;
		/*
		 * Set a new event, then check for race with update of
		 * tx_cons. Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		txq->ring.sring->rsp_event =
		    prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
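		/*
		 * The formula above asks the backend for an event once
		 * roughly half of the requests outstanding at this point
		 * have completed, batching completion interrupts instead
		 * of taking one per response.
		 */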
		mb();
	} while (prod != txq->ring.sring->rsp_prod);

	if (txq->full &&
	    ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		txq->full = false;
		xn_txq_start(txq);
	}
}
static void
xn_intr(void *xsc)
{
	struct netfront_txq *txq = xsc;
	struct netfront_info *np = txq->info;
	struct netfront_rxq *rxq = &np->rxq[txq->id];

	/* kick both tx and rx */
	xn_rxq_intr(rxq);
	xn_txq_intr(txq);
}
static void
xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xn_rxidx(rxq->ring.req_prod_pvt);

	KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
	rxq->mbufs[new] = m;
	rxq->grant_ref[new] = ref;
	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
	rxq->ring.req_prod_pvt++;
}
static int
xn_get_extras(struct netfront_rxq *rxq,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;
	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&rxq->ring, ++(*cons));

		if (__predict_false(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xn_get_rx_mbuf(rxq, *cons);
		ref = xn_get_rx_ref(rxq, *cons);
		xn_move_rx_slot(rxq, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return (err);
}
static int
xn_get_responses(struct netfront_rxq *rxq,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xn_get_extras(rxq, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);

		if (__predict_false(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {
			xn_move_rx_slot(rxq, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref);
		KASSERT(ret, ("Unable to end access to grant references"));

		gnttab_release_grant_reference(&rxq->gref_head, ref);

next:
		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;
next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}

		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
		m = xn_get_rx_mbuf(rxq, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;

		ref = xn_get_rx_ref(rxq, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}

	*list = m0;
	*cons += frags;

	return (err);
}
/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}
/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
{
	struct mbuf *m;
	struct netfront_info *np = txq->info;
	struct ifnet *ifp = np->xn_ifp;
	u_int nfrags;
	int otherend_id;

	/*
	 * Defragment the mbuf if necessary.
	 */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defrag it.
	 *
	 * It is a bit lame, but the netback driver in Linux can't
	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
	 * the Linux network stack.
	 */
	if (nfrags > np->maxfrags) {
		m = m_defrag(m_head, M_NOWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.
	 *
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle. Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet. It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant. We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size. Keep
	 * it in here as an assert for now just to make certain that
	 * chain_cnt is accurate.
	 */
	KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
	    ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
	    "(%d)!", __func__, (int) txq->mbufs_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	otherend_id = xenbus_get_otherend_id(np->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
		id = get_id_from_freelist(txq->mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		txq->mbufs_cnt++;
		if (txq->mbufs_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		txq->mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&txq->gref_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = txq->grant_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
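		/*
		 * Each grant covers a single page, so the request carries
		 * the fragment's offset within its page; the backend maps
		 * (or copies from) the granted frame and starts reading at
		 * that offset.
		 */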
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size. The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 */
			/**
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
				    (struct netif_extra_info *)
				    RING_GET_REQUEST(&txq->ring,
				    ++txq->ring.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
				    XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		txq->ring.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

	xn_txeof(txq);

	return (0);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *np)
{
	struct ifnet *ifp;
	int i;
	struct netfront_rxq *rxq;

	XN_LOCK_ASSERT(np);

	ifp = np->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
		return;

	xn_stop(np);

	for (i = 0; i < np->num_queues; i++) {
		rxq = &np->rxq[i];
		XN_RX_LOCK(rxq);
		xn_alloc_rx_buffers(rxq);
		rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
			xn_rxeof(rxq);
		XN_RX_UNLOCK(rxq);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}
static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	device_t dev;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int mask, error = 0, reinit;

	dev = sc->xbdev;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xn_ifinit(sc);
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		reinit = 0;

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= XN_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			ifp->if_hwassist ^= CSUM_TSO;
		}

		if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
			/* These Rx features require us to renegotiate. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
		}

		if (reinit == 0)
			break;
		/*
		 * We must reset the interface so the backend picks up the
		 * new features.
		 */
		device_printf(sc->xbdev,
		    "performing interface reset due to feature change\n");
		XN_LOCK(sc);
		netfront_carrier_off(sc);
		sc->xn_reset = true;
		/*
		 * NB: the pending packet queue is not flushed, since
		 * the interface should still support the old options.
		 */
		XN_UNLOCK(sc);
		/*
		 * Delete the xenstore nodes that export features.
		 *
		 * NB: There's a xenbus state called
		 * "XenbusStateReconfiguring", which is what we should set
		 * here. Sadly none of the backends know how to handle it,
		 * and simply disconnect from the frontend, so we will just
		 * switch back to XenbusStateInitialising in order to force
		 * a reconnection.
		 */
		xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
		xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
		xenbus_set_state(dev, XenbusStateClosing);

		/*
		 * Wait for the frontend to reconnect before returning
		 * from the ioctl. 30s should be more than enough for any
		 * sane backend to reconnect.
		 */
		error = tsleep(sc, 0, "xn_rst", 30*hz);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}
static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}
static void
xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
{
	int requeue_idx, i;
	grant_ref_t ref;
	netif_rx_request_t *req;
	struct mbuf *m;
	u_long pfn;

	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (rxq->mbufs[i] == NULL)
			continue;

		m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
		ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);

		req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

		gnttab_grant_foreign_access_ref(ref,
		    xenbus_get_otherend_id(rxq->info->xbdev),
		    pfn, 0);

		req->gref = ref;
		req->id = requeue_idx;

		requeue_idx++;
	}

	rxq->ring.req_prod_pvt = requeue_idx;
}
/* START of Xenolinux helper functions adapted to FreeBSD */
static int
xn_connect(struct netfront_info *np)
{
	int i, error;
	u_int feature_rx_copy;
	struct netfront_rxq *rxq;
	struct netfront_txq *txq;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error != 0)
		feature_rx_copy = 0;

	/* We only support rx copy. */
	if (!feature_rx_copy)
		return (EPROTONOSUPPORT);
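	/*
	 * "rx copy" means the backend copies packet data into buffers
	 * granted by this frontend; the legacy alternative, transferring
	 * page ownership ("flipping"), is not implemented here, which is
	 * why a backend that cannot copy is rejected above.
	 */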
	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error != 0)
		return (error);

	/* Step 1: Reinitialise variables. */
	xn_query_features(np);
	xn_configure_features(np);

	/* Step 2: Release TX buffer */
	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];
		xn_release_tx_bufs(txq);
	}

	/* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
	for (i = 0; i < np->num_queues; i++) {
		rxq = &np->rxq[i];
		xn_rebuild_rx_bufs(rxq);
	}

	/*
	 * Step 4: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);

	return (0);
}
static void
xn_kick_rings(struct netfront_info *np)
{
	struct netfront_rxq *rxq;
	struct netfront_txq *txq;
	int i;

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];
		rxq = &np->rxq[i];
		xen_intr_signal(txq->xen_intr_handle);
		XN_TX_LOCK(txq);
		xn_txeof(txq);
		XN_TX_UNLOCK(txq);
		XN_RX_LOCK(rxq);
		xn_alloc_rx_buffers(rxq);
		XN_RX_UNLOCK(rxq);
	}
}
static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-sg", NULL, "%d", &val) != 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-gso-tcpv4", NULL, "%d", &val) != 0)
		val = 0;

	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
	if (val) {
		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcp4");
	}

	/*
	 * HW CSUM offload is assumed to be available unless
	 * feature-no-csum-offload is set in xenstore.
	 */
	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-no-csum-offload", NULL, "%d", &val) != 0)
		val = 0;

	np->xn_ifp->if_capabilities |= IFCAP_HWCSUM;
	if (val) {
		np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM);
		printf(" feature-no-csum-offload");
	}

	printf("\n");
}
static int
xn_configure_features(struct netfront_info *np)
{
	int err, cap_enabled;
#if (defined(INET) || defined(INET6))
	int i;
#endif
	struct ifnet *ifp;

	ifp = np->xn_ifp;
	err = 0;

	if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) {
		/* Current options are available, no need to do anything. */
		return (0);
	}

	/* Try to preserve as many options as possible. */
	cap_enabled = ifp->if_capenable;
	ifp->if_capenable = ifp->if_hwassist = 0;

#if (defined(INET) || defined(INET6))
	if ((cap_enabled & IFCAP_LRO) != 0)
		for (i = 0; i < np->num_queues; i++)
			tcp_lro_free(&np->rxq[i].lro);
	if (xn_enable_lro &&
	    (ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) {
		ifp->if_capenable |= IFCAP_LRO;
		for (i = 0; i < np->num_queues; i++) {
			err = tcp_lro_init(&np->rxq[i].lro);
			if (err != 0) {
				device_printf(np->xbdev,
				    "LRO initialization failed\n");
				ifp->if_capenable &= ~IFCAP_LRO;
				break;
			}
			np->rxq[i].lro.ifp = ifp;
		}
	}
#endif
	if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) {
		ifp->if_capenable |= IFCAP_TSO4;
		ifp->if_hwassist |= CSUM_TSO;
	}

	if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) {
		ifp->if_capenable |= IFCAP_TXCSUM;
		ifp->if_hwassist |= XN_CSUM_FEATURES;
	}
	if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0)
		ifp->if_capenable |= IFCAP_RXCSUM;

	return (err);
}
static int
xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
{
	struct netfront_info *np;
	struct ifnet *ifp;
	struct buf_ring *br;
	int error, notify;

	np = txq->info;
	br = txq->br;
	ifp = np->xn_ifp;
	error = 0;

	XN_TX_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    !netfront_carrier_ok(np)) {
		error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error != 0)
			return (error);
	}

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (!xn_tx_slot_available(txq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		error = xn_assemble_tx_request(txq, m);
		/* xn_assemble_tx_request always consumes the mbuf. */
		if (error != 0) {
			drbr_advance(ifp, br);
			break;
		}

		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
		if (notify)
			xen_intr_signal(txq->xen_intr_handle);

		drbr_advance(ifp, br);
	}

	if (RING_FULL(&txq->ring))
		txq->full = true;

	return (0);
}
static int
xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	int i, npairs, error;

	np = ifp->if_softc;
	npairs = np->num_queues;

	if (!netfront_carrier_ok(np))
		return (ENETDOWN);

	KASSERT(npairs != 0, ("called with 0 available queues"));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;
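	/*
	 * Packets carrying an RSS/flow hash are pinned to a queue by
	 * flowid, so a given flow is never reordered across queues;
	 * everything else simply spreads by the submitting CPU.
	 */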
	txq = &np->txq[i];

	if (XN_TX_TRYLOCK(txq) != 0) {
		error = xn_txq_mq_start_locked(txq, m);
		XN_TX_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->br, m);
		taskqueue_enqueue(txq->tq, &txq->defrtask);
	}

	return (error);
}
static void
xn_qflush(struct ifnet *ifp)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	struct mbuf *m;
	int i;

	np = ifp->if_softc;

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];

		XN_TX_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
			m_freem(m);
		XN_TX_UNLOCK(txq);
	}

	if_qflush(ifp);
}
/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
static int
create_netdev(device_t dev)
{
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	err = xen_net_read_mac(dev, np->mac);
	if (err != 0)
		goto error;

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;

	ifp->if_transmit = xn_txq_mq_start;
	ifp->if_qflush = xn_qflush;

	ifp->if_init = xn_ifinit;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	/* Enable all supported features at device creation. */
	ifp->if_capenable = ifp->if_capabilities =
	    IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
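	/*
	 * The three if_hw_tso* limits above mirror the transmit-side
	 * assumptions in this file: at most a 64 KiB TSO chain (minus
	 * Ethernet/VLAN header room), split over no more than
	 * MAX_TX_REQ_FRAGS segments of at most one page each, matching
	 * what a single transmit request can describe.
	 */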
	ether_ifattach(ifp, np->mac);
	netfront_carrier_off(np);

	return (0);

error:
	KASSERT(err != 0, ("Error path with no error code specified"));
	return (err);
}
static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}
static void
netif_free(struct netfront_info *np)
{

	XN_LOCK(np);
	xn_stop(np);
	XN_UNLOCK(np);
	netif_disconnect_backend(np);
	ether_ifdetach(np->xn_ifp);
	free(np->rxq, M_DEVBUF);
	free(np->txq, M_DEVBUF);
	if_free(np->xn_ifp);
	np->xn_ifp = NULL;
	ifmedia_removeall(&np->sc_media);
}
static void
netif_disconnect_backend(struct netfront_info *np)
{
	u_int i;

	for (i = 0; i < np->num_queues; i++) {
		XN_RX_LOCK(&np->rxq[i]);
		XN_TX_LOCK(&np->txq[i]);
	}
	netfront_carrier_off(np);
	for (i = 0; i < np->num_queues; i++) {
		XN_RX_UNLOCK(&np->rxq[i]);
		XN_TX_UNLOCK(&np->txq[i]);
	}

	for (i = 0; i < np->num_queues; i++) {
		disconnect_rxq(&np->rxq[i]);
		disconnect_txq(&np->txq[i]);
	}
}
static int
xn_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}
/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	netfront_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"netfront",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);