/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Kip Macy
 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"
/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)

#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
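/*
 * Sizing note (assuming 4 KiB pages): __CONST_RING_SIZE rounds the number
 * of request/response slots that fit in one shared page down to a power
 * of two, which works out to 256 slots for both the TX and RX rings.
 */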
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the
 * fly, but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
/*
 * Number of pairs of queues.
 */
static unsigned long xn_num_queues = 4;
TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues);
/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
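/*
 * Worked out for 4 KiB pages: 65536/4096 + 2 = 18 slots per packet, which
 * is enough for a maximally sized 64 KiB TSO payload that is not page
 * aligned (such a payload can touch at most 17 distinct pages).
 */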
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0
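/*
 * Stubbed to 0: the rate-limited warning paths inherited from the Linux
 * driver (e.g. the "Need more frags" WPRINTK in xn_get_responses()) are
 * compiled out.
 */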
struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_txq *);
static void xn_rxeof(struct netfront_rxq *);
static void xn_alloc_rx_buffers(struct netfront_rxq *);
static void xn_alloc_rx_buffers_callout(void *arg);

static void xn_release_rx_bufs(struct netfront_rxq *);
static void xn_release_tx_bufs(struct netfront_txq *);

static void xn_rxq_intr(struct netfront_rxq *);
static void xn_txq_intr(struct netfront_txq *);
static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
static int xn_txq_mq_start(struct ifnet *, struct mbuf *);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info,
    unsigned long);
static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static int xn_connect(struct netfront_info *);
static void xn_kick_rings(struct netfront_info *);

static int xn_get_responses(struct netfront_rxq *,
    struct netfront_rx_info *, RING_IDX, RING_IDX *,
    struct mbuf **);
#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
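/*
 * Note (environment assumption): in an autotranslated HVM/PVH guest,
 * vtophys() yields a guest frame number, which is what the grant-table
 * and ring-sharing interfaces used below expect from this macro despite
 * the "mfn" in the name.
 */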
#define INVALID_P2M_ENTRY (~0UL)

#define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */
struct netfront_rxq {
    struct netfront_info    *info;
    u_int                   id;
    char                    name[XN_QUEUE_NAME_LEN];
    struct mtx              lock;

    grant_ref_t             ring_ref;
    netif_rx_front_ring_t   ring;
    xen_intr_handle_t       xen_intr_handle;

    grant_ref_t             gref_head;
    grant_ref_t             grant_ref[NET_RX_RING_SIZE + 1];

    struct mbuf             *mbufs[NET_RX_RING_SIZE + 1];

    struct lro_ctrl         lro;

    struct callout          rx_refill;
};

struct netfront_txq {
    struct netfront_info    *info;
    u_int                   id;
    char                    name[XN_QUEUE_NAME_LEN];
    struct mtx              lock;

    grant_ref_t             ring_ref;
    netif_tx_front_ring_t   ring;
    xen_intr_handle_t       xen_intr_handle;

    grant_ref_t             gref_head;
    grant_ref_t             grant_ref[NET_TX_RING_SIZE + 1];

    struct mbuf             *mbufs[NET_TX_RING_SIZE + 1];
    int                     mbufs_cnt;
    struct buf_ring         *br;

    struct taskqueue        *tq;
    struct task             defrtask;

    bool                    full;
};

struct netfront_info {
    struct ifnet            *xn_ifp;

    struct mtx              sc_lock;

    u_int                   num_queues;
    struct netfront_rxq     *rxq;
    struct netfront_txq     *txq;

    device_t                xbdev;
    uint8_t                 mac[ETHER_ADDR_LEN];
    int                     xn_if_flags;

    int                     carrier;
    int                     maxfrags;

    struct ifmedia          sc_media;

    bool                    xn_reset;
};

struct netfront_rx_info {
    struct netif_rx_response    rx;
    struct netif_extra_info     extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
#define XN_RX_LOCK(_q)      mtx_lock(&(_q)->lock)
#define XN_RX_UNLOCK(_q)    mtx_unlock(&(_q)->lock)

#define XN_TX_LOCK(_q)      mtx_lock(&(_q)->lock)
#define XN_TX_TRYLOCK(_q)   mtx_trylock(&(_q)->lock)
#define XN_TX_UNLOCK(_q)    mtx_unlock(&(_q)->lock)

#define XN_LOCK(_sc)        mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)      mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)     mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_q)   mtx_assert(&(_q)->lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_q)   mtx_assert(&(_q)->lock, MA_OWNED);
#define netfront_carrier_on(netif)  ((netif)->carrier = 1)
#define netfront_carrier_off(netif) ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)  ((netif)->carrier)
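/*
 * The carrier flag mirrors the xenbus connection state: the data-path
 * routines test it (under the per-queue locks) instead of taking the
 * softc lock on every packet.
 */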
/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */
static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{

    KASSERT(id != 0,
        ("%s: the head item (0) must always be free.", __func__));
    list[id] = list[0];
    list[0] = (struct mbuf *)id;
}
static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
    uintptr_t id;

    id = (uintptr_t)list[0];
    KASSERT(id != 0,
        ("%s: the head item (0) must always remain free.", __func__));
    list[0] = list[id];
    return (id);
}
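/*
 * How the freelist works: the mbufs[] array doubles as the id -> mbuf
 * lookup table and as an embedded free list.  A free slot stores the
 * index of the next free slot cast to a pointer, and slot 0 always holds
 * the list head and is never handed out; any entry whose value is not
 * larger than the ring size is therefore a free-list link rather than a
 * real mbuf pointer (see xn_release_tx_bufs() and xn_txeof()).
 */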
static inline int
xn_rxidx(RING_IDX idx)
{

    return idx & (NET_RX_RING_SIZE - 1);
}
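/*
 * Note: the mask in xn_rxidx() relies on NET_RX_RING_SIZE being a power
 * of two, which __CONST_RING_SIZE guarantees.
 */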
static inline struct mbuf *
xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
{
    int i = xn_rxidx(ri);
    struct mbuf *m;

    m = rxq->mbufs[i];
    rxq->mbufs[i] = NULL;
    return (m);
}

static inline grant_ref_t
xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
{
    int i = xn_rxidx(ri);
    grant_ref_t ref = rxq->grant_ref[i];

    KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
    rxq->grant_ref[i] = GRANT_REF_INVALID;
    return (ref);
}
#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
    char *s, *e, *macstr;
    const char *path;
    int error, i;

    path = xenbus_get_node(dev);
    error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
    if (error == ENOENT) {
        /*
         * Deal with missing mac XenStore nodes on devices with
         * HVM emulation (the 'ioemu' configuration attribute)
         * enabled.
         *
         * The HVM emulator may execute in a stub device model
         * domain which lacks the permission, only given to Dom0,
         * to update the guest's XenStore tree.  For this reason,
         * the HVM emulator doesn't even attempt to write the
         * front-side mac node, even when operating in Dom0.
         * However, there should always be a mac listed in the
         * backend tree.  Fall back to this version if our query
         * of the front side XenStore location doesn't find
         * anything.
         */
        path = xenbus_get_otherend_path(dev);
        error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
    }
    if (error != 0) {
        xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
        return (error);
    }

    s = macstr;
    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        mac[i] = strtoul(s, &e, 16);
        if (s == e || (e[0] != ':' && e[0] != 0)) {
            free(macstr, M_XENBUS);
            return (ENOENT);
        }
        s = &e[1];
    }
    free(macstr, M_XENBUS);
    return (0);
}
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

    if (xen_hvm_domain() && xen_disable_pv_nics != 0)
        return (ENXIO);

    if (!strcmp(xenbus_get_type(dev), "vif")) {
        device_set_desc(dev, "Virtual Network Interface");
        return (0);
    }

    return (ENXIO);
}
static int
netfront_attach(device_t dev)
{
    int err;

    err = create_netdev(dev);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "creating netdev");
        return (err);
    }

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_lro", CTLFLAG_RW,
        &xn_enable_lro, 0, "Large Receive Offload");

    SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "num_queues", CTLFLAG_RD,
        &xn_num_queues, "Number of pairs of queues");

    return (0);
}
static int
netfront_suspend(device_t dev)
{
    struct netfront_info *np = device_get_softc(dev);
    u_int i;

    for (i = 0; i < np->num_queues; i++) {
        XN_RX_LOCK(&np->rxq[i]);
        XN_TX_LOCK(&np->txq[i]);
    }
    netfront_carrier_off(np);
    for (i = 0; i < np->num_queues; i++) {
        XN_RX_UNLOCK(&np->rxq[i]);
        XN_TX_UNLOCK(&np->txq[i]);
    }
    return (0);
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
    struct netfront_info *info = device_get_softc(dev);
    u_int i;

    if (xen_suspend_cancelled) {
        for (i = 0; i < info->num_queues; i++) {
            XN_RX_LOCK(&info->rxq[i]);
            XN_TX_LOCK(&info->txq[i]);
        }
        netfront_carrier_on(info);
        for (i = 0; i < info->num_queues; i++) {
            XN_RX_UNLOCK(&info->rxq[i]);
            XN_TX_UNLOCK(&info->txq[i]);
        }
        return (0);
    }

    netif_disconnect_backend(info);
    return (0);
}
static int
write_queue_xenstore_keys(device_t dev,
    struct netfront_rxq *rxq,
    struct netfront_txq *txq,
    struct xs_transaction *xst, bool hierarchy)
{
    int err;
    const char *message;
    const char *node = xenbus_get_node(dev);
    char *path;
    size_t path_size;

    KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
    /* Split event channel support is not yet there. */
    KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
        ("Split event channels are not supported"));

    if (hierarchy) {
        path_size = strlen(node) + 10;
        path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
        snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
    } else {
        path_size = strlen(node) + 1;
        path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
        snprintf(path, path_size, "%s", node);
    }

    err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref);
    if (err != 0) {
        message = "writing tx ring-ref";
        goto error;
    }
    err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref);
    if (err != 0) {
        message = "writing rx ring-ref";
        goto error;
    }
    err = xs_printf(*xst, path, "event-channel", "%u",
        xen_intr_port(rxq->xen_intr_handle));
    if (err != 0) {
        message = "writing event-channel";
        goto error;
    }

    free(path, M_DEVBUF);

    return (0);

error:
    free(path, M_DEVBUF);
    xenbus_dev_fatal(dev, err, "%s", message);

    return (err);
}
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
    const char *message;
    struct xs_transaction xst;
    const char *node = xenbus_get_node(dev);
    int err;
    unsigned long num_queues, max_queues = 0;
    unsigned int i;

    err = xen_net_read_mac(dev, info->mac);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
        goto out;
    }

    err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
        "multi-queue-max-queues", NULL, "%lu", &max_queues);
    if (err != 0)
        max_queues = 1;
    num_queues = xn_num_queues;
    if (num_queues > max_queues)
        num_queues = max_queues;

    err = setup_device(dev, info, num_queues);
    if (err != 0)
        goto out;

again:
    err = xs_transaction_start(&xst);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "starting transaction");
        goto free;
    }

    if (info->num_queues == 1) {
        err = write_queue_xenstore_keys(dev, &info->rxq[0],
            &info->txq[0], &xst, false);
        if (err != 0)
            goto abort_transaction_no_def_error;
    } else {
        err = xs_printf(xst, node, "multi-queue-num-queues",
            "%u", info->num_queues);
        if (err != 0) {
            message = "writing multi-queue-num-queues";
            goto abort_transaction;
        }

        for (i = 0; i < info->num_queues; i++) {
            err = write_queue_xenstore_keys(dev, &info->rxq[i],
                &info->txq[i], &xst, true);
            if (err != 0)
                goto abort_transaction_no_def_error;
        }
    }

    err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
    if (err != 0) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
    if (err != 0) {
        message = "writing feature-rx-notify";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "feature-sg", "%d", 1);
    if (err != 0) {
        message = "writing feature-sg";
        goto abort_transaction;
    }
    if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) {
        err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
        if (err != 0) {
            message = "writing feature-gso-tcpv4";
            goto abort_transaction;
        }
    }
    if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) {
        err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
        if (err != 0) {
            message = "writing feature-no-csum-offload";
            goto abort_transaction;
        }
    }

    err = xs_transaction_end(xst, 0);
    if (err != 0) {
        if (err == EAGAIN)
            goto again;
        xenbus_dev_fatal(dev, err, "completing transaction");
        goto free;
    }

    return (0);

abort_transaction:
    xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_def_error:
    xs_transaction_end(xst, 1);
free:
    netif_free(info);
out:
    return (err);
}
static void
xn_rxq_intr(struct netfront_rxq *rxq)
{

    XN_RX_LOCK(rxq);
    xn_rxeof(rxq);
    XN_RX_UNLOCK(rxq);
}

static void
xn_txq_start(struct netfront_txq *txq)
{
    struct netfront_info *np = txq->info;
    struct ifnet *ifp = np->xn_ifp;

    XN_TX_LOCK_ASSERT(txq);
    if (!drbr_empty(ifp, txq->br))
        xn_txq_mq_start_locked(txq, NULL);
}

static void
xn_txq_intr(struct netfront_txq *txq)
{

    XN_TX_LOCK(txq);
    if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
        xn_txeof(txq);
    xn_txq_start(txq);
    XN_TX_UNLOCK(txq);
}

static void
xn_txq_tq_deferred(void *xtxq, int pending)
{
    struct netfront_txq *txq = xtxq;

    XN_TX_LOCK(txq);
    xn_txq_start(txq);
    XN_TX_UNLOCK(txq);
}
static void
disconnect_rxq(struct netfront_rxq *rxq)
{

    xn_release_rx_bufs(rxq);
    gnttab_free_grant_references(rxq->gref_head);
    gnttab_end_foreign_access(rxq->ring_ref, NULL);
    /*
     * No split event channel support at the moment, handle will
     * be unbound in tx. So no need to call xen_intr_unbind here,
     * but we do want to reset the handler to 0.
     */
    rxq->xen_intr_handle = 0;
}
static void
destroy_rxq(struct netfront_rxq *rxq)
{

    callout_drain(&rxq->rx_refill);
    free(rxq->ring.sring, M_DEVBUF);
}

static void
destroy_rxqs(struct netfront_info *np)
{
    int i;

    for (i = 0; i < np->num_queues; i++)
        destroy_rxq(&np->rxq[i]);

    free(np->rxq, M_DEVBUF);
    np->rxq = NULL;
}
static int
setup_rxqs(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
    int q, i;
    int error = 0;
    netif_rx_sring_t *rxs;
    struct netfront_rxq *rxq;

    info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
        M_DEVBUF, M_WAITOK|M_ZERO);

    for (q = 0; q < num_queues; q++) {
        rxq = &info->rxq[q];

        rxq->id = q;
        rxq->info = info;
        rxq->ring_ref = GRANT_REF_INVALID;
        rxq->ring.sring = NULL;
        snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
        mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
            MTX_DEF);

        for (i = 0; i <= NET_RX_RING_SIZE; i++) {
            rxq->mbufs[i] = NULL;
            rxq->grant_ref[i] = GRANT_REF_INVALID;
        }

        /* Start resources allocation */

        if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
            &rxq->gref_head) != 0) {
            device_printf(dev, "allocating rx gref");
            error = ENOMEM;
            goto fail;
        }

        rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
            M_WAITOK|M_ZERO);
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);

        error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
            &rxq->ring_ref);
        if (error != 0) {
            device_printf(dev, "granting rx ring page");
            goto fail_grant_ring;
        }

        callout_init(&rxq->rx_refill, 1);
    }

    return (0);

fail_grant_ring:
    gnttab_free_grant_references(rxq->gref_head);
    free(rxq->ring.sring, M_DEVBUF);
fail:
    for (; q >= 0; q--) {
        disconnect_rxq(&info->rxq[q]);
        destroy_rxq(&info->rxq[q]);
    }

    free(info->rxq, M_DEVBUF);
    return (error);
}
static void
disconnect_txq(struct netfront_txq *txq)
{

    xn_release_tx_bufs(txq);
    gnttab_free_grant_references(txq->gref_head);
    gnttab_end_foreign_access(txq->ring_ref, NULL);
    xen_intr_unbind(&txq->xen_intr_handle);
}

static void
destroy_txq(struct netfront_txq *txq)
{

    free(txq->ring.sring, M_DEVBUF);
    buf_ring_free(txq->br, M_DEVBUF);
    taskqueue_drain_all(txq->tq);
    taskqueue_free(txq->tq);
}

static void
destroy_txqs(struct netfront_info *np)
{
    int i;

    for (i = 0; i < np->num_queues; i++)
        destroy_txq(&np->txq[i]);

    free(np->txq, M_DEVBUF);
    np->txq = NULL;
}
static int
setup_txqs(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
    int q, i;
    int error = 0;
    netif_tx_sring_t *txs;
    struct netfront_txq *txq;

    info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
        M_DEVBUF, M_WAITOK|M_ZERO);

    for (q = 0; q < num_queues; q++) {
        txq = &info->txq[q];

        txq->id = q;
        txq->info = info;

        txq->ring_ref = GRANT_REF_INVALID;
        txq->ring.sring = NULL;

        snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);

        mtx_init(&txq->lock, txq->name, "netfront transmit lock",
            MTX_DEF);

        for (i = 0; i <= NET_TX_RING_SIZE; i++) {
            txq->mbufs[i] = (void *) ((u_long) i+1);
            txq->grant_ref[i] = GRANT_REF_INVALID;
        }
        txq->mbufs[NET_TX_RING_SIZE] = (void *)0;

        /* Start resources allocation. */

        if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
            &txq->gref_head) != 0) {
            device_printf(dev, "failed to allocate tx grant refs\n");
            error = ENOMEM;
            goto fail;
        }

        txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
            M_WAITOK|M_ZERO);
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);

        error = xenbus_grant_ring(dev, virt_to_mfn(txs),
            &txq->ring_ref);
        if (error != 0) {
            device_printf(dev, "failed to grant tx ring\n");
            goto fail_grant_ring;
        }

        txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
            M_WAITOK, &txq->lock);
        TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);

        txq->tq = taskqueue_create(txq->name, M_WAITOK,
            taskqueue_thread_enqueue, &txq->tq);

        error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
            "%s txq %d", device_get_nameunit(dev), txq->id);
        if (error != 0) {
            device_printf(dev, "failed to start tx taskq %d\n",
                txq->id);
            goto fail_start_thread;
        }

        error = xen_intr_alloc_and_bind_local_port(dev,
            xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr,
            &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
            &txq->xen_intr_handle);
        if (error != 0) {
            device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n");
            goto fail_bind_port;
        }
    }

    return (0);

fail_bind_port:
    taskqueue_drain_all(txq->tq);
fail_start_thread:
    buf_ring_free(txq->br, M_DEVBUF);
    taskqueue_free(txq->tq);
    gnttab_end_foreign_access(txq->ring_ref, NULL);
fail_grant_ring:
    gnttab_free_grant_references(txq->gref_head);
    free(txq->ring.sring, M_DEVBUF);
fail:
    for (; q >= 0; q--) {
        disconnect_txq(&info->txq[q]);
        destroy_txq(&info->txq[q]);
    }

    free(info->txq, M_DEVBUF);
    return (error);
}
static int
setup_device(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
    int error;
    int q;

    if (info->txq)
        destroy_txqs(info);

    if (info->rxq)
        destroy_rxqs(info);

    info->num_queues = 0;

    error = setup_rxqs(dev, info, num_queues);
    if (error != 0)
        goto out;
    error = setup_txqs(dev, info, num_queues);
    if (error != 0)
        goto out;

    info->num_queues = num_queues;

    /* No split event channel at the moment. */
    for (q = 0; q < num_queues; q++)
        info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;

    return (0);

out:
    KASSERT(error != 0, ("Error path taken without providing an error code"));
    return (error);
}
/**
 * If this interface has an IPv4 address, send an ARP for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
    struct ifnet *ifp;
    struct ifaddr *ifa;

    ifp = info->xn_ifp;
    TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
        if (ifa->ifa_addr->sa_family == AF_INET) {
            arp_ifinit(ifp, ifa);
        }
    }
}
/* Callback received when the backend's state changes. */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
    struct netfront_info *sc = device_get_softc(dev);

    DPRINTK("newstate=%d\n", newstate);

    switch (newstate) {
    case XenbusStateInitialising:
    case XenbusStateInitialised:
    case XenbusStateUnknown:
    case XenbusStateReconfigured:
    case XenbusStateReconfiguring:
        break;
    case XenbusStateInitWait:
        if (xenbus_get_state(dev) != XenbusStateInitialising)
            break;
        if (xn_connect(sc) != 0)
            break;
        /* Switch to connected state before kicking the rings. */
        xenbus_set_state(sc->xbdev, XenbusStateConnected);
        xn_kick_rings(sc);
        break;
    case XenbusStateClosing:
        xenbus_set_state(dev, XenbusStateClosed);
        break;
    case XenbusStateClosed:
        if (sc->xn_reset) {
            netif_disconnect_backend(sc);
            xenbus_set_state(dev, XenbusStateInitialising);
            sc->xn_reset = false;
        }
        break;
    case XenbusStateConnected:
        netfront_send_fake_arp(dev, sc);
        break;
    }
}
/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_txq *txq)
{

    return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
}
static void
xn_release_tx_bufs(struct netfront_txq *txq)
{
    int i;

    for (i = 1; i <= NET_TX_RING_SIZE; i++) {
        struct mbuf *m;

        m = txq->mbufs[i];

        /*
         * We assume that no kernel addresses are
         * less than NET_TX_RING_SIZE. Any entry
         * in the table that is below this number
         * must be an index from free-list tracking.
         */
        if (((uintptr_t)m) <= NET_TX_RING_SIZE)
            continue;
        gnttab_end_foreign_access_ref(txq->grant_ref[i]);
        gnttab_release_grant_reference(&txq->gref_head,
            txq->grant_ref[i]);
        txq->grant_ref[i] = GRANT_REF_INVALID;
        add_id_to_freelist(txq->mbufs, i);
        txq->mbufs_cnt--;
        if (txq->mbufs_cnt < 0) {
            panic("%s: tx_chain_cnt must be >= 0", __func__);
        }
        m_free(m);
    }
}
static struct mbuf *
xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
{
    struct mbuf *m;

    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
    if (m == NULL)
        return NULL;
    m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;

    return (m);
}
static void
xn_alloc_rx_buffers(struct netfront_rxq *rxq)
{
    RING_IDX req_prod;
    int notify;

    XN_RX_LOCK_ASSERT(rxq);

    if (__predict_false(rxq->info->carrier == 0))
        return;

    for (req_prod = rxq->ring.req_prod_pvt;
         req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
         req_prod++) {
        struct mbuf *m;
        unsigned short id;
        grant_ref_t ref;
        struct netif_rx_request *req;
        unsigned long pfn;

        m = xn_alloc_one_rx_buffer(rxq);
        if (m == NULL)
            break;

        id = xn_rxidx(req_prod);

        KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
        rxq->mbufs[id] = m;

        ref = gnttab_claim_grant_reference(&rxq->gref_head);
        KASSERT(ref != GNTTAB_LIST_END,
            ("reserved grant references exhausted"));
        rxq->grant_ref[id] = ref;

        pfn = atop(vtophys(mtod(m, vm_offset_t)));
        req = RING_GET_REQUEST(&rxq->ring, req_prod);

        gnttab_grant_foreign_access_ref(ref,
            xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
        req->id = id;
        req->gref = ref;
    }

    rxq->ring.req_prod_pvt = req_prod;

    /* Not enough requests? Try again later. */
    if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
        callout_reset_curcpu(&rxq->rx_refill, hz/10,
            xn_alloc_rx_buffers_callout, rxq);
        return;
    }

    wmb();  /* barrier so backend sees requests */

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
    if (notify)
        xen_intr_signal(rxq->xen_intr_handle);
}
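/*
 * A note on the notify protocol: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY only
 * sets 'notify' when the backend has advertised (via the ring's req_event
 * index) that it is waiting for more requests, so a whole batch of refills
 * costs at most one event-channel signal.
 */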
static void
xn_alloc_rx_buffers_callout(void *arg)
{
    struct netfront_rxq *rxq;

    rxq = (struct netfront_rxq *)arg;
    XN_RX_LOCK(rxq);
    xn_alloc_rx_buffers(rxq);
    XN_RX_UNLOCK(rxq);
}
static void
xn_release_rx_bufs(struct netfront_rxq *rxq)
{
    int i;
    struct mbuf *m;
    grant_ref_t ref;

    for (i = 0; i < NET_RX_RING_SIZE; i++) {
        m = rxq->mbufs[i];

        if (m == NULL)
            continue;

        ref = rxq->grant_ref[i];
        if (ref == GRANT_REF_INVALID)
            continue;

        gnttab_end_foreign_access_ref(ref);
        gnttab_release_grant_reference(&rxq->gref_head, ref);
        rxq->mbufs[i] = NULL;
        rxq->grant_ref[i] = GRANT_REF_INVALID;
        m_freem(m);
    }
}
static void
xn_rxeof(struct netfront_rxq *rxq)
{
    struct ifnet *ifp;
    struct netfront_info *np = rxq->info;
#if (defined(INET) || defined(INET6))
    struct lro_ctrl *lro = &rxq->lro;
#endif
    struct netfront_rx_info rinfo;
    struct netif_rx_response *rx = &rinfo.rx;
    struct netif_extra_info *extras = rinfo.extras;
    RING_IDX i, rp;
    struct mbuf *m;
    struct mbufq mbufq_rxq, mbufq_errq;
    int err, work_to_do;

    XN_RX_LOCK_ASSERT(rxq);

    if (!netfront_carrier_ok(np))
        return;

    /* XXX: there should be some sane limit. */
    mbufq_init(&mbufq_errq, INT_MAX);
    mbufq_init(&mbufq_rxq, INT_MAX);

    ifp = np->xn_ifp;

    do {
        rp = rxq->ring.sring->rsp_prod;
        rmb();  /* Ensure we see queued responses up to 'rp'. */

        i = rxq->ring.rsp_cons;
        while (i != rp) {
            memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
            memset(extras, 0, sizeof(rinfo.extras));

            m = NULL;
            err = xn_get_responses(rxq, &rinfo, rp, &i, &m);

            if (__predict_false(err)) {
                if (m != NULL)
                    (void)mbufq_enqueue(&mbufq_errq, m);
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                continue;
            }

            m->m_pkthdr.rcvif = ifp;
            if (rx->flags & NETRXF_data_validated) {
                /*
                 * According to mbuf(9) the correct way to tell
                 * the stack that the checksum of an inbound
                 * packet is correct, without it actually being
                 * present (because the underlying interface
                 * doesn't provide it), is to set the
                 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags,
                 * and the csum_data field to 0xffff.
                 */
                m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
                    | CSUM_PSEUDO_HDR);
                m->m_pkthdr.csum_data = 0xffff;
            }
            if ((rx->flags & NETRXF_extra_info) != 0 &&
                (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
                XEN_NETIF_EXTRA_TYPE_GSO)) {
                m->m_pkthdr.tso_segsz =
                    extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
                m->m_pkthdr.csum_flags |= CSUM_TSO;
            }

            (void)mbufq_enqueue(&mbufq_rxq, m);
        }

        rxq->ring.rsp_cons = i;

        xn_alloc_rx_buffers(rxq);

        RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
    } while (work_to_do);

    mbufq_drain(&mbufq_errq);

    /*
     * Process all the mbufs after the remapping is complete.
     * Break the mbuf chain first though.
     */
    while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#if (defined(INET) || defined(INET6))
        /* Use LRO if possible */
        if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
            lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
            /*
             * If LRO fails, pass up to the stack
             * directly.
             */
            (*ifp->if_input)(ifp, m);
        }
#else
        (*ifp->if_input)(ifp, m);
#endif
    }

#if (defined(INET) || defined(INET6))
    /*
     * Flush any outstanding LRO work
     */
    tcp_lro_flush_all(lro);
#endif
}
static void
xn_txeof(struct netfront_txq *txq)
{
    RING_IDX i, prod;
    unsigned short id;
    struct ifnet *ifp;
    netif_tx_response_t *txr;
    struct mbuf *m;
    struct netfront_info *np = txq->info;

    XN_TX_LOCK_ASSERT(txq);

    if (!netfront_carrier_ok(np))
        return;

    ifp = np->xn_ifp;

    do {
        prod = txq->ring.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'prod'. */

        for (i = txq->ring.rsp_cons; i != prod; i++) {
            txr = RING_GET_RESPONSE(&txq->ring, i);
            if (txr->status == NETIF_RSP_NULL)
                continue;

            if (txr->status != NETIF_RSP_OKAY) {
                printf("%s: WARNING: response is %d!\n",
                    __func__, txr->status);
            }
            id = txr->id;
            m = txq->mbufs[id];
            KASSERT(m != NULL, ("mbuf not found in chain"));
            KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
                ("mbuf already on the free list, but we're "
                "trying to free it again!"));
            M_ASSERTVALID(m);

            if (__predict_false(gnttab_query_foreign_access(
                txq->grant_ref[id]) != 0)) {
                panic("%s: grant id %u still in use by the "
                    "backend", __func__, id);
            }
            gnttab_end_foreign_access_ref(txq->grant_ref[id]);
            gnttab_release_grant_reference(
                &txq->gref_head, txq->grant_ref[id]);
            txq->grant_ref[id] = GRANT_REF_INVALID;

            txq->mbufs[id] = NULL;
            add_id_to_freelist(txq->mbufs, id);
            txq->mbufs_cnt--;
            m_freem(m);
            /* Only mark the txq active if we've freed up at least one slot to try */
            ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        }
        txq->ring.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of
         * tx_cons. Note that it is essential to schedule a
         * callback, no matter how few buffers are pending. Even if
         * there is space in the transmit ring, higher layers may
         * be blocked because too much data is outstanding: in such
         * cases notification from Xen is likely to be the only kick
         * that we'll get.
         */
        txq->ring.sring->rsp_event =
            prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
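        /*
         * Worked example of the formula above: with 64 requests still
         * outstanding (req_prod - prod == 64), rsp_event becomes
         * prod + 33, i.e. the backend will raise the next event once
         * half of the pending transmissions have completed, rather
         * than on every single response.
         */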
        mb();
    } while (prod != txq->ring.sring->rsp_prod);

    if (txq->full &&
        ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
        txq->full = false;
        xn_txq_start(txq);
    }
}
static void
xn_intr(void *xsc)
{
    struct netfront_txq *txq = xsc;
    struct netfront_info *np = txq->info;
    struct netfront_rxq *rxq = &np->rxq[txq->id];

    /* kick both tx and rx */
    xn_rxq_intr(rxq);
    xn_txq_intr(txq);
}
static void
xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
    grant_ref_t ref)
{
    int new = xn_rxidx(rxq->ring.req_prod_pvt);

    KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
    rxq->mbufs[new] = m;
    rxq->grant_ref[new] = ref;
    RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
    RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
    rxq->ring.req_prod_pvt++;
}
static int
xn_get_extras(struct netfront_rxq *rxq,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
    struct netif_extra_info *extra;
    int err = 0;

    do {
        struct mbuf *m;
        grant_ref_t ref;

        if (__predict_false(*cons + 1 == rp)) {
            err = EINVAL;
            break;
        }

        extra = (struct netif_extra_info *)
            RING_GET_RESPONSE(&rxq->ring, ++(*cons));

        if (__predict_false(!extra->type ||
            extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
            err = EINVAL;
        } else {
            memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
        }

        m = xn_get_rx_mbuf(rxq, *cons);
        ref = xn_get_rx_ref(rxq, *cons);
        xn_move_rx_slot(rxq, m, ref);
    } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

    return err;
}
static int
xn_get_responses(struct netfront_rxq *rxq,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
    struct netif_rx_response *rx = &rinfo->rx;
    struct netif_extra_info *extras = rinfo->extras;
    struct mbuf *m, *m0, *m_prev;
    grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
    int frags = 1;
    int err = 0;
    u_long ret;

    m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);

    if (rx->flags & NETRXF_extra_info) {
        err = xn_get_extras(rxq, extras, rp, cons);
    }

    if (m0 != NULL) {
        m0->m_pkthdr.len = 0;
        m0->m_next = NULL;
    }

    for (;;) {
        DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
            rx->status, rx->offset, frags);

        if (__predict_false(rx->status < 0 ||
            rx->offset + rx->status > PAGE_SIZE)) {
            xn_move_rx_slot(rxq, m, ref);
            if (m0 == m)
                m0 = NULL;
            m = NULL;
            err = EINVAL;
            goto next_skip_queue;
        }

        /*
         * This definitely indicates a bug, either in this driver or in
         * the backend driver. In future this should flag the bad
         * situation to the system controller to reboot the backend.
         */
        if (ref == GRANT_REF_INVALID) {
            printf("%s: Bad rx response id %d.\n", __func__, rx->id);
            err = EINVAL;
            goto next;
        }

        ret = gnttab_end_foreign_access_ref(ref);
        KASSERT(ret, ("Unable to end access to grant references"));

        gnttab_release_grant_reference(&rxq->gref_head, ref);

next:
        if (m == NULL)
            break;

        m->m_len = rx->status;
        m->m_data += rx->offset;
        m0->m_pkthdr.len += rx->status;

next_skip_queue:
        if (!(rx->flags & NETRXF_more_data))
            break;

        if (*cons + frags == rp) {
            if (net_ratelimit())
                WPRINTK("Need more frags\n");
            err = ENOENT;
            printf("%s: cons %u frags %u rp %u, not enough frags\n",
                __func__, *cons, frags, rp);
            break;
        }
        m_prev = m;

        rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
        m = xn_get_rx_mbuf(rxq, *cons + frags);

        /*
         * m_prev == NULL can happen if rx->status < 0 or if
         * rx->offset + rx->status > PAGE_SIZE above.
         */
        if (m_prev != NULL)
            m_prev->m_next = m;

        /*
         * m0 can be NULL if rx->status < 0 or if rx->offset +
         * rx->status > PAGE_SIZE above.
         */
        if (m0 == NULL)
            m0 = m;
        m->m_next = NULL;
        ref = xn_get_rx_ref(rxq, *cons + frags);
        frags++;
    }
    *list = m0;
    *cons += frags;

    return (err);
}
/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
    int nfrags;

    for (nfrags = 0; m != NULL; m = m->m_next)
        nfrags++;

    return (nfrags);
}
/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
{
    struct mbuf *m;
    struct netfront_info *np = txq->info;
    struct ifnet *ifp = np->xn_ifp;
    u_int nfrags;
    int otherend_id;

    /*
     * Defragment the mbuf if necessary.
     */
    nfrags = xn_count_frags(m_head);

    /*
     * Check to see whether this request is longer than netback
     * can handle, and try to defrag it.
     *
     * It is a bit lame, but the netback driver in Linux can't
     * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
     * the Linux network stack.
     */
    if (nfrags > np->maxfrags) {
        m = m_defrag(m_head, M_NOWAIT);
        if (!m) {
            /*
             * Defrag failed, so free the mbuf and
             * therefore drop the packet.
             */
            m_freem(m_head);
            return (EMSGSIZE);
        }
        m_head = m;
    }

    /* Determine how many fragments now exist */
    nfrags = xn_count_frags(m_head);

    /*
     * Check to see whether the defragmented packet has too many
     * segments for the Linux netback driver.
     *
     * The FreeBSD TCP stack, with TSO enabled, can produce a chain
     * of mbufs longer than Linux can handle.  Make sure we don't
     * pass a too-long chain over to the other side by dropping the
     * packet.  It doesn't look like there is currently a way to
     * tell the TCP stack to generate a shorter chain of packets.
     */
    if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
        printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
            "won't be able to handle it, dropping\n",
            __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
        m_freem(m_head);
        return (EMSGSIZE);
    }

    /*
     * This check should be redundant.  We've already verified that we
     * have enough slots in the ring to handle a packet of maximum
     * size, and that our packet is less than the maximum size.  Keep
     * it in here as an assert for now just to make certain that
     * chain_cnt is accurate.
     */
    KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
        ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
        "(%d)!", __func__, (int) txq->mbufs_cnt,
        (int) nfrags, (int) NET_TX_RING_SIZE));

    /*
     * Start packing the mbufs in this chain into
     * the fragment pointers. Stop when we run out
     * of fragments or hit the end of the mbuf chain.
     */
    otherend_id = xenbus_get_otherend_id(np->xbdev);
    for (m = m_head; m; m = m->m_next) {
        netif_tx_request_t *tx;
        uintptr_t id;
        grant_ref_t ref;
        u_long mfn; /* XXX Wrong type? */

        tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
        id = get_id_from_freelist(txq->mbufs);
        if (id == 0)
            panic("%s: was allocated the freelist head!\n",
                __func__);
        txq->mbufs_cnt++;
        if (txq->mbufs_cnt > NET_TX_RING_SIZE)
            panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
                __func__);
        txq->mbufs[id] = m;
        tx->id = id;
        ref = gnttab_claim_grant_reference(&txq->gref_head);
        KASSERT((short)ref >= 0, ("Negative ref"));
        mfn = virt_to_mfn(mtod(m, vm_offset_t));
        gnttab_grant_foreign_access_ref(ref, otherend_id,
            mfn, GNTMAP_readonly);
        tx->gref = txq->grant_ref[id] = ref;
        tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
        tx->flags = 0;
        if (m == m_head) {
            /*
             * The first fragment has the entire packet
             * size, subsequent fragments have just the
             * fragment size. The backend works out the
             * true size of the first fragment by
             * subtracting the sizes of the other
             * fragments.
             */
            tx->size = m->m_pkthdr.len;

            /*
             * The first fragment contains the checksum flags
             * and is optionally followed by extra data for
             * TSO etc.
             *
             * CSUM_TSO requires checksum offloading.
             * Some versions of FreeBSD fail to
             * set CSUM_TCP in the CSUM_TSO case,
             * so we have to test for CSUM_TSO
             * explicitly.
             */
            if (m->m_pkthdr.csum_flags
                & (CSUM_DELAY_DATA | CSUM_TSO)) {
                tx->flags |= (NETTXF_csum_blank
                    | NETTXF_data_validated);
            }
            if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                struct netif_extra_info *gso =
                    (struct netif_extra_info *)
                    RING_GET_REQUEST(&txq->ring,
                    ++txq->ring.req_prod_pvt);

                tx->flags |= NETTXF_extra_info;

                gso->u.gso.size = m->m_pkthdr.tso_segsz;
                gso->u.gso.type =
                    XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
            }
        } else {
            tx->size = m->m_len;
        }
        if (m->m_next)
            tx->flags |= NETTXF_more_data;

        txq->ring.req_prod_pvt++;
    }
    BPF_MTAP(ifp, m_head);

    if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
    if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
    if (m_head->m_flags & M_MCAST)
        if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

    xn_txeof(txq);

    return (0);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *np)
{
    struct ifnet *ifp;
    int i;
    struct netfront_rxq *rxq;

    XN_LOCK_ASSERT(np);

    ifp = np->xn_ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
        return;

    xn_stop(np);

    for (i = 0; i < np->num_queues; i++) {
        rxq = &np->rxq[i];
        XN_RX_LOCK(rxq);
        xn_alloc_rx_buffers(rxq);
        rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
        if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
            xn_rxeof(rxq);
        XN_RX_UNLOCK(rxq);
    }

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    if_link_state_change(ifp, LINK_STATE_UP);
}
static void
xn_ifinit(void *xsc)
{
    struct netfront_info *sc = xsc;

    XN_LOCK(sc);
    xn_ifinit_locked(sc);
    XN_UNLOCK(sc);
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct netfront_info *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    device_t dev;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif
    int mask, error = 0, reinit;

    dev = sc->xbdev;

    switch (cmd) {
    case SIOCSIFADDR:
#ifdef INET
        XN_LOCK(sc);
        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                xn_ifinit_locked(sc);
            arp_ifinit(ifp, ifa);
            XN_UNLOCK(sc);
        } else {
            XN_UNLOCK(sc);
#endif
            error = ether_ioctl(ifp, cmd, data);
#ifdef INET
        }
#endif
        break;
    case SIOCSIFMTU:
        if (ifp->if_mtu == ifr->ifr_mtu)
            break;

        ifp->if_mtu = ifr->ifr_mtu;
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        xn_ifinit(sc);
        break;
    case SIOCSIFFLAGS:
        XN_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the state of the PROMISC flag changed,
             * then just use the 'set promisc mode' command
             * instead of reinitializing the entire NIC. Doing
             * a full re-init means reloading the firmware and
             * waiting for it to start up, which may take a
             * second or two.
             */
            xn_ifinit_locked(sc);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                xn_stop(sc);
            }
        }
        sc->xn_if_flags = ifp->if_flags;
        XN_UNLOCK(sc);
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        reinit = 0;

        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            ifp->if_hwassist ^= XN_CSUM_FEATURES;
        }
        if (mask & IFCAP_TSO4) {
            ifp->if_capenable ^= IFCAP_TSO4;
            ifp->if_hwassist ^= CSUM_TSO;
        }

        if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
            /* These Rx features require us to renegotiate. */
            reinit = 1;

            if (mask & IFCAP_RXCSUM)
                ifp->if_capenable ^= IFCAP_RXCSUM;
            if (mask & IFCAP_LRO)
                ifp->if_capenable ^= IFCAP_LRO;
        }

        if (reinit == 0)
            break;

        /*
         * We must reset the interface so the backend picks up the
         * new features.
         */
        device_printf(sc->xbdev,
            "performing interface reset due to feature change\n");
        XN_LOCK(sc);
        netfront_carrier_off(sc);
        sc->xn_reset = true;
        /*
         * NB: the pending packet queue is not flushed, since
         * the interface should still support the old options.
         */
        XN_UNLOCK(sc);
        /*
         * Delete the xenstore nodes that export features.
         *
         * NB: There's a xenbus state called
         * "XenbusStateReconfiguring", which is what we should set
         * here. Sadly none of the backends know how to handle it,
         * and simply disconnect from the frontend, so we will just
         * switch back to XenbusStateInitialising in order to force
         * a reconnection.
         */
        xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
        xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
        xenbus_set_state(dev, XenbusStateClosing);

        /*
         * Wait for the frontend to reconnect before returning
         * from the ioctl. 30s should be more than enough for any
         * sane backend to reconnect.
         */
        error = tsleep(sc, 0, "xn_rst", 30*hz);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
    }

    return (error);
}
static void
xn_stop(struct netfront_info *sc)
{
    struct ifnet *ifp;

    XN_LOCK_ASSERT(sc);

    ifp = sc->xn_ifp;

    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    if_link_state_change(ifp, LINK_STATE_DOWN);
}
static void
xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
{
    int requeue_idx, i;
    grant_ref_t ref;
    netif_rx_request_t *req;

    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
        struct mbuf *m;
        u_long pfn;

        if (rxq->mbufs[i] == NULL)
            continue;

        m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
        ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);

        req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
        pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

        gnttab_grant_foreign_access_ref(ref,
            xenbus_get_otherend_id(rxq->info->xbdev),
            pfn, 0);

        req->gref = ref;
        req->id   = requeue_idx;

        requeue_idx++;
    }

    rxq->ring.req_prod_pvt = requeue_idx;
}
/* START of Xenolinux helper functions adapted to FreeBSD */
static int
xn_connect(struct netfront_info *np)
{
    int i, error;
    u_int feature_rx_copy;
    struct netfront_rxq *rxq;
    struct netfront_txq *txq;

    error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-rx-copy", NULL, "%u", &feature_rx_copy);
    if (error != 0)
        feature_rx_copy = 0;

    /* We only support rx copy. */
    if (!feature_rx_copy)
        return (EPROTONOSUPPORT);

    /* Recovery procedure: */
    error = talk_to_backend(np->xbdev, np);
    if (error != 0)
        return (error);

    /* Step 1: Reinitialise variables. */
    xn_query_features(np);
    xn_configure_features(np);

    /* Step 2: Release TX buffer */
    for (i = 0; i < np->num_queues; i++) {
        txq = &np->txq[i];
        xn_release_tx_bufs(txq);
    }

    /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
    for (i = 0; i < np->num_queues; i++) {
        rxq = &np->rxq[i];
        xn_rebuild_rx_bufs(rxq);
    }

    /*
     * Step 4: All public and private state should now be sane.  Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * packets.
     */
    netfront_carrier_on(np);

    return (0);
}
static void
xn_kick_rings(struct netfront_info *np)
{
    struct netfront_rxq *rxq;
    struct netfront_txq *txq;
    int i;

    for (i = 0; i < np->num_queues; i++) {
        txq = &np->txq[i];
        rxq = &np->rxq[i];
        xen_intr_signal(txq->xen_intr_handle);
        XN_TX_LOCK(txq);
        xn_txeof(txq);
        XN_TX_UNLOCK(txq);
        XN_RX_LOCK(rxq);
        xn_alloc_rx_buffers(rxq);
        XN_RX_UNLOCK(rxq);
    }
}
static void
xn_query_features(struct netfront_info *np)
{
    int val;

    device_printf(np->xbdev, "backend features:");

    if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-sg", NULL, "%d", &val) != 0)
        val = 0;

    np->maxfrags = 1;
    if (val) {
        np->maxfrags = MAX_TX_REQ_FRAGS;
        printf(" feature-sg");
    }

    if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-gso-tcpv4", NULL, "%d", &val) != 0)
        val = 0;

    np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
    if (val) {
        np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
        printf(" feature-gso-tcpv4");
    }

    /*
     * HW CSUM offload is assumed to be available unless
     * feature-no-csum-offload is set in xenstore.
     */
    if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-no-csum-offload", NULL, "%d", &val) != 0)
        val = 0;

    np->xn_ifp->if_capabilities |= IFCAP_HWCSUM;
    if (val) {
        np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM);
        printf(" feature-no-csum-offload");
    }

    printf("\n");
}
static int
xn_configure_features(struct netfront_info *np)
{
    int err, cap_enabled;
#if (defined(INET) || defined(INET6))
    int i;
#endif
    struct ifnet *ifp;

    ifp = np->xn_ifp;
    err = 0;

    if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) {
        /* Current options are available, no need to do anything. */
        return (0);
    }

    /* Try to preserve as many options as possible. */
    cap_enabled = ifp->if_capenable;
    ifp->if_capenable = ifp->if_hwassist = 0;

#if (defined(INET) || defined(INET6))
    if ((cap_enabled & IFCAP_LRO) != 0)
        for (i = 0; i < np->num_queues; i++)
            tcp_lro_free(&np->rxq[i].lro);
    if (xn_enable_lro &&
        (ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) {
        ifp->if_capenable |= IFCAP_LRO;
        for (i = 0; i < np->num_queues; i++) {
            err = tcp_lro_init(&np->rxq[i].lro);
            if (err != 0) {
                device_printf(np->xbdev,
                    "LRO initialization failed\n");
                ifp->if_capenable &= ~IFCAP_LRO;
                break;
            }
            np->rxq[i].lro.ifp = ifp;
        }
    }
    if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) {
        ifp->if_capenable |= IFCAP_TSO4;
        ifp->if_hwassist |= CSUM_TSO;
    }
#endif
    if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) {
        ifp->if_capenable |= IFCAP_TXCSUM;
        ifp->if_hwassist |= XN_CSUM_FEATURES;
    }
    if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0)
        ifp->if_capenable |= IFCAP_RXCSUM;

    return (err);
}
static int
xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
{
    struct netfront_info *np;
    struct ifnet *ifp;
    struct buf_ring *br;
    int error, notify;

    np = txq->info;
    br = txq->br;
    ifp = np->xn_ifp;
    error = 0;

    XN_TX_LOCK_ASSERT(txq);

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
        !netfront_carrier_ok(np)) {
        if (m != NULL)
            error = drbr_enqueue(ifp, br, m);
        return (error);
    }

    if (m != NULL) {
        error = drbr_enqueue(ifp, br, m);
        if (error != 0)
            return (error);
    }

    while ((m = drbr_peek(ifp, br)) != NULL) {
        if (!xn_tx_slot_available(txq)) {
            drbr_putback(ifp, br, m);
            break;
        }

        error = xn_assemble_tx_request(txq, m);
        /* xn_assemble_tx_request always consumes the mbuf. */
        if (error != 0) {
            drbr_advance(ifp, br);
            break;
        }

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
        if (notify)
            xen_intr_signal(txq->xen_intr_handle);

        drbr_advance(ifp, br);
    }

    if (RING_FULL(&txq->ring))
        txq->full = true;

    return (0);
}
static int
xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct netfront_info *np;
    struct netfront_txq *txq;
    int i, npairs, error;

    np = ifp->if_softc;
    npairs = np->num_queues;

    if (!netfront_carrier_ok(np))
        return (ENOBUFS);

    KASSERT(npairs != 0, ("called with 0 available queues"));

    /* check if flowid is set */
    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
        i = m->m_pkthdr.flowid % npairs;
    else
        i = curcpu % npairs;
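    /*
     * Selecting by flowid keeps every packet of a flow on the same
     * queue pair, preserving per-flow ordering; unhashed traffic simply
     * uses the current CPU's pair.
     */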
    txq = &np->txq[i];

    if (XN_TX_TRYLOCK(txq) != 0) {
        error = xn_txq_mq_start_locked(txq, m);
        XN_TX_UNLOCK(txq);
    } else {
        error = drbr_enqueue(ifp, txq->br, m);
        taskqueue_enqueue(txq->tq, &txq->defrtask);
    }

    return (error);
}
static void
xn_qflush(struct ifnet *ifp)
{
    struct netfront_info *np;
    struct netfront_txq *txq;
    struct mbuf *m;
    int i;

    np = ifp->if_softc;

    for (i = 0; i < np->num_queues; i++) {
        txq = &np->txq[i];

        XN_TX_LOCK(txq);
        while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
            m_freem(m);
        XN_TX_UNLOCK(txq);
    }

    if_qflush(ifp);
}
/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
static int
create_netdev(device_t dev)
{
    struct netfront_info *np;
    int err;
    struct ifnet *ifp;

    np = device_get_softc(dev);

    np->xbdev = dev;

    mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

    ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
    ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

    err = xen_net_read_mac(dev, np->mac);
    if (err != 0)
        goto error;

    /* Set up ifnet structure */
    ifp = np->xn_ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = np;
    if_initname(ifp, "xn", device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = xn_ioctl;

    ifp->if_transmit = xn_txq_mq_start;
    ifp->if_qflush = xn_qflush;

    ifp->if_init = xn_ifinit;

    ifp->if_hwassist = XN_CSUM_FEATURES;
    /* Enable all supported features at device creation. */
    ifp->if_capenable = ifp->if_capabilities =
        IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO;
    ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
    ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
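    /*
     * Keep the whole TSO frame within the backend's 64 KiB limit: tsomax
     * leaves room for the Ethernet (and possible VLAN) header, and the
     * segment count matches the per-packet fragment budget above.
     */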
    ether_ifattach(ifp, np->mac);
    netfront_carrier_off(np);

    return (0);

error:
    KASSERT(err != 0, ("Error path with no error code specified"));
    return (err);
}
static int
netfront_detach(device_t dev)
{
    struct netfront_info *info = device_get_softc(dev);

    DPRINTK("%s\n", xenbus_get_node(dev));

    netif_free(info);

    return (0);
}
static void
netif_free(struct netfront_info *np)
{

    XN_LOCK(np);
    xn_stop(np);
    XN_UNLOCK(np);
    netif_disconnect_backend(np);
    ether_ifdetach(np->xn_ifp);
    free(np->rxq, M_DEVBUF);
    free(np->txq, M_DEVBUF);
    if_free(np->xn_ifp);
    np->xn_ifp = NULL;
    ifmedia_removeall(&np->sc_media);
}
static void
netif_disconnect_backend(struct netfront_info *np)
{
    u_int i;

    for (i = 0; i < np->num_queues; i++) {
        XN_RX_LOCK(&np->rxq[i]);
        XN_TX_LOCK(&np->txq[i]);
    }
    netfront_carrier_off(np);
    for (i = 0; i < np->num_queues; i++) {
        XN_RX_UNLOCK(&np->rxq[i]);
        XN_TX_UNLOCK(&np->txq[i]);
    }

    for (i = 0; i < np->num_queues; i++) {
        disconnect_rxq(&np->rxq[i]);
        disconnect_txq(&np->txq[i]);
    }
}
static int
xn_ifmedia_upd(struct ifnet *ifp)
{

    return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

    ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
    ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}
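/*
 * A paravirtualized interface has no physical transceiver, so the media
 * handlers above fabricate a fixed "manual" Ethernet medium and always
 * report the link as active.
 */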
/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     netfront_probe),
    DEVMETHOD(device_attach,    netfront_attach),
    DEVMETHOD(device_detach,    netfront_detach),
    DEVMETHOD(device_shutdown,  bus_generic_shutdown),
    DEVMETHOD(device_suspend,   netfront_suspend),
    DEVMETHOD(device_resume,    netfront_resume),

    /* Xenbus interface */
    DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

    DEVMETHOD_END
};

static driver_t netfront_driver = {
    "xn",
    netfront_methods,
    sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);