/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES        (CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
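
/*
 * __RING_SIZE() rounds the entry count down to a power of two so that
 * ring indexes can be reduced with a mask rather than a modulo; e.g.
 * with 4 KiB pages each of the two rings holds 256 request/response
 * slots.
 */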

/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for the change to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
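
/*
 * With 4 KiB pages the limit above evaluates to 16 + 2 = 18 fragments:
 * a maximal 64 KiB payload spans 16 pages, plus two more slots to cover
 * a misaligned start and end of the buffer.
 */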

#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

#if 0
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list);

#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)
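
/*
 * Despite the macro's name, no physical-to-machine translation table is
 * maintained here: vtophys() yields a guest frame number, which is what
 * the grant-table and ring interfaces expect from an auto-translated
 * (HVM) guest using the copying receive path (request-rx-copy).
 */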

/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
        struct mbuf     *xn_tx_chain[NET_TX_RING_SIZE+1];
        int             xn_tx_chain_cnt;
        struct mbuf     *xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct netfront_stats
{
        u_long  rx_packets;     /* total packets received       */
        u_long  tx_packets;     /* total packets transmitted    */
        u_long  rx_bytes;       /* total bytes received         */
        u_long  tx_bytes;       /* total bytes transmitted      */
        u_long  rx_errors;      /* bad packets received         */
        u_long  tx_errors;      /* packet transmit problems     */
};

struct netfront_info {
        struct ifnet            *xn_ifp;
        struct lro_ctrl         xn_lro;

        struct netfront_stats   stats;
        u_int                   tx_full;

        netif_tx_front_ring_t   tx;
        netif_rx_front_ring_t   rx;

        struct mtx              tx_lock;
        struct mtx              rx_lock;
        struct mtx              sc_lock;

        xen_intr_handle_t       xen_intr_handle;
        u_int                   carrier;
        u_int                   maxfrags;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
        int                     rx_min_target;
        int                     rx_max_target;
        int                     rx_target;

        grant_ref_t             gref_tx_head;
        grant_ref_t             grant_tx_ref[NET_TX_RING_SIZE + 1];
        grant_ref_t             gref_rx_head;
        grant_ref_t             grant_rx_ref[NET_RX_RING_SIZE + 1];

        device_t                xbdev;
        int                     tx_ring_ref;
        int                     rx_ring_ref;
        uint8_t                 mac[ETHER_ADDR_LEN];
        struct xn_chain_data    xn_cdata;       /* mbufs */
        struct mbufq            xn_rx_batch;    /* batch queue */

        int                     xn_if_flags;
        struct callout          xn_stat_ch;

        xen_pfn_t               rx_pfn_array[NET_RX_RING_SIZE];
        struct ifmedia          sc_media;

        bool                    xn_resume;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_RX_LOCK(_sc)         mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)       mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)         mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)       mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)            mtx_lock(&(_sc)->sc_lock)
#define XN_UNLOCK(_sc)          mtx_unlock(&(_sc)->sc_lock)

#define XN_LOCK_ASSERT(_sc)     mtx_assert(&(_sc)->sc_lock, MA_OWNED)
#define XN_RX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rx_lock, MA_OWNED)
#define XN_TX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->tx_lock, MA_OWNED)

struct netfront_rx_info {
        struct netif_rx_response rx;
        struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)      ((netif)->carrier = 1)
#define netfront_carrier_off(netif)     ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)      ((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */
static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{

        KASSERT(id != 0,
            ("%s: the head item (0) must always be free.", __func__));
        list[id] = list[0];
        list[0] = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
        uintptr_t id;

        id = (uintptr_t)list[0];
        KASSERT(id != 0,
            ("%s: the head item (0) must always remain free.", __func__));
        list[0] = list[id];
        return (id);
}

static inline int
xennet_rxidx(RING_IDX idx)
{

        return idx & (NET_RX_RING_SIZE - 1);
}
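
/*
 * NET_RX_RING_SIZE is a power of two (a __RING_SIZE() guarantee), so
 * the mask above is a cheap modulo over the ring index space.
 */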

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct mbuf *m;

        m = np->rx_mbufs[i];
        np->rx_mbufs[i] = NULL;
        return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];

        KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
        np->grant_rx_ref[i] = GRANT_REF_INVALID;
        return (ref);
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN.  Return 0 on
 * success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
        int error, i;
        char *s, *e, *macstr;
        const char *path;

        path = xenbus_get_node(dev);
        error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
        if (error == ENOENT) {
                /*
                 * Deal with missing mac XenStore nodes on devices with
                 * HVM emulation (the 'ioemu' configuration attribute)
                 * enabled.
                 *
                 * The HVM emulator may execute in a stub device model
                 * domain which lacks the permission, only given to Dom0,
                 * to update the guest's XenStore tree.  For this reason,
                 * the HVM emulator doesn't even attempt to write the
                 * front-side mac node, even when operating in Dom0.
                 * However, there should always be a mac listed in the
                 * backend tree.  Fall back to this version if our query
                 * of the front side XenStore location doesn't find
                 * anything.
                 */
                path = xenbus_get_otherend_path(dev);
                error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
        }
        if (error != 0) {
                xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
                return (error);
        }

        s = macstr;
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mac[i] = strtoul(s, &e, 16);
                if (s == e || (e[0] != ':' && e[0] != 0)) {
                        free(macstr, M_XENBUS);
                        return (ENOENT);
                }
                s = &e[1];
        }
        free(macstr, M_XENBUS);
        return (0);
}
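
/*
 * For reference, the XenStore node parsed above normally has the form
 * mac = "00:16:3e:xx:xx:xx" (00:16:3e being the OUI reserved for Xen
 * guests), though any colon-separated string of six hex octets is
 * accepted by the loop above.
 */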

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

        if (xen_hvm_domain() && xen_disable_pv_nics != 0)
                return (ENXIO);

        if (!strcmp(xenbus_get_type(dev), "vif")) {
                device_set_desc(dev, "Virtual Network Interface");
                return (0);
        }

        return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
        int err;

        err = create_netdev(dev);
        if (err) {
                xenbus_dev_fatal(dev, err, "creating netdev");
                return (err);
        }

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "enable_lro", CTLFLAG_RW,
            &xn_enable_lro, 0, "Large Receive Offload");

        return (0);
}

static int
netfront_suspend(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        XN_RX_LOCK(info);
        XN_TX_LOCK(info);
        netfront_carrier_off(info);
        XN_TX_UNLOCK(info);
        XN_RX_UNLOCK(info);
        return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        info->xn_resume = true;
        netif_disconnect_backend(info);
        return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
        const char *message;
        struct xs_transaction xst;
        const char *node = xenbus_get_node(dev);
        int err;

        err = xen_net_read_mac(dev, info->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
                goto out;
        }

        /* Create shared ring, alloc event channel. */
        err = setup_device(dev, info);
        if (err)
                goto out;

 again:
        err = xs_transaction_start(&xst);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto free;
        }
        err = xs_printf(xst, node, "tx-ring-ref", "%u",
            info->tx_ring_ref);
        if (err) {
                message = "writing tx ring-ref";
                goto abort_transaction;
        }
        err = xs_printf(xst, node, "rx-ring-ref", "%u",
            info->rx_ring_ref);
        if (err) {
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
        err = xs_printf(xst, node,
            "event-channel", "%u",
            xen_intr_port(info->xen_intr_handle));
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
        err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
        if (err) {
                message = "writing request-rx-copy";
                goto abort_transaction;
        }
        err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
        if (err) {
                message = "writing feature-rx-notify";
                goto abort_transaction;
        }
        err = xs_printf(xst, node, "feature-sg", "%d", 1);
        if (err) {
                message = "writing feature-sg";
                goto abort_transaction;
        }
        err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
        if (err) {
                message = "writing feature-gso-tcpv4";
                goto abort_transaction;
        }

        err = xs_transaction_end(xst, 0);
        if (err) {
                if (err == EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto free;
        }

        return 0;

 abort_transaction:
        xs_transaction_end(xst, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
 free:
        netif_free(info);
 out:
        return err;
}
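
/*
 * The XenStore keys written above form the frontend's half of the
 * connection handshake: the two ring grant references and the event
 * channel tell the backend where to map the shared rings and how to
 * signal us, request-rx-copy selects the copying receive path, and the
 * feature-* nodes advertise scatter-gather and TSO support so the
 * backend may negotiate them.
 */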

static int
setup_device(device_t dev, struct netfront_info *info)
{
        netif_tx_sring_t *txs;
        netif_rx_sring_t *rxs;
        int error;

        info->tx_ring_ref = GRANT_REF_INVALID;
        info->rx_ring_ref = GRANT_REF_INVALID;
        info->rx.sring = NULL;
        info->tx.sring = NULL;

        txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!txs) {
                error = ENOMEM;
                xenbus_dev_fatal(dev, error, "allocating tx ring page");
                goto fail;
        }
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
        error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
        if (error)
                goto fail;

        rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!rxs) {
                error = ENOMEM;
                xenbus_dev_fatal(dev, error, "allocating rx ring page");
                goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

        error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
        if (error)
                goto fail;

        error = xen_intr_alloc_and_bind_local_port(dev,
            xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
            INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);

        if (error) {
                xenbus_dev_fatal(dev, error,
                    "xen_intr_alloc_and_bind_local_port failed");
                goto fail;
        }

        return (0);

 fail:
        netif_free(info);
        return (error);
}

#ifdef INET
/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
        struct ifnet *ifp;
        struct ifaddr *ifa;

        ifp = info->xn_ifp;
        TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        arp_ifinit(ifp, ifa);
                        break;
                }
        }
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
        struct netfront_info *sc = device_get_softc(dev);

        DPRINTK("newstate=%d\n", newstate);

        switch (newstate) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
        case XenbusStateClosed:
        case XenbusStateReconfigured:
        case XenbusStateReconfiguring:
                break;
        case XenbusStateInitWait:
                if (xenbus_get_state(dev) != XenbusStateInitialising)
                        break;
                if (network_connect(sc) != 0)
                        break;
                xenbus_set_state(dev, XenbusStateConnected);
                break;
        case XenbusStateClosing:
                xenbus_set_state(dev, XenbusStateClosed);
                break;
        case XenbusStateConnected:
#ifdef INET
                netfront_send_fake_arp(dev, sc);
#endif
                break;
        }
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                if (sc->xn_cdata.rx_mbufs[i] != NULL) {
                        m_freem(sc->rx_mbufs[i]);
                        sc->rx_mbufs[i] = NULL;
                }
        }

        sc->rx.rsp_cons = 0;
        sc->xn_rx_if->req_prod = 0;
        sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                if (sc->tx_mbufs[i] != NULL) {
                        m_freem(sc->tx_mbufs[i]);
                        sc->xn_cdata.xn_tx_chain[i] = NULL;
                }
        }

        return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{

        return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}
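
/*
 * With 4 KiB pages the test above demands more than 20 free ring slots:
 * up to 18 fragment descriptors (MAX_TX_REQ_FRAGS) plus the 2 option
 * entries mentioned in the comment, such as the TSO extra-info slot.
 */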

static void
netif_release_tx_bufs(struct netfront_info *np)
{
        int i;

        for (i = 1; i <= NET_TX_RING_SIZE; i++) {
                struct mbuf *m;

                m = np->tx_mbufs[i];

                /*
                 * We assume that no kernel addresses are
                 * less than NET_TX_RING_SIZE.  Any entry
                 * in the table that is below this number
                 * must be an index from free-list tracking.
                 */
                if (((uintptr_t)m) <= NET_TX_RING_SIZE)
                        continue;
                gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
                gnttab_release_grant_reference(&np->gref_tx_head,
                    np->grant_tx_ref[i]);
                np->grant_tx_ref[i] = GRANT_REF_INVALID;
                add_id_to_freelist(np->tx_mbufs, i);
                np->xn_cdata.xn_tx_chain_cnt--;
                if (np->xn_cdata.xn_tx_chain_cnt < 0) {
                        panic("%s: tx_chain_cnt must be >= 0", __func__);
                }
                m_freem(m);
        }
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
        int otherend_id = xenbus_get_otherend_id(sc->xbdev);
        unsigned short id;
        struct mbuf *m_new;
        int i, batch_target, notify;
        RING_IDX req_prod;
        grant_ref_t ref;
        netif_rx_request_t *req;
        vm_offset_t vaddr;
        u_long pfn;

        req_prod = sc->rx.req_prod_pvt;

        if (__predict_false(sc->carrier == 0))
                return;

        /*
         * Allocate mbufs greedily, even though we batch updates to the
         * receive ring.  This creates a less bursty demand on the memory
         * allocator, and so should reduce the chance of failed allocation
         * requests both for ourselves and for other kernel subsystems.
         *
         * Here we attempt to maintain rx_target buffers in flight, counting
         * buffers that we have yet to process in the receive ring.
         */
        batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
        for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
                m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
                if (m_new == NULL) {
                        if (i != 0)
                                goto refill;
                        /* XXX set timer */
                        break;
                }
                m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

                /* queue the mbufs allocated */
                (void)mbufq_enqueue(&sc->xn_rx_batch, m_new);
        }

        /*
         * If we've allocated at least half of our target number of entries,
         * submit them to the backend - we have enough to make the overhead
         * of submission worthwhile.  Otherwise wait for more mbufs and
         * request entries to become available.
         */
        if (i < (sc->rx_target/2)) {
                if (req_prod > sc->rx.sring->req_prod)
                        goto push;
                return;
        }

        /*
         * Double the floating fill target if we risked having the backend
         * run out of empty buffers for receive traffic.  We define "running
         * low" as having less than a fourth of our target buffers free
         * at the time we refilled the queue.
         */
        if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
                sc->rx_target *= 2;
                if (sc->rx_target > sc->rx_max_target)
                        sc->rx_target = sc->rx_max_target;
        }

refill:
        for (i = 0; ; i++) {
                if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
                        break;

                m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
                    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

                id = xennet_rxidx(req_prod + i);

                KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
                sc->rx_mbufs[id] = m_new;

                ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
                KASSERT(ref != GNTTAB_LIST_END,
                    ("reserved grant references exhausted"));
                sc->grant_rx_ref[id] = ref;

                vaddr = mtod(m_new, vm_offset_t);
                pfn = vtophys(vaddr) >> PAGE_SHIFT;
                req = RING_GET_REQUEST(&sc->rx, req_prod + i);

                gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
                req->id = id;
                req->gref = ref;

                sc->rx_pfn_array[i] =
                    vtophys(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
        }

        KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
        KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
        /*
         * We may have allocated buffers which have entries outstanding
         * in the page update queue -- make sure we flush those first!
         */
        wmb();

        /* Above is a suitable barrier to ensure backend will see requests. */
        sc->rx.req_prod_pvt = req_prod + i;
push:
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
        if (notify)
                xen_intr_signal(sc->xen_intr_handle);
}
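
/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod and sets
 * 'notify' only if the backend asked (via req_event) to be woken for
 * one of the newly pushed requests, so the event-channel signal above
 * is suppressed while the backend is still actively polling the ring.
 */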

static void
xn_rxeof(struct netfront_info *np)
{
        struct ifnet *ifp;
#if (defined(INET) || defined(INET6))
        struct lro_ctrl *lro = &np->xn_lro;
        struct lro_entry *queued;
#endif
        struct netfront_rx_info rinfo;
        struct netif_rx_response *rx = &rinfo.rx;
        struct netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        struct mbuf *m;
        struct mbufq rxq, errq;
        int err, work_to_do;

        do {
                XN_RX_LOCK_ASSERT(np);
                if (!netfront_carrier_ok(np))
                        return;

                /* XXX: there should be some sane limit. */
                mbufq_init(&errq, INT_MAX);
                mbufq_init(&rxq, INT_MAX);

                ifp = np->xn_ifp;

                rp = np->rx.sring->rsp_prod;
                rmb(); /* Ensure we see queued responses up to 'rp'. */

                i = np->rx.rsp_cons;
                while (i != rp) {
                        memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
                        memset(extras, 0, sizeof(rinfo.extras));

                        m = NULL;
                        err = xennet_get_responses(np, &rinfo, rp, &i, &m);

                        if (__predict_false(err)) {
                                if (m)
                                        (void)mbufq_enqueue(&errq, m);
                                np->stats.rx_errors++;
                                continue;
                        }

                        m->m_pkthdr.rcvif = ifp;
                        if (rx->flags & NETRXF_data_validated) {
                                /* Tell the stack the checksums are okay */
                                /*
                                 * XXX this isn't necessarily the case - need to add
                                 * check
                                 */
                                m->m_pkthdr.csum_flags |=
                                    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
                                    | CSUM_PSEUDO_HDR);
                                m->m_pkthdr.csum_data = 0xffff;
                        }

                        np->stats.rx_packets++;
                        np->stats.rx_bytes += m->m_pkthdr.len;

                        (void)mbufq_enqueue(&rxq, m);
                        np->rx.rsp_cons = i;
                }

                mbufq_drain(&errq);

                /*
                 * Process all the mbufs after the remapping is complete.
                 * Break the mbuf chain first though.
                 */
                while ((m = mbufq_dequeue(&rxq)) != NULL) {
                        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

                        /*
                         * Do we really need to drop the rx lock?
                         */
                        XN_RX_UNLOCK(np);
#if (defined(INET) || defined(INET6))
                        /* Use LRO if possible */
                        if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
                            lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
                                /*
                                 * If LRO fails, pass up to the stack
                                 * directly.
                                 */
                                (*ifp->if_input)(ifp, m);
                        }
#else
                        (*ifp->if_input)(ifp, m);
#endif
                        XN_RX_LOCK(np);
                }

                np->rx.rsp_cons = i;

#if (defined(INET) || defined(INET6))
                /*
                 * Flush any outstanding LRO work
                 */
                while (!SLIST_EMPTY(&lro->lro_active)) {
                        queued = SLIST_FIRST(&lro->lro_active);
                        SLIST_REMOVE_HEAD(&lro->lro_active, next);
                        tcp_lro_flush(lro, queued);
                }
#endif

#if 0
                /* If we get a callback with very few responses, reduce fill target. */
                /* NB. Note exponential increase, linear decrease. */
                if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
                    ((3*np->rx_target) / 4)) &&
                    (--np->rx_target < np->rx_min_target))
                        np->rx_target = np->rx_min_target;
#endif

                network_alloc_rx_buffers(np);

                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
        } while (work_to_do);
}
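
/*
 * RING_FINAL_CHECK_FOR_RESPONSES() in xn_rxeof() above re-arms rsp_event
 * and then re-checks for responses that raced in before the store became
 * visible; looping while 'work_to_do' is set closes the window in which
 * the backend could produce a response without raising a new event.
 */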

static void
xn_txeof(struct netfront_info *np)
{
        RING_IDX i, prod;
        unsigned short id;
        struct ifnet *ifp;
        netif_tx_response_t *txr;
        struct mbuf *m;

        XN_TX_LOCK_ASSERT(np);

        if (!netfront_carrier_ok(np))
                return;

        ifp = np->xn_ifp;

        do {
                prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'prod'. */

                for (i = np->tx.rsp_cons; i != prod; i++) {
                        txr = RING_GET_RESPONSE(&np->tx, i);
                        if (txr->status == NETIF_RSP_NULL)
                                continue;

                        if (txr->status != NETIF_RSP_OKAY) {
                                printf("%s: WARNING: response is %d!\n",
                                    __func__, txr->status);
                        }
                        id = txr->id;
                        m = np->tx_mbufs[id];
                        KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
                        KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
                            ("mbuf already on the free list, but we're "
                            "trying to free it again!"));

                        /*
                         * Increment packet count if this is the last
                         * mbuf of the chain.
                         */
                        if (!m->m_next)
                                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                        if (__predict_false(gnttab_query_foreign_access(
                            np->grant_tx_ref[id]) != 0)) {
                                panic("%s: grant id %u still in use by the "
                                    "backend", __func__, id);
                        }
                        gnttab_end_foreign_access_ref(
                            np->grant_tx_ref[id]);
                        gnttab_release_grant_reference(
                            &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_REF_INVALID;

                        np->tx_mbufs[id] = NULL;
                        add_id_to_freelist(np->tx_mbufs, id);
                        np->xn_cdata.xn_tx_chain_cnt--;
                        m_freem(m);
                        /*
                         * Only mark the queue active if we've freed up at
                         * least one slot to try.
                         */
                        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                }
                np->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of
                 * tx_cons.  Note that it is essential to schedule a
                 * callback, no matter how few buffers are pending.  Even if
                 * there is space in the transmit ring, higher layers may
                 * be blocked because too much data is outstanding: in such
                 * cases notification from Xen is likely to be the only kick
                 * that we'll get.
                 */
                np->tx.sring->rsp_event =
                    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

                mb();
        } while (prod != np->tx.sring->rsp_prod);

        if (np->tx_full &&
            ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
                np->tx_full = 0;
#if 0
                if (np->user_state == UST_OPEN)
                        netif_wake_queue(dev);
#endif
        }
}
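
/*
 * The rsp_event value chosen in xn_txeof() above asks the backend for an
 * event once roughly half of the still-outstanding requests have
 * completed, batching completion interrupts without risking a stall.
 */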

static void
xn_intr(void *xsc)
{
        struct netfront_info *np = xsc;
        struct ifnet *ifp = np->xn_ifp;

#if 0
        if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
            likely(netfront_carrier_ok(np)) &&
            ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;
#endif
        if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
                XN_TX_LOCK(np);
                xn_txeof(np);
                XN_TX_UNLOCK(np);
        }

        XN_RX_LOCK(np);
        xn_rxeof(np);
        XN_RX_UNLOCK(np);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                xn_start(ifp);
}

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
        int new = xennet_rxidx(np->rx.req_prod_pvt);

        KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
        np->rx_mbufs[new] = m;
        np->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
        np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
        struct netif_extra_info *extra;
        int err = 0;

        do {
                struct mbuf *m;
                grant_ref_t ref;

                if (__predict_false(*cons + 1 == rp)) {
#if 0
                        if (net_ratelimit())
                                WPRINTK("Missing extra info\n");
#endif
                        err = EINVAL;
                        break;
                }

                extra = (struct netif_extra_info *)
                    RING_GET_RESPONSE(&np->rx, ++(*cons));

                if (__predict_false(!extra->type ||
                    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
                        if (net_ratelimit())
                                WPRINTK("Invalid extra type: %d\n",
                                    extra->type);
#endif
                        err = EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
                }

                m = xennet_get_rx_mbuf(np, *cons);
                ref = xennet_get_rx_ref(np, *cons);
                xennet_move_rx_slot(np, m, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return err;
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
        struct netif_rx_response *rx = &rinfo->rx;
        struct netif_extra_info *extras = rinfo->extras;
        struct mbuf *m, *m0, *m_prev;
        grant_ref_t ref = xennet_get_rx_ref(np, *cons);
        RING_IDX ref_cons = *cons;
        int frags = 1;
        int err = 0;
        u_long ret;

        m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

        if (rx->flags & NETRXF_extra_info) {
                err = xennet_get_extras(np, extras, rp, cons);
        }

        if (m0 != NULL) {
                m0->m_pkthdr.len = 0;
                m0->m_next = NULL;
        }

        for (;;) {
#if 0
                DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
                    rx->status, rx->offset, frags);
#endif
                if (__predict_false(rx->status < 0 ||
                    rx->offset + rx->status > PAGE_SIZE)) {
#if 0
                        if (net_ratelimit())
                                WPRINTK("rx->offset: %x, size: %u\n",
                                    rx->offset, rx->status);
#endif
                        xennet_move_rx_slot(np, m, ref);
                        if (m0 == m)
                                m0 = NULL;
                        m = NULL;
                        err = EINVAL;
                        goto next_skip_queue;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver.  In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_REF_INVALID) {
#if 0
                        if (net_ratelimit())
                                WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
                        printf("%s: Bad rx response id %d.\n", __func__, rx->id);
                        err = EINVAL;
                        goto next_skip_queue;
                }

                ret = gnttab_end_foreign_access_ref(ref);
                KASSERT(ret, ("Unable to end access to grant references"));

                gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
                if (m == NULL)
                        break;

                m->m_len = rx->status;
                m->m_data += rx->offset;
                m0->m_pkthdr.len += rx->status;

next_skip_queue:
                if (!(rx->flags & NETRXF_more_data))
                        break;

                if (*cons + frags == rp) {
                        if (net_ratelimit())
                                WPRINTK("Need more frags\n");
                        err = ENOENT;
                        printf("%s: cons %u frags %u rp %u, not enough frags\n",
                            __func__, *cons, frags, rp);
                        break;
                }

                /*
                 * Note that m can be NULL, if rx->status < 0 or if
                 * rx->offset + rx->status > PAGE_SIZE above.
                 */
                m_prev = m;

                rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
                m = xennet_get_rx_mbuf(np, *cons + frags);

                /*
                 * m_prev == NULL can happen if rx->status < 0 or if
                 * rx->offset + rx->status > PAGE_SIZE above.
                 */
                if (m_prev != NULL)
                        m_prev->m_next = m;

                /*
                 * m0 can be NULL if rx->status < 0 or if rx->offset +
                 * rx->status > PAGE_SIZE above.
                 */
                if (m0 == NULL)
                        m0 = m;
                m->m_next = NULL;
                ref = xennet_get_rx_ref(np, *cons + frags);
                ref_cons = *cons + frags;
                frags++;
        }
        *list = m0;
        *cons += frags;

        return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{

        XN_RX_LOCK_ASSERT(sc);
        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

        /* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
        struct netfront_info *sc;

        sc = xsc;
        XN_RX_LOCK(sc);
        xn_tick_locked(sc);
        XN_RX_UNLOCK(sc);
}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
        int nfrags;

        for (nfrags = 0; m != NULL; m = m->m_next)
                nfrags++;

        return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
        struct ifnet *ifp;
        struct mbuf *m;
        u_int nfrags;
        int otherend_id;

        ifp = sc->xn_ifp;

        /*
         * Defragment the mbuf if necessary.
         */
        nfrags = xn_count_frags(m_head);

        /*
         * Check to see whether this request is longer than netback
         * can handle, and try to defrag it.
         */
        /**
         * It is a bit lame, but the netback driver in Linux can't
         * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
         * the Linux network stack.
         */
        if (nfrags > sc->maxfrags) {
                m = m_defrag(m_head, M_NOWAIT);
                if (!m) {
                        /*
                         * Defrag failed, so free the mbuf and
                         * therefore drop the packet.
                         */
                        m_freem(m_head);
                        return (EMSGSIZE);
                }
                m_head = m;
        }

        /* Determine how many fragments now exist */
        nfrags = xn_count_frags(m_head);

        /*
         * Check to see whether the defragmented packet has too many
         * segments for the Linux netback driver.
         */
        /**
         * The FreeBSD TCP stack, with TSO enabled, can produce a chain
         * of mbufs longer than Linux can handle.  Make sure we don't
         * pass a too-long chain over to the other side by dropping the
         * packet.  It doesn't look like there is currently a way to
         * tell the TCP stack to generate a shorter chain of packets.
         */
        if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
                printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
                    "won't be able to handle it, dropping\n",
                    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
                m_freem(m_head);
                return (EMSGSIZE);
        }

        /*
         * This check should be redundant.  We've already verified that we
         * have enough slots in the ring to handle a packet of maximum
         * size, and that our packet is less than the maximum size.  Keep
         * it in here as an assert for now just to make certain that
         * xn_tx_chain_cnt is accurate.
         */
        KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
            ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
            "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
            (int) nfrags, (int) NET_TX_RING_SIZE));

        /*
         * Start packing the mbufs in this chain into
         * the fragment pointers.  Stop when we run out
         * of fragments or hit the end of the mbuf chain.
         */
        otherend_id = xenbus_get_otherend_id(sc->xbdev);
        for (m = m_head; m; m = m->m_next) {
                netif_tx_request_t *tx;
                uintptr_t id;
                grant_ref_t ref;
                u_long mfn; /* XXX Wrong type? */

                tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
                id = get_id_from_freelist(sc->tx_mbufs);
                if (id == 0)
                        panic("%s: was allocated the freelist head!\n",
                            __func__);
                sc->xn_cdata.xn_tx_chain_cnt++;
                if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
                        panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
                            __func__);
                sc->tx_mbufs[id] = m;
                tx->id = id;
                ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
                KASSERT((short)ref >= 0, ("Negative ref"));
                mfn = virt_to_mfn(mtod(m, vm_offset_t));
                gnttab_grant_foreign_access_ref(ref, otherend_id,
                    mfn, GNTMAP_readonly);
                tx->gref = sc->grant_tx_ref[id] = ref;
                tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
                tx->flags = 0;
                if (m == m_head) {
                        /*
                         * The first fragment has the entire packet
                         * size, subsequent fragments have just the
                         * fragment size.  The backend works out the
                         * true size of the first fragment by
                         * subtracting the sizes of the other
                         * fragments.
                         */
                        tx->size = m->m_pkthdr.len;

                        /*
                         * The first fragment contains the checksum flags
                         * and is optionally followed by extra data for
                         * TSO etc.
                         */
                        /**
                         * CSUM_TSO requires checksum offloading.
                         * Some versions of FreeBSD fail to
                         * set CSUM_TCP in the CSUM_TSO case,
                         * so we have to test for CSUM_TSO
                         * explicitly.
                         */
                        if (m->m_pkthdr.csum_flags
                            & (CSUM_DELAY_DATA | CSUM_TSO)) {
                                tx->flags |= (NETTXF_csum_blank
                                    | NETTXF_data_validated);
                        }
                        if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                                struct netif_extra_info *gso =
                                    (struct netif_extra_info *)
                                    RING_GET_REQUEST(&sc->tx,
                                    ++sc->tx.req_prod_pvt);

                                tx->flags |= NETTXF_extra_info;

                                gso->u.gso.size = m->m_pkthdr.tso_segsz;
                                gso->u.gso.type =
                                    XEN_NETIF_GSO_TYPE_TCPV4;
                                gso->u.gso.pad = 0;
                                gso->u.gso.features = 0;

                                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                                gso->flags = 0;
                        }
                } else {
                        tx->size = m->m_len;
                }
                if (m->m_next)
                        tx->flags |= NETTXF_more_data;

                sc->tx.req_prod_pvt++;
        }
        BPF_MTAP(ifp, m_head);

        sc->stats.tx_bytes += m_head->m_pkthdr.len;
        sc->stats.tx_packets++;

        return (0);
}

static void
xn_start_locked(struct ifnet *ifp)
{
        struct netfront_info *sc;
        struct mbuf *m_head;
        int notify;

        sc = ifp->if_softc;

        if (!netfront_carrier_ok(sc))
                return;

        /*
         * While we have enough transmit slots available for at least one
         * maximum-sized packet, pull mbufs off the queue and put them on
         * the transmit ring.
         */
        while (xn_tx_slot_available(sc)) {
                IF_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                if (xn_assemble_tx_request(sc, m_head) != 0)
                        break;
        }

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
        if (notify)
                xen_intr_signal(sc->xen_intr_handle);

        if (RING_FULL(&sc->tx)) {
                sc->tx_full = 1;
#if 0
                netif_stop_queue(dev);
#endif
        }
}

static void
xn_start(struct ifnet *ifp)
{
        struct netfront_info *sc;

        sc = ifp->if_softc;
        XN_TX_LOCK(sc);
        xn_start_locked(ifp);
        XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        xn_stop(sc);

        network_alloc_rx_buffers(sc);
        sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        if_link_state_change(ifp, LINK_STATE_UP);

        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
        struct netfront_info *sc = xsc;

        XN_LOCK(sc);
        xn_ifinit_locked(sc);
        XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct netfront_info *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
#ifdef INET
        struct ifaddr *ifa = (struct ifaddr *)data;
#endif

        int mask, error = 0;

        switch(cmd) {
        case SIOCSIFADDR:
#ifdef INET
                XN_LOCK(sc);
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                xn_ifinit_locked(sc);
                        arp_ifinit(ifp, ifa);
                        XN_UNLOCK(sc);
                } else {
                        XN_UNLOCK(sc);
#endif
                        error = ether_ioctl(ifp, cmd, data);
#ifdef INET
                }
#endif
                break;
        case SIOCSIFMTU:
                /* XXX can we alter the MTU on a VN ? */
#ifdef notyet
                if (ifr->ifr_mtu > XN_JUMBO_MTU)
                        error = EINVAL;
                else
#endif
                {
                        ifp->if_mtu = ifr->ifr_mtu;
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        xn_ifinit(sc);
                }
                break;
        case SIOCSIFFLAGS:
                XN_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /*
                         * If only the state of the PROMISC flag changed,
                         * then just use the 'set promisc mode' command
                         * instead of reinitializing the entire NIC.  Doing
                         * a full re-init means reloading the firmware and
                         * waiting for it to start up, which may take a
                         * second or two.
                         */
#ifdef notyet
                        /* No promiscuous mode with Xen */
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            ifp->if_flags & IFF_PROMISC &&
                            !(sc->xn_if_flags & IFF_PROMISC)) {
                                XN_SETBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            !(ifp->if_flags & IFF_PROMISC) &&
                            sc->xn_if_flags & IFF_PROMISC) {
                                XN_CLRBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else
#endif
                                xn_ifinit_locked(sc);
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                xn_stop(sc);
                        }
                }
                sc->xn_if_flags = ifp->if_flags;
                XN_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        if (IFCAP_TXCSUM & ifp->if_capenable) {
                                ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
                                ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
                                    | CSUM_IP | CSUM_TSO);
                        } else {
                                ifp->if_capenable |= IFCAP_TXCSUM;
                                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
                                    | CSUM_IP);
                        }
                }
                if (mask & IFCAP_RXCSUM) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                }
                if (mask & IFCAP_TSO4) {
                        if (IFCAP_TSO4 & ifp->if_capenable) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                ifp->if_hwassist &= ~CSUM_TSO;
                        } else if (IFCAP_TXCSUM & ifp->if_capenable) {
                                ifp->if_capenable |= IFCAP_TSO4;
                                ifp->if_hwassist |= CSUM_TSO;
                        } else {
                                IPRINTK("Xen requires tx checksum offload"
                                    " be enabled to use TSO\n");
                                error = EINVAL;
                        }
                }
                if (mask & IFCAP_LRO) {
                        ifp->if_capenable ^= IFCAP_LRO;
                }
                /*
                 * We must reset the interface so the backend picks up the
                 * new features.
                 */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        xn_ifinit(sc);
                }
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
        }

        return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        callout_stop(&sc->xn_stat_ch);

        xn_free_rx_ring(sc);
        xn_free_tx_ring(sc);

        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        if_link_state_change(ifp, LINK_STATE_DOWN);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
        int i, requeue_idx, error;
        grant_ref_t ref;
        netif_rx_request_t *req;
        u_int feature_rx_copy;

        error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
            "feature-rx-copy", NULL, "%u", &feature_rx_copy);
        if (error != 0)
                feature_rx_copy = 0;

        /* We only support rx copy. */
        if (!feature_rx_copy)
                return (EPROTONOSUPPORT);

        /* Recovery procedure: */
        error = talk_to_backend(np->xbdev, np);
        if (error != 0)
                return (error);

        /* Step 1: Reinitialise variables. */
        xn_query_features(np);
        xn_configure_features(np);
        netif_release_tx_bufs(np);

        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                struct mbuf *m;
                u_long pfn;

                if (np->rx_mbufs[i] == NULL)
                        continue;

                m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);

                req = RING_GET_REQUEST(&np->rx, requeue_idx);
                pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

                gnttab_grant_foreign_access_ref(ref,
                    xenbus_get_otherend_id(np->xbdev),
                    pfn, 0);

                req->gref = ref;
                req->id = requeue_idx;

                requeue_idx++;
        }

        np->rx.req_prod_pvt = requeue_idx;

        /*
         * Step 3: All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some
         * packets.
         */
        netfront_carrier_on(np);
        xen_intr_signal(np->xen_intr_handle);
        XN_TX_LOCK(np);
        xn_txeof(np);
        XN_TX_UNLOCK(np);
        network_alloc_rx_buffers(np);

        return (0);
}

static void
xn_query_features(struct netfront_info *np)
{
        int val;

        device_printf(np->xbdev, "backend features:");

        if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
            "feature-sg", NULL, "%d", &val) < 0)
                val = 0;

        np->maxfrags = 1;
        if (val) {
                np->maxfrags = MAX_TX_REQ_FRAGS;
                printf(" feature-sg");
        }

        if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
            "feature-gso-tcpv4", NULL, "%d", &val) < 0)
                val = 0;

        np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
        if (val) {
                np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
                printf(" feature-gso-tcp4");
        }

        printf("\n");
}

static int
xn_configure_features(struct netfront_info *np)
{
        int err, cap_enabled;

        err = 0;

        if (np->xn_resume &&
            ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities)
            == np->xn_ifp->if_capenable)) {
                /* Current options are available, no need to do anything. */
                return (0);
        }

        /* Try to preserve as many options as possible. */
        if (np->xn_resume)
                cap_enabled = np->xn_ifp->if_capenable;
        else
                cap_enabled = UINT_MAX;

#if (defined(INET) || defined(INET6))
        if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
                tcp_lro_free(&np->xn_lro);
#endif
        np->xn_ifp->if_capenable =
            np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
        np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if (defined(INET) || defined(INET6))
        if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
            (cap_enabled & IFCAP_LRO)) {
                err = tcp_lro_init(&np->xn_lro);
                if (err)
                        device_printf(np->xbdev, "LRO initialization failed\n");
                else {
                        np->xn_lro.ifp = np->xn_ifp;
                        np->xn_ifp->if_capenable |= IFCAP_LRO;
                }
        }
#endif
        if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
            (cap_enabled & IFCAP_TSO4)) {
                np->xn_ifp->if_capenable |= IFCAP_TSO4;
                np->xn_ifp->if_hwassist |= CSUM_TSO;
        }

        return (err);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
static int
create_netdev(device_t dev)
{
        int i;
        struct netfront_info *np;
        int err;
        struct ifnet *ifp;

        np = device_get_softc(dev);

        np->xbdev = dev;

        mtx_init(&np->tx_lock, "xntx", "netfront transmit lock", MTX_DEF);
        mtx_init(&np->rx_lock, "xnrx", "netfront receive lock", MTX_DEF);
        mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

        ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
        ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
        ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

        np->rx_target     = RX_MIN_TARGET;
        np->rx_min_target = RX_MIN_TARGET;
        np->rx_max_target = RX_MAX_TARGET;

        /* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
        for (i = 0; i <= NET_TX_RING_SIZE; i++) {
                np->tx_mbufs[i] = (void *) ((u_long) i+1);
                np->grant_tx_ref[i] = GRANT_REF_INVALID;
        }
        np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

        for (i = 0; i <= NET_RX_RING_SIZE; i++) {
                np->rx_mbufs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_REF_INVALID;
        }

        mbufq_init(&np->xn_rx_batch, INT_MAX);

        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
            &np->gref_tx_head) != 0) {
                IPRINTK("#### netfront can't alloc tx grant refs\n");
                err = ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
            &np->gref_rx_head) != 0) {
                WPRINTK("#### netfront can't alloc rx grant refs\n");
                gnttab_free_grant_references(np->gref_tx_head);
                err = ENOMEM;
                goto exit;
        }

        err = xen_net_read_mac(dev, np->mac);
        if (err) {
                gnttab_free_grant_references(np->gref_rx_head);
                gnttab_free_grant_references(np->gref_tx_head);
                goto exit;
        }

        /* Set up ifnet structure */
        ifp = np->xn_ifp = if_alloc(IFT_ETHER);
        ifp->if_softc = np;
        if_initname(ifp, "xn", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = xn_ioctl;
        ifp->if_start = xn_start;
#ifdef notyet
        ifp->if_watchdog = xn_watchdog;
#endif
        ifp->if_init = xn_ifinit;
        ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

        ifp->if_hwassist = XN_CSUM_FEATURES;
        ifp->if_capabilities = IFCAP_HWCSUM;
        ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
        ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
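
        /*
         * The TSO bounds above keep a worst-case offloaded request within
         * what the backend will accept: at most 64 KiB minus the Ethernet
         * header per packet, split across no more than MAX_TX_REQ_FRAGS
         * page-sized segments.
         */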

        ether_ifattach(ifp, np->mac);
        callout_init(&np->xn_stat_ch, 1);
        netfront_carrier_off(np);

        return (0);

exit:
        KASSERT(err != 0, ("Error path with no error code specified"));
        return (err);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
        struct netfront_info *info = dev->dev_driver_data;

        DPRINTK("netfront_closing: %s removed\n", dev->nodename);

        netif_free(info);

        xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        DPRINTK("%s\n", xenbus_get_node(dev));

        netif_free(info);

        return 0;
}

static void
netif_free(struct netfront_info *info)
{
        XN_LOCK(info);
        xn_stop(info);
        XN_UNLOCK(info);
        callout_drain(&info->xn_stat_ch);
        netif_disconnect_backend(info);
        if (info->xn_ifp != NULL) {
                ether_ifdetach(info->xn_ifp);
                if_free(info->xn_ifp);
                info->xn_ifp = NULL;
        }
        ifmedia_removeall(&info->sc_media);
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
        XN_RX_LOCK(info);
        XN_TX_LOCK(info);
        netfront_carrier_off(info);
        XN_TX_UNLOCK(info);
        XN_RX_UNLOCK(info);

        free_ring(&info->tx_ring_ref, &info->tx.sring);
        free_ring(&info->rx_ring_ref, &info->rx.sring);

        xen_intr_unbind(&info->xen_intr_handle);
}

static void
free_ring(int *ref, void *ring_ptr_ref)
{
        void **ring_ptr_ptr = ring_ptr_ref;

        if (*ref != GRANT_REF_INVALID) {
                /* This API frees the associated storage. */
                gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
                *ref = GRANT_REF_INVALID;
        }
        *ring_ptr_ptr = NULL;
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{

        return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

        ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
        ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         netfront_probe),
        DEVMETHOD(device_attach,        netfront_attach),
        DEVMETHOD(device_detach,        netfront_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),
        DEVMETHOD(device_suspend,       netfront_suspend),
        DEVMETHOD(device_resume,        netfront_resume),

        /* Xenbus interface */
        DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

        DEVMETHOD_END
};

static driver_t netfront_driver = {
        "xn",
        netfront_methods,
        sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);