/*-
 * Copyright (c) 2004-2006 Kip Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <machine/intr_machdep.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/xen/xenvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"
/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the
 * fly, but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#endif
#ifdef notyet
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif
/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
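
/*
 * Worked example: with the common 4KB page size this evaluates to
 * 65536 / 4096 + 2 = 18 fragments, the same value Linux derives for
 * MAX_SKB_FRAGS on 4KB pages.
 */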
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0
struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
static void xn_watchdog(struct ifnet *);

static void netfront_closing(device_t dev);

static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list, int *pages_flipped_p);
#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)
/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
    struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1];
    int          xn_tx_chain_cnt;
    struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1];
};
struct net_device_stats
{
    u_long rx_packets;        /* total packets received       */
    u_long tx_packets;        /* total packets transmitted    */
    u_long rx_bytes;          /* total bytes received         */
    u_long tx_bytes;          /* total bytes transmitted      */
    u_long rx_errors;         /* bad packets received         */
    u_long tx_errors;         /* packet transmit problems     */
    u_long rx_dropped;        /* no space in receive buffers  */
    u_long tx_dropped;        /* no transmit space available  */
    u_long multicast;         /* multicast packets received   */

    /* detailed rx_errors: */
    u_long rx_length_errors;
    u_long rx_over_errors;    /* receiver ring buffer overflow    */
    u_long rx_crc_errors;     /* received packet with CRC error   */
    u_long rx_frame_errors;   /* received frame alignment error   */
    u_long rx_fifo_errors;    /* receiver FIFO overrun            */
    u_long rx_missed_errors;  /* receiver missed packet           */

    /* detailed tx_errors */
    u_long tx_aborted_errors;
    u_long tx_carrier_errors;
    u_long tx_fifo_errors;
    u_long tx_heartbeat_errors;
    u_long tx_window_errors;

    u_long rx_compressed;
    u_long tx_compressed;
};
struct netfront_info {
    struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
    struct lro_ctrl xn_lro;
#endif

    struct net_device_stats stats;
    u_int tx_full;

    netif_tx_front_ring_t tx;
    netif_rx_front_ring_t rx;

    struct mtx tx_lock;
    struct mtx rx_lock;
    struct mtx sc_lock;

    xen_intr_handle_t xen_intr_handle;
    u_int copying_receiver;
    u_int carrier;
    u_int maxfrags;

    /* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
    int rx_min_target;
    int rx_max_target;
    int rx_target;

    grant_ref_t gref_tx_head;
    grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
    grant_ref_t gref_rx_head;
    grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

    device_t             xbdev;
    int                  tx_ring_ref;
    int                  rx_ring_ref;
    uint8_t              mac[ETHER_ADDR_LEN];
    struct xn_chain_data xn_cdata;        /* mbufs */
    struct mbuf_head     xn_rx_batch;     /* head of the batch queue */

    int                  xn_if_flags;
    struct callout       xn_stat_ch;

    u_long               rx_pfn_array[NET_RX_RING_SIZE];
    multicall_entry_t    rx_mcl[NET_RX_RING_SIZE+1];
    mmu_update_t         rx_mmu[NET_RX_RING_SIZE];
    struct ifmedia       sc_media;

    bool                 xn_resume;
};
#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain
#define XN_LOCK_INIT(_sc, _name) \
    mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
    mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
    mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)

#define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)              mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)            mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->rx_lock); \
                                  mtx_destroy(&(_sc)->tx_lock); \
                                  mtx_destroy(&(_sc)->sc_lock);
struct netfront_rx_info {
    struct netif_rx_response rx;
    struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
#define netfront_carrier_on(netif)   ((netif)->carrier = 1)
#define netfront_carrier_off(netif)  ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)   ((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */
static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{

    KASSERT(id != 0,
        ("%s: the head item (0) must always be free.", __func__));
    list[id] = list[0];
    list[0] = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
    uintptr_t id;

    id = (uintptr_t)list[0];
    KASSERT(id != 0,
        ("%s: the head item (0) must always remain free.", __func__));
    list[0] = list[id];
    return (id);
}
static inline unsigned short
xennet_rxidx(RING_IDX idx)
{

    return idx & (NET_RX_RING_SIZE - 1);
}
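
/*
 * Illustrative note: NET_RX_RING_SIZE is a power of two, so the mask
 * above is a cheap modulo -- e.g. with a 256-entry ring, ring index
 * 257 maps to buffer slot 257 & 255 == 1.
 */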
static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
    int i = xennet_rxidx(ri);
    struct mbuf *m;

    m = np->rx_mbufs[i];
    np->rx_mbufs[i] = NULL;
    return (m);
}
static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
    int i = xennet_rxidx(ri);
    grant_ref_t ref = np->grant_rx_ref[i];

    KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
    np->grant_rx_ref[i] = GRANT_REF_INVALID;
    return (ref);
}
#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
/*
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
    char *s, *e, *macstr;
    const char *path;
    int error = 0;
    int i;

    path = xenbus_get_node(dev);
    error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
    if (error == ENOENT) {
        /*
         * Deal with missing mac XenStore nodes on devices with
         * HVM emulation (the 'ioemu' configuration attribute)
         * enabled.
         *
         * The HVM emulator may execute in a stub device model
         * domain which lacks the permission, only given to Dom0,
         * to update the guest's XenStore tree.  For this reason,
         * the HVM emulator doesn't even attempt to write the
         * front-side mac node, even when operating in Dom0.
         * However, there should always be a mac listed in the
         * backend tree.  Fall back to this version if our query
         * of the front side XenStore location doesn't find
         * anything.
         */
        path = xenbus_get_otherend_path(dev);
        error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
    }
    if (error != 0) {
        xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
        return (error);
    }

    s = macstr;
    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        mac[i] = strtoul(s, &e, 16);
        if (s == e || (e[0] != ':' && e[0] != 0)) {
            free(macstr, M_XENBUS);
            return (ENOENT);
        }
        s = &e[1];
    }
    free(macstr, M_XENBUS);
    return (0);
}
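
/*
 * Illustrative example (hypothetical address): the XenStore "mac" node
 * holds a string such as "00:16:3e:00:11:22"; each loop iteration above
 * parses one colon-separated octet and advances s past the ':'.
 */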
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to the
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

    if (xen_disable_pv_nics != 0)
        return (ENXIO);

    if (!strcmp(xenbus_get_type(dev), "vif")) {
        device_set_desc(dev, "Virtual Network Interface");
        return (0);
    }

    return (ENXIO);
}
static int
netfront_attach(device_t dev)
{
    int err;

    err = create_netdev(dev);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "creating netdev");
        return (err);
    }

#if __FreeBSD_version >= 700000
    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_lro", CTLFLAG_RW,
        &xn_enable_lro, 0, "Large Receive Offload");
#endif

    return (0);
}
static int
netfront_suspend(device_t dev)
{
    struct netfront_info *info = device_get_softc(dev);

    XN_RX_LOCK(info);
    XN_TX_LOCK(info);
    netfront_carrier_off(info);
    XN_TX_UNLOCK(info);
    XN_RX_UNLOCK(info);
    return (0);
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
    struct netfront_info *info = device_get_softc(dev);

    if (xen_suspend_cancelled) {
        XN_RX_LOCK(info);
        XN_TX_LOCK(info);
        netfront_carrier_on(info);
        XN_TX_UNLOCK(info);
        XN_RX_UNLOCK(info);
        return (0);
    }

    info->xn_resume = true;
    netif_disconnect_backend(info);
    return (0);
}
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
    const char *message;
    struct xs_transaction xst;
    const char *node = xenbus_get_node(dev);
    int err;

    err = xen_net_read_mac(dev, info->mac);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
        goto out;
    }

    /* Create shared ring, alloc event channel. */
    err = setup_device(dev, info);
    if (err != 0)
        goto out;

 again:
    err = xs_transaction_start(&xst);
    if (err != 0) {
        xenbus_dev_fatal(dev, err, "starting transaction");
        goto free;
    }
    err = xs_printf(xst, node, "tx-ring-ref", "%u",
        info->tx_ring_ref);
    if (err != 0) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "rx-ring-ref", "%u",
        info->rx_ring_ref);
    if (err != 0) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xs_printf(xst, node,
        "event-channel", "%u",
        xen_intr_port(info->xen_intr_handle));
    if (err != 0) {
        message = "writing event-channel";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "request-rx-copy", "%u",
        info->copying_receiver);
    if (err != 0) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
    if (err != 0) {
        message = "writing feature-rx-notify";
        goto abort_transaction;
    }
    err = xs_printf(xst, node, "feature-sg", "%d", 1);
    if (err != 0) {
        message = "writing feature-sg";
        goto abort_transaction;
    }
#if __FreeBSD_version >= 700000
    err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
    if (err != 0) {
        message = "writing feature-gso-tcpv4";
        goto abort_transaction;
    }
#endif

    err = xs_transaction_end(xst, 0);
    if (err != 0) {
        if (err == EAGAIN)
            goto again;
        xenbus_dev_fatal(dev, err, "completing transaction");
        goto free;
    }

    return (0);

 abort_transaction:
    xs_transaction_end(xst, 1);
    xenbus_dev_fatal(dev, err, "%s", message);
 free:
    netif_free(info);
 out:
    return (err);
}
static int
setup_device(device_t dev, struct netfront_info *info)
{
    netif_tx_sring_t *txs;
    netif_rx_sring_t *rxs;
    int error;

    info->tx_ring_ref = GRANT_REF_INVALID;
    info->rx_ring_ref = GRANT_REF_INVALID;
    info->rx.sring = NULL;
    info->tx.sring = NULL;

    txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (txs == NULL) {
        error = ENOMEM;
        xenbus_dev_fatal(dev, error, "allocating tx ring page");
        goto fail;
    }
    SHARED_RING_INIT(txs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
    error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
    if (error != 0)
        goto fail;

    rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (rxs == NULL) {
        error = ENOMEM;
        xenbus_dev_fatal(dev, error, "allocating rx ring page");
        goto fail;
    }
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
    if (error != 0)
        goto fail;

    error = xen_intr_alloc_and_bind_local_port(dev,
        xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
        INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);
    if (error != 0) {
        xenbus_dev_fatal(dev, error,
            "xen_intr_alloc_and_bind_local_port failed");
        goto fail;
    }

    return (0);

 fail:
    netif_free(info);
    return (error);
}
#ifdef INET
/*
 * If this interface has an IPv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
    struct ifnet *ifp;
    struct ifaddr *ifa;

    ifp = info->xn_ifp;
    TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
        if (ifa->ifa_addr->sa_family == AF_INET) {
            arp_ifinit(ifp, ifa);
            break;
        }
    }
}
#endif
/*
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
    struct netfront_info *sc = device_get_softc(dev);

    DPRINTK("newstate=%d\n", newstate);

    switch (newstate) {
    case XenbusStateInitialising:
    case XenbusStateInitialised:
    case XenbusStateUnknown:
    case XenbusStateClosed:
    case XenbusStateReconfigured:
    case XenbusStateReconfiguring:
        break;
    case XenbusStateInitWait:
        if (xenbus_get_state(dev) != XenbusStateInitialising)
            break;
        if (network_connect(sc) != 0)
            break;
        xenbus_set_state(dev, XenbusStateConnected);
        break;
    case XenbusStateClosing:
        xenbus_set_state(dev, XenbusStateClosed);
        break;
    case XenbusStateConnected:
#ifdef INET
        netfront_send_fake_arp(dev, sc);
#endif
        break;
    }
}
static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
    int i;

    for (i = 0; i < NET_RX_RING_SIZE; i++) {
        if (sc->xn_cdata.rx_mbufs[i] != NULL) {
            m_freem(sc->rx_mbufs[i]);
            sc->rx_mbufs[i] = NULL;
        }
    }

    sc->rx.rsp_cons = 0;
    sc->xn_rx_if->req_prod = 0;
    sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}
static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
    int i;

    for (i = 0; i < NET_TX_RING_SIZE; i++) {
        if (sc->tx_mbufs[i] != NULL) {
            m_freem(sc->tx_mbufs[i]);
            sc->xn_cdata.xn_tx_chain[i] = NULL;
        }
    }
#endif
}
/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{

    return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}
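
/*
 * Worked example (illustrative): with 4KB pages MAX_TX_REQ_FRAGS is 18,
 * so the queue is only considered writable while more than 20 request
 * slots are free -- room for a maximally fragmented packet plus its
 * option entries.
 */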
static void
netif_release_tx_bufs(struct netfront_info *np)
{
    int i;

    for (i = 1; i <= NET_TX_RING_SIZE; i++) {
        struct mbuf *m;

        m = np->tx_mbufs[i];

        /*
         * We assume that no kernel addresses are
         * less than NET_TX_RING_SIZE.  Any entry
         * in the table that is below this number
         * must be an index from free-list tracking.
         */
        if (((uintptr_t)m) <= NET_TX_RING_SIZE)
            continue;
        gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
        gnttab_release_grant_reference(&np->gref_tx_head,
            np->grant_tx_ref[i]);
        np->grant_tx_ref[i] = GRANT_REF_INVALID;
        add_id_to_freelist(np->tx_mbufs, i);
        np->xn_cdata.xn_tx_chain_cnt--;
        if (np->xn_cdata.xn_tx_chain_cnt < 0) {
            panic("%s: tx_chain_cnt must be >= 0", __func__);
        }
        m_freem(m);
    }
}
static void
netif_release_rx_bufs_copy(struct netfront_info *np)
{
    struct mbuf *m;
    grant_ref_t ref;
    unsigned int i, busy, inuse;

    XN_RX_LOCK(np);

    for (busy = inuse = i = 0; i < NET_RX_RING_SIZE; i++) {
        ref = np->grant_rx_ref[i];

        if (ref == GRANT_REF_INVALID)
            continue;

        inuse++;

        m = np->rx_mbufs[i];

        if (!gnttab_end_foreign_access_ref(ref)) {
            busy++;
            continue;
        }

        gnttab_release_grant_reference(&np->gref_rx_head, ref);
        np->grant_rx_ref[i] = GRANT_REF_INVALID;
        add_id_to_freelist(np->rx_mbufs, i);

        m_freem(m);
    }

    if (busy != 0)
        device_printf(np->xbdev,
            "Unable to release %u of %u in use grant references out of %u total.\n",
            busy, inuse, (unsigned int)NET_RX_RING_SIZE);

    XN_RX_UNLOCK(np);
}
static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
    int otherend_id = xenbus_get_otherend_id(sc->xbdev);
    unsigned short id;
    struct mbuf *m_new;
    int i, batch_target, notify;
    RING_IDX req_prod;
    struct xen_memory_reservation reservation;
    grant_ref_t ref;
    int nr_flips;
    netif_rx_request_t *req;
    vm_offset_t vaddr;
    u_long pfn;

    req_prod = sc->rx.req_prod_pvt;

    if (__predict_false(sc->carrier == 0))
        return;

    /*
     * Allocate mbufs greedily, even though we batch updates to the
     * receive ring.  This creates a less bursty demand on the memory
     * allocator, and so should reduce the chance of failed allocation
     * requests both for ourself and for other kernel subsystems.
     *
     * Here we attempt to maintain rx_target buffers in flight, counting
     * buffers that we have yet to process in the receive ring.
     */
    batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
    for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
        MGETHDR(m_new, M_NOWAIT, MT_DATA);
        if (m_new == NULL) {
            printf("%s: MGETHDR failed\n", __func__);
            goto no_mbuf;
        }

        m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE);
        if ((m_new->m_flags & M_EXT) == 0) {
            printf("%s: m_cljget failed\n", __func__);
            m_freem(m_new);

no_mbuf:
            if (i != 0)
                goto refill;
            /*
             * XXX set timer
             */
            break;
        }
        m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

        /* queue the mbufs allocated */
        mbufq_tail(&sc->xn_rx_batch, m_new);
    }

    /*
     * If we've allocated at least half of our target number of entries,
     * submit them to the backend - we have enough to make the overhead
     * of submission worthwhile.  Otherwise wait for more mbufs and
     * request entries to become available.
     */
    if (i < (sc->rx_target/2)) {
        if (req_prod > sc->rx.sring->req_prod)
            goto push;
        return;
    }

    /*
     * Double floating fill target if we risked having the backend
     * run out of empty buffers for receive traffic.  We define "running
     * low" as having less than a fourth of our target buffers free
     * at the time we refilled the queue.
     */
    if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
        sc->rx_target *= 2;
        if (sc->rx_target > sc->rx_max_target)
            sc->rx_target = sc->rx_max_target;
    }
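
    /*
     * Worked example (illustrative): with rx_target at 32 and fewer
     * than 8 unconsumed request slots ahead of the backend, the target
     * doubles to 64; sustained pressure keeps doubling it until it is
     * clamped at rx_max_target.
     */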
refill:
    for (nr_flips = i = 0; ; i++) {
        if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
            break;

        m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
            vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

        id = xennet_rxidx(req_prod + i);

        KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
        sc->rx_mbufs[id] = m_new;

        ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
        KASSERT(ref != GNTTAB_LIST_END,
            ("reserved grant references exhausted"));
        sc->grant_rx_ref[id] = ref;

        vaddr = mtod(m_new, vm_offset_t);
        pfn = vtophys(vaddr) >> PAGE_SHIFT;
        req = RING_GET_REQUEST(&sc->rx, req_prod + i);

        if (sc->copying_receiver == 0) {
            gnttab_grant_foreign_transfer_ref(ref,
                otherend_id, pfn);
            sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
            if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /*
                 * Remove this page before passing
                 * back to Xen.
                 */
                set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                MULTI_update_va_mapping(&sc->rx_mcl[i],
                    vaddr, 0, 0);
            }
            nr_flips++;
        } else {
            gnttab_grant_foreign_access_ref(ref,
                otherend_id, PFNTOMFN(pfn), 0);
        }

        req->id = id;
        req->gref = ref;

        sc->rx_pfn_array[i] =
            vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
    }

    KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
    KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));

    /*
     * We may have allocated buffers which have entries outstanding
     * in the page update queue -- make sure we flush those first!
     */
    PT_UPDATES_FLUSH();

    if (nr_flips != 0) {
#ifdef notyet
        /* Tell the balloon driver what is going on. */
        balloon_update_driver_allowance(i);
#endif
        set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
        reservation.nr_extents   = i;
        reservation.extent_order = 0;
        reservation.address_bits = 0;
        reservation.domid        = DOMID_SELF;

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
            /* After all PTEs have been zapped, flush the TLB. */
            sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
                UVMF_TLB_FLUSH|UVMF_ALL;

            /* Give away a batch of pages. */
            sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
            sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
            sc->rx_mcl[i].args[1] = (u_long)&reservation;

            /* Zap PTEs and give away pages in one big multicall. */
            (void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

            if (__predict_false(sc->rx_mcl[i].result != i ||
                HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                &reservation) != i))
                panic("%s: unable to reduce memory "
                    "reservation\n", __func__);
        }
    } else {
        wmb();
    }

    /* Above is a suitable barrier to ensure backend will see requests. */
    sc->rx.req_prod_pvt = req_prod + i;
push:
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
    if (notify)
        xen_intr_signal(sc->xen_intr_handle);
}
static void
xn_rxeof(struct netfront_info *np)
{
    struct ifnet *ifp;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
    struct lro_ctrl *lro = &np->xn_lro;
    struct lro_entry *queued;
#endif
    struct netfront_rx_info rinfo;
    struct netif_rx_response *rx = &rinfo.rx;
    struct netif_extra_info *extras = rinfo.extras;
    RING_IDX i, rp;
    multicall_entry_t *mcl;
    struct mbuf *m;
    struct mbuf_head rxq, errq;
    int err, pages_flipped = 0, work_to_do;

    do {
        XN_RX_LOCK_ASSERT(np);
        if (!netfront_carrier_ok(np))
            return;

        mbufq_init(&errq);
        mbufq_init(&rxq);

        ifp = np->xn_ifp;

        rp = np->rx.sring->rsp_prod;
        rmb();	/* Ensure we see queued responses up to 'rp'. */

        i = np->rx.rsp_cons;
        while (i != rp) {
            memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
            memset(extras, 0, sizeof(rinfo.extras));

            m = NULL;
            err = xennet_get_responses(np, &rinfo, rp, &i, &m,
                &pages_flipped);

            if (__predict_false(err)) {
                if (m)
                    mbufq_tail(&errq, m);
                np->stats.rx_errors++;
                continue;
            }

            m->m_pkthdr.rcvif = ifp;
            if (rx->flags & NETRXF_data_validated) {
                /* Tell the stack the checksums are okay */
                /*
                 * XXX this isn't necessarily the case - need to add
                 * check
                 */
                m->m_pkthdr.csum_flags |=
                    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
                    | CSUM_PSEUDO_HDR);
                m->m_pkthdr.csum_data = 0xffff;
            }

            np->stats.rx_packets++;
            np->stats.rx_bytes += m->m_pkthdr.len;

            mbufq_tail(&rxq, m);
            np->rx.rsp_cons = i;
        }

        if (pages_flipped) {
            /* Some pages are no longer absent... */
#ifdef notyet
            balloon_update_driver_allowance(-pages_flipped);
#endif
            /*
             * Do all the remapping work, and M->P updates, in one big
             * hypercall.
             */
            if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                mcl = np->rx_mcl + pages_flipped;
                mcl->op = __HYPERVISOR_mmu_update;
                mcl->args[0] = (u_long)np->rx_mmu;
                mcl->args[1] = pages_flipped;
                mcl->args[2] = 0;
                mcl->args[3] = DOMID_SELF;
                (void)HYPERVISOR_multicall(np->rx_mcl,
                    pages_flipped + 1);
            }
        }

        while ((m = mbufq_dequeue(&errq)))
            m_freem(m);

        /*
         * Process all the mbufs after the remapping is complete.
         * Break the mbuf chain first though.
         */
        while ((m = mbufq_dequeue(&rxq)) != NULL) {
            ifp->if_ipackets++;

            /*
             * Do we really need to drop the rx lock?
             */
            XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
            /* Use LRO if possible */
            if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
                lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
                /*
                 * If LRO fails, pass up to the stack
                 * directly.
                 */
                (*ifp->if_input)(ifp, m);
            }
#else
            (*ifp->if_input)(ifp, m);
#endif
            XN_RX_LOCK(np);
        }

        np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
        /*
         * Flush any outstanding LRO work
         */
        while (!SLIST_EMPTY(&lro->lro_active)) {
            queued = SLIST_FIRST(&lro->lro_active);
            SLIST_REMOVE_HEAD(&lro->lro_active, next);
            tcp_lro_flush(lro, queued);
        }
#endif

        /* If we get a callback with very few responses, reduce fill target. */
        /* NB. Note exponential increase, linear decrease. */
        if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
            ((3*np->rx_target) / 4)) &&
            (--np->rx_target < np->rx_min_target))
            np->rx_target = np->rx_min_target;

        network_alloc_rx_buffers(np);

        RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
    } while (work_to_do);
}
static void
xn_txeof(struct netfront_info *np)
{
    RING_IDX i, prod;
    unsigned short id;
    struct ifnet *ifp;
    netif_tx_response_t *txr;
    struct mbuf *m;

    XN_TX_LOCK_ASSERT(np);

    if (!netfront_carrier_ok(np))
        return;

    ifp = np->xn_ifp;

    do {
        prod = np->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */

        for (i = np->tx.rsp_cons; i != prod; i++) {
            txr = RING_GET_RESPONSE(&np->tx, i);
            if (txr->status == NETIF_RSP_NULL)
                continue;

            if (txr->status != NETIF_RSP_OKAY) {
                printf("%s: WARNING: response is %d!\n",
                    __func__, txr->status);
            }
            id = txr->id;
            m = np->tx_mbufs[id];
            KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
            KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
                ("mbuf already on the free list, but we're "
                "trying to free it again!"));

            /*
             * Increment packet count if this is the last
             * mbuf of the chain.
             */
            if (!m->m_next)
                ifp->if_opackets++;
            if (__predict_false(gnttab_query_foreign_access(
                np->grant_tx_ref[id]) != 0)) {
                panic("%s: grant id %u still in use by the "
                    "backend", __func__, id);
            }
            gnttab_end_foreign_access_ref(
                np->grant_tx_ref[id]);
            gnttab_release_grant_reference(
                &np->gref_tx_head, np->grant_tx_ref[id]);
            np->grant_tx_ref[id] = GRANT_REF_INVALID;

            np->tx_mbufs[id] = NULL;
            add_id_to_freelist(np->tx_mbufs, id);
            np->xn_cdata.xn_tx_chain_cnt--;
            m_freem(m);
            /*
             * Only mark the queue active if we've freed up at
             * least one slot to try.
             */
            ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        }
        np->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of
         * tx_cons.  Note that it is essential to schedule a
         * callback, no matter how few buffers are pending.  Even if
         * there is space in the transmit ring, higher layers may
         * be blocked because too much data is outstanding: in such
         * cases notification from Xen is likely to be the only kick
         * that we'll get.
         */
        np->tx.sring->rsp_event =
            prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
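
        /*
         * Worked example (illustrative): if responses have been
         * consumed up to prod = 100 while requests extend to
         * req_prod = 110, rsp_event becomes 100 + (10 >> 1) + 1 = 106,
         * i.e. the backend notifies us again after retiring roughly
         * half of the outstanding requests.
         */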

        mb();
    } while (prod != np->tx.sring->rsp_prod);

    if (np->tx_full &&
        ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
        np->tx_full = 0;
#if 0
        if (np->user_state == UST_OPEN)
            netif_wake_queue(dev);
#endif
    }
}
static void
xn_intr(void *xsc)
{
    struct netfront_info *np = xsc;
    struct ifnet *ifp = np->xn_ifp;

#if 0
    if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
        likely(netfront_carrier_ok(np)) &&
        ifp->if_drv_flags & IFF_DRV_RUNNING))
        return;
#endif
    if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
        XN_TX_LOCK(np);
        xn_txeof(np);
        XN_TX_UNLOCK(np);
    }

    XN_RX_LOCK(np);
    xn_rxeof(np);
    XN_RX_UNLOCK(np);

    if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
        !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        xn_start(ifp);
}
static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
    int new = xennet_rxidx(np->rx.req_prod_pvt);

    KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
    np->rx_mbufs[new] = m;
    np->grant_rx_ref[new] = ref;
    RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
    RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
    np->rx.req_prod_pvt++;
}
static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
    struct netif_extra_info *extra;
    int err = 0;

    do {
        struct mbuf *m;
        grant_ref_t ref;

        if (__predict_false(*cons + 1 == rp)) {
#if 0
            if (net_ratelimit())
                WPRINTK("Missing extra info\n");
#endif
            err = EINVAL;
            break;
        }

        extra = (struct netif_extra_info *)
            RING_GET_RESPONSE(&np->rx, ++(*cons));

        if (__predict_false(!extra->type ||
            extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
            if (net_ratelimit())
                WPRINTK("Invalid extra type: %d\n",
                    extra->type);
#endif
            err = EINVAL;
        } else {
            memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
        }

        m = xennet_get_rx_mbuf(np, *cons);
        ref = xennet_get_rx_ref(np, *cons);
        xennet_move_rx_slot(np, m, ref);
    } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

    return err;
}
static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list,
    int *pages_flipped_p)
{
    int pages_flipped = *pages_flipped_p;
    struct mmu_update *mmu;
    struct multicall_entry *mcl;
    struct netif_rx_response *rx = &rinfo->rx;
    struct netif_extra_info *extras = rinfo->extras;
    struct mbuf *m, *m0, *m_prev;
    grant_ref_t ref = xennet_get_rx_ref(np, *cons);
    RING_IDX ref_cons = *cons;
    int frags = 1;
    int err = 0;
    u_long ret;

    m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

    if (rx->flags & NETRXF_extra_info) {
        err = xennet_get_extras(np, extras, rp, cons);
    }

    if (m0 != NULL) {
        m0->m_pkthdr.len = 0;
        m0->m_next = NULL;
    }

    for (;;) {
        u_long mfn;

#if 0
        DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
            rx->status, rx->offset, frags);
#endif
        if (__predict_false(rx->status < 0 ||
            rx->offset + rx->status > PAGE_SIZE)) {
#if 0
            if (net_ratelimit())
                WPRINTK("rx->offset: %x, size: %u\n",
                    rx->offset, rx->status);
#endif
            xennet_move_rx_slot(np, m, ref);
            if (m0 == m)
                m0 = NULL;
            m = NULL;
            err = EINVAL;
            goto next_skip_queue;
        }

        /*
         * This definitely indicates a bug, either in this driver or in
         * the backend driver.  In future this should flag the bad
         * situation to the system controller to reboot the backend.
         */
        if (ref == GRANT_REF_INVALID) {
#if 0
            if (net_ratelimit())
                WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
            printf("%s: Bad rx response id %d.\n", __func__, rx->id);
            err = EINVAL;
            goto next_skip_queue;
        }

        if (!np->copying_receiver) {
            /*
             * Memory pressure, insufficient buffer
             * headroom, ...
             */
            if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
                WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
                    rx->id, rx->status);
                xennet_move_rx_slot(np, m, ref);
                err = ENOMEM;
                goto next;
            }

            if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* Remap the page. */
                void *vaddr = mtod(m, void *);
                uint32_t pfn;

                mcl = np->rx_mcl + pages_flipped;
                mmu = np->rx_mmu + pages_flipped;

                MULTI_update_va_mapping(mcl, (u_long)vaddr,
                    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
                    PG_V | PG_M | PG_A, 0);
                pfn = (uintptr_t)m->m_ext.ext_arg1;
                mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
                    MMU_MACHPHYS_UPDATE;
                mmu->val = pfn;

                set_phys_to_machine(pfn, mfn);
            }
            pages_flipped++;
        } else {
            ret = gnttab_end_foreign_access_ref(ref);
            KASSERT(ret, ("ret != 0"));
        }

        gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
        if (m == NULL)
            break;

        m->m_len = rx->status;
        m->m_data += rx->offset;
        m0->m_pkthdr.len += rx->status;

next_skip_queue:
        if (!(rx->flags & NETRXF_more_data))
            break;

        if (*cons + frags == rp) {
            if (net_ratelimit())
                WPRINTK("Need more frags\n");
            err = ENOENT;
            printf("%s: cons %u frags %u rp %u, not enough frags\n",
                __func__, *cons, frags, rp);
            break;
        }

        /*
         * Note that m can be NULL, if rx->status < 0 or if
         * rx->offset + rx->status > PAGE_SIZE above.
         */
        m_prev = m;

        rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
        m = xennet_get_rx_mbuf(np, *cons + frags);

        /*
         * m_prev == NULL can happen if rx->status < 0 or if
         * rx->offset + rx->status > PAGE_SIZE above.
         */
        if (m_prev != NULL)
            m_prev->m_next = m;

        /*
         * m0 can be NULL if rx->status < 0 or if rx->offset +
         * rx->status > PAGE_SIZE above.
         */
        if (m0 == NULL)
            m0 = m;
        m->m_next = NULL;
        ref = xennet_get_rx_ref(np, *cons + frags);
        ref_cons = *cons + frags;
        frags++;
    }
    *list = m0;
    *cons += frags;
    *pages_flipped_p = pages_flipped;

    return (err);
}
static void
xn_tick_locked(struct netfront_info *sc)
{

    XN_RX_LOCK_ASSERT(sc);
    callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

    /* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
    struct netfront_info *sc;

    sc = xsc;
    XN_RX_LOCK(sc);
    xn_tick_locked(sc);
    XN_RX_UNLOCK(sc);
}
/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
    int nfrags;

    for (nfrags = 0; m != NULL; m = m->m_next)
        nfrags++;

    return (nfrags);
}
/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
    struct ifnet *ifp;
    struct mbuf *m;
    u_int nfrags;
    netif_extra_info_t *extra;
    int otherend_id;

    ifp = sc->xn_ifp;

    /*
     * Defragment the mbuf if necessary.
     */
    nfrags = xn_count_frags(m_head);

    /*
     * Check to see whether this request is longer than netback
     * can handle, and try to defrag it.
     *
     * It is a bit lame, but the netback driver in Linux can't
     * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
     * the Linux network stack.
     */
    if (nfrags > sc->maxfrags) {
        m = m_defrag(m_head, M_NOWAIT);
        if (!m) {
            /*
             * Defrag failed, so free the mbuf and
             * therefore drop the packet.
             */
            m_freem(m_head);
            return (EMSGSIZE);
        }
        m_head = m;
    }

    /* Determine how many fragments now exist */
    nfrags = xn_count_frags(m_head);

    /*
     * Check to see whether the defragmented packet has too many
     * segments for the Linux netback driver.
     *
     * The FreeBSD TCP stack, with TSO enabled, can produce a chain
     * of mbufs longer than Linux can handle.  Make sure we don't
     * pass a too-long chain over to the other side by dropping the
     * packet.  It doesn't look like there is currently a way to
     * tell the TCP stack to generate a shorter chain of packets.
     */
    if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
        printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
            "won't be able to handle it, dropping\n",
            __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
        m_freem(m_head);
        return (EMSGSIZE);
    }

    /*
     * This check should be redundant.  We've already verified that we
     * have enough slots in the ring to handle a packet of maximum
     * size, and that our packet is less than the maximum size.  Keep
     * it in here as an assert for now just to make certain that
     * xn_tx_chain_cnt is accurate.
     */
    KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
        ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
        "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
        (int) nfrags, (int) NET_TX_RING_SIZE));

    /*
     * Start packing the mbufs in this chain into
     * the fragment pointers.  Stop when we run out
     * of fragments or hit the end of the mbuf chain.
     */
    extra = NULL;
    otherend_id = xenbus_get_otherend_id(sc->xbdev);
    for (m = m_head; m; m = m->m_next) {
        netif_tx_request_t *tx;
        uintptr_t id;
        grant_ref_t ref;
        u_long mfn; /* XXX Wrong type? */

        tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
        id = get_id_from_freelist(sc->tx_mbufs);
        if (id == 0)
            panic("%s: was allocated the freelist head!\n",
                __func__);
        sc->xn_cdata.xn_tx_chain_cnt++;
        if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
            panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
                __func__);
        sc->tx_mbufs[id] = m;
        tx->id = id;
        ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
        KASSERT((short)ref >= 0, ("Negative ref"));
        mfn = virt_to_mfn(mtod(m, vm_offset_t));
        gnttab_grant_foreign_access_ref(ref, otherend_id,
            mfn, GNTMAP_readonly);
        tx->gref = sc->grant_tx_ref[id] = ref;
        tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
        tx->flags = 0;
        if (m == m_head) {
            /*
             * The first fragment has the entire packet
             * size, subsequent fragments have just the
             * fragment size.  The backend works out the
             * true size of the first fragment by
             * subtracting the sizes of the other
             * fragments.
             */
            tx->size = m->m_pkthdr.len;
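
            /*
             * Worked example (illustrative): a 3000-byte packet
             * split into fragments of 2048 and 952 bytes produces
             * requests with sizes 3000 and 952; the backend
             * recovers the first fragment's true length as
             * 3000 - 952 = 2048.
             */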
            /*
             * The first fragment contains the checksum flags
             * and is optionally followed by extra data for
             * TSO etc.
             *
             * CSUM_TSO requires checksum offloading.
             * Some versions of FreeBSD fail to
             * set CSUM_TCP in the CSUM_TSO case,
             * so we have to test for CSUM_TSO
             * explicitly.
             */
            if (m->m_pkthdr.csum_flags
                & (CSUM_DELAY_DATA | CSUM_TSO)) {
                tx->flags |= (NETTXF_csum_blank
                    | NETTXF_data_validated);
            }
#if __FreeBSD_version >= 700000
            if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                struct netif_extra_info *gso =
                    (struct netif_extra_info *)
                    RING_GET_REQUEST(&sc->tx,
                        ++sc->tx.req_prod_pvt);

                tx->flags |= NETTXF_extra_info;

                gso->u.gso.size = m->m_pkthdr.tso_segsz;
                gso->u.gso.type =
                    XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
            }
#endif
        } else {
            tx->size = m->m_len;
        }
        if (m->m_next)
            tx->flags |= NETTXF_more_data;

        sc->tx.req_prod_pvt++;
    }
    BPF_MTAP(ifp, m_head);

    sc->stats.tx_bytes += m_head->m_pkthdr.len;
    sc->stats.tx_packets++;

    return (0);
}
static void
xn_start_locked(struct ifnet *ifp)
{
    struct netfront_info *sc;
    struct mbuf *m_head;
    int notify;

    sc = ifp->if_softc;

    if (!netfront_carrier_ok(sc))
        return;

    /*
     * While we have enough transmit slots available for at least one
     * maximum-sized packet, pull mbufs off the queue and put them on
     * the transmit ring.
     */
    while (xn_tx_slot_available(sc)) {
        IF_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (xn_assemble_tx_request(sc, m_head) != 0)
            break;
    }

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
    if (notify)
        xen_intr_signal(sc->xen_intr_handle);

    if (RING_FULL(&sc->tx)) {
        sc->tx_full = 1;
#if 0
        netif_stop_queue(dev);
#endif
    }
}

static void
xn_start(struct ifnet *ifp)
{
    struct netfront_info *sc;

    sc = ifp->if_softc;
    XN_TX_LOCK(sc);
    xn_start_locked(ifp);
    XN_TX_UNLOCK(sc);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
    struct ifnet *ifp;

    XN_LOCK_ASSERT(sc);

    ifp = sc->xn_ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        return;

    xn_stop(sc);

    network_alloc_rx_buffers(sc);
    sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    if_link_state_change(ifp, LINK_STATE_UP);

    callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
    struct netfront_info *sc = xsc;

    XN_LOCK(sc);
    xn_ifinit_locked(sc);
    XN_UNLOCK(sc);
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct netfront_info *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif

    int mask, error = 0;

    switch(cmd) {
    case SIOCSIFADDR:
#ifdef INET
        XN_LOCK(sc);
        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                xn_ifinit_locked(sc);
            arp_ifinit(ifp, ifa);
            XN_UNLOCK(sc);
        } else {
            XN_UNLOCK(sc);
#endif
            error = ether_ioctl(ifp, cmd, data);
#ifdef INET
        }
#endif
        break;
    case SIOCSIFMTU:
        /* XXX can we alter the MTU on a VN ? */
#ifdef notyet
        if (ifr->ifr_mtu > XN_JUMBO_MTU)
            error = EINVAL;
        else
#endif
        {
            ifp->if_mtu = ifr->ifr_mtu;
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            xn_ifinit(sc);
        }
        break;
    case SIOCSIFFLAGS:
        XN_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the state of the PROMISC flag changed,
             * then just use the 'set promisc mode' command
             * instead of reinitializing the entire NIC.  Doing
             * a full re-init means reloading the firmware and
             * waiting for it to start up, which may take a
             * second or two.
             */
#ifdef notyet
            /* No promiscuous mode with Xen */
            if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                ifp->if_flags & IFF_PROMISC &&
                !(sc->xn_if_flags & IFF_PROMISC)) {
                XN_SETBIT(sc, XN_RX_MODE,
                    XN_RXMODE_RX_PROMISC);
            } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                !(ifp->if_flags & IFF_PROMISC) &&
                sc->xn_if_flags & IFF_PROMISC) {
                XN_CLRBIT(sc, XN_RX_MODE,
                    XN_RXMODE_RX_PROMISC);
            } else
#endif
                xn_ifinit_locked(sc);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                xn_stop(sc);
            }
        }
        sc->xn_if_flags = ifp->if_flags;
        XN_UNLOCK(sc);
        error = 0;
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_TXCSUM) {
            if (IFCAP_TXCSUM & ifp->if_capenable) {
                ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
                ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
                    | CSUM_IP | CSUM_TSO);
            } else {
                ifp->if_capenable |= IFCAP_TXCSUM;
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
                    | CSUM_IP);
            }
        }
        if (mask & IFCAP_RXCSUM) {
            ifp->if_capenable ^= IFCAP_RXCSUM;
        }
#if __FreeBSD_version >= 700000
        if (mask & IFCAP_TSO4) {
            if (IFCAP_TSO4 & ifp->if_capenable) {
                ifp->if_capenable &= ~IFCAP_TSO4;
                ifp->if_hwassist &= ~CSUM_TSO;
            } else if (IFCAP_TXCSUM & ifp->if_capenable) {
                ifp->if_capenable |= IFCAP_TSO4;
                ifp->if_hwassist |= CSUM_TSO;
            } else {
                IPRINTK("Xen requires tx checksum offload"
                    " be enabled to use TSO\n");
                error = EINVAL;
            }
        }
        if (mask & IFCAP_LRO) {
            ifp->if_capenable ^= IFCAP_LRO;
        }
#endif
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
#ifdef notyet
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            XN_LOCK(sc);
            xn_setmulti(sc);
            XN_UNLOCK(sc);
            error = 0;
        }
#endif
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
    }

    return (error);
}
static void
xn_stop(struct netfront_info *sc)
{
    struct ifnet *ifp;

    XN_LOCK_ASSERT(sc);

    ifp = sc->xn_ifp;

    callout_stop(&sc->xn_stat_ch);

    xn_free_rx_ring(sc);
    xn_free_tx_ring(sc);

    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    if_link_state_change(ifp, LINK_STATE_DOWN);
}
/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
    int i, requeue_idx, error;
    grant_ref_t ref;
    netif_rx_request_t *req;
    u_int feature_rx_copy, feature_rx_flip;

    error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-rx-copy", NULL, "%u", &feature_rx_copy);
    if (error != 0)
        feature_rx_copy = 0;
    error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-rx-flip", NULL, "%u", &feature_rx_flip);
    if (error != 0)
        feature_rx_flip = 1;

    /*
     * Copy packets on receive path if:
     *  (a) This was requested by user, and the backend supports it; or
     *  (b) Flipping was requested, but this is unsupported by the backend.
     */
    np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
        (MODPARM_rx_flip && !feature_rx_flip));
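
    /*
     * Illustrative note: with the defaults above (MODPARM_rx_copy = 1,
     * MODPARM_rx_flip = 0) this expression reduces to feature_rx_copy,
     * i.e. copying is used whenever the backend advertises it and
     * flipping is never chosen.
     */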
    /* Recovery procedure: */
    error = talk_to_backend(np->xbdev, np);
    if (error != 0)
        return (error);

    /* Step 1: Reinitialise variables. */
    xn_query_features(np);
    xn_configure_features(np);
    netif_release_tx_bufs(np);

    /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
        struct mbuf *m;
        u_long pfn;

        if (np->rx_mbufs[i] == NULL)
            continue;

        m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
        ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);

        req = RING_GET_REQUEST(&np->rx, requeue_idx);
        pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

        if (!np->copying_receiver) {
            gnttab_grant_foreign_transfer_ref(ref,
                xenbus_get_otherend_id(np->xbdev),
                pfn);
        } else {
            gnttab_grant_foreign_access_ref(ref,
                xenbus_get_otherend_id(np->xbdev),
                PFNTOMFN(pfn), 0);
        }
        req->gref = ref;
        req->id = requeue_idx;

        requeue_idx++;
    }

    np->rx.req_prod_pvt = requeue_idx;

    /*
     * Step 3: All public and private state should now be sane.  Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * buffers.
     */
    netfront_carrier_on(np);
    xen_intr_signal(np->xen_intr_handle);
    XN_TX_LOCK(np);
    xn_txeof(np);
    XN_TX_UNLOCK(np);
    network_alloc_rx_buffers(np);

    return (0);
}
static void
xn_query_features(struct netfront_info *np)
{
    int val;

    device_printf(np->xbdev, "backend features:");

    if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-sg", NULL, "%d", &val) < 0)
        val = 0;

    np->maxfrags = 1;
    if (val) {
        np->maxfrags = MAX_TX_REQ_FRAGS;
        printf(" feature-sg");
    }

    if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
        "feature-gso-tcpv4", NULL, "%d", &val) < 0)
        val = 0;

    np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
    if (val) {
        np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
        printf(" feature-gso-tcp4");
    }

    printf("\n");
}
static int
xn_configure_features(struct netfront_info *np)
{
    int err, cap_enabled;

    err = 0;

    if (np->xn_resume &&
        ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities)
        == np->xn_ifp->if_capenable)) {
        /* Current options are available, no need to do anything. */
        return (0);
    }

    /* Try to preserve as many options as possible. */
    if (np->xn_resume)
        cap_enabled = np->xn_ifp->if_capenable;
    else
        cap_enabled = UINT_MAX;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
    if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
        tcp_lro_free(&np->xn_lro);
#endif
    np->xn_ifp->if_capenable =
        np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
    np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
    if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
        (cap_enabled & IFCAP_LRO)) {
        err = tcp_lro_init(&np->xn_lro);
        if (err != 0) {
            device_printf(np->xbdev, "LRO initialization failed\n");
        } else {
            np->xn_lro.ifp = np->xn_ifp;
            np->xn_ifp->if_capenable |= IFCAP_LRO;
        }
    }
    if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
        (cap_enabled & IFCAP_TSO4)) {
        np->xn_ifp->if_capenable |= IFCAP_TSO4;
        np->xn_ifp->if_hwassist |= CSUM_TSO;
    }
#endif
    return (err);
}
/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
static int
create_netdev(device_t dev)
{
    int i;
    struct netfront_info *np;
    int err;
    struct ifnet *ifp;

    np = device_get_softc(dev);

    np->xbdev = dev;

    XN_LOCK_INIT(np, xennetif);

    ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
    ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

    np->rx_target     = RX_MIN_TARGET;
    np->rx_min_target = RX_MIN_TARGET;
    np->rx_max_target = RX_MAX_TARGET;

    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    for (i = 0; i <= NET_TX_RING_SIZE; i++) {
        np->tx_mbufs[i] = (void *) ((u_long) i+1);
        np->grant_tx_ref[i] = GRANT_REF_INVALID;
    }
    np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

    for (i = 0; i <= NET_RX_RING_SIZE; i++) {
        np->rx_mbufs[i] = NULL;
        np->grant_rx_ref[i] = GRANT_REF_INVALID;
    }

    /* A grant for every tx ring slot */
    if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
        &np->gref_tx_head) != 0) {
        IPRINTK("#### netfront can't alloc tx grant refs\n");
        err = ENOMEM;
        goto exit;
    }
    /* A grant for every rx ring slot */
    if (gnttab_alloc_grant_references(RX_MAX_TARGET,
        &np->gref_rx_head) != 0) {
        WPRINTK("#### netfront can't alloc rx grant refs\n");
        gnttab_free_grant_references(np->gref_tx_head);
        err = ENOMEM;
        goto exit;
    }

    err = xen_net_read_mac(dev, np->mac);
    if (err != 0)
        goto out;

    /* Set up ifnet structure */
    ifp = np->xn_ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = np;
    if_initname(ifp, "xn", device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = xn_ioctl;
    ifp->if_output = ether_output;
    ifp->if_start = xn_start;
#ifdef notyet
    ifp->if_watchdog = xn_watchdog;
#endif
    ifp->if_init = xn_ifinit;
    ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

    ifp->if_hwassist = XN_CSUM_FEATURES;
    ifp->if_capabilities = IFCAP_HWCSUM;
    ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
    ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

    ether_ifattach(ifp, np->mac);
    callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
    netfront_carrier_off(np);

    return (0);

exit:
    gnttab_free_grant_references(np->gref_tx_head);
out:
    return (err);
}
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
netfront_closing(device_t dev)
{
#if 0
    /* Linux-style teardown; not used on FreeBSD. */
    struct netfront_info *info = dev->dev_driver_data;

    DPRINTK("netfront_closing: %s removed\n", dev->nodename);
#endif

    xenbus_set_state(dev, XenbusStateClosed);
}
static int
netfront_detach(device_t dev)
{
    struct netfront_info *info = device_get_softc(dev);

    DPRINTK("%s\n", xenbus_get_node(dev));

    netif_free(info);

    return 0;
}
static void
netif_free(struct netfront_info *info)
{

    callout_drain(&info->xn_stat_ch);
    netif_disconnect_backend(info);
    if (info->xn_ifp != NULL) {
        ether_ifdetach(info->xn_ifp);
        if_free(info->xn_ifp);
        info->xn_ifp = NULL;
    }
    ifmedia_removeall(&info->sc_media);
    netif_release_tx_bufs(info);
    if (info->copying_receiver)
        netif_release_rx_bufs_copy(info);

    gnttab_free_grant_references(info->gref_tx_head);
    gnttab_free_grant_references(info->gref_rx_head);
}
static void
netif_disconnect_backend(struct netfront_info *info)
{
    XN_RX_LOCK(info);
    XN_TX_LOCK(info);
    netfront_carrier_off(info);
    XN_TX_UNLOCK(info);
    XN_RX_UNLOCK(info);

    free_ring(&info->tx_ring_ref, &info->tx.sring);
    free_ring(&info->rx_ring_ref, &info->rx.sring);

    xen_intr_unbind(&info->xen_intr_handle);
}
static void
free_ring(int *ref, void *ring_ptr_ref)
{
    void **ring_ptr_ptr = ring_ptr_ref;

    if (*ref != GRANT_REF_INVALID) {
        /* This API frees the associated storage. */
        gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
        *ref = GRANT_REF_INVALID;
    }
    *ring_ptr_ptr = NULL;
}
static int
xn_ifmedia_upd(struct ifnet *ifp)
{

    return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

    ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
    ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}
/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     netfront_probe),
    DEVMETHOD(device_attach,    netfront_attach),
    DEVMETHOD(device_detach,    netfront_detach),
    DEVMETHOD(device_shutdown,  bus_generic_shutdown),
    DEVMETHOD(device_suspend,   netfront_suspend),
    DEVMETHOD(device_resume,    netfront_resume),

    /* Xenbus interface */
    DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

    DEVMETHOD_END
};

static driver_t netfront_driver = {
    "xn",
    netfront_methods,
    sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);