 * Copyright (c) 2004-2006 Kip Macy
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"
#define XN_CSUM_FEATURES        (CSUM_TCP | CSUM_UDP | CSUM_TSO)

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
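/*
 * Sizing note (illustrative, assuming the usual 4 KiB PAGE_SIZE):
 * __RING_SIZE() rounds the number of slots that fit in one shared page
 * down to a power of two, so both rings come out to 256 entries here.
 */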
#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#endif
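/*
 * Usage sketch (illustrative): as a loader tunable this is set from
 * /boot/loader.conf, e.g. hw.xn.enable_lro="0" to disable LRO at boot;
 * netfront_attach() below additionally exposes the knob as a per-device
 * sysctl.
 */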
#ifdef CONFIG_XEN       /* XXX guard reconstructed; Linux-only module parameters. */
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif
#define MAX_SKB_FRAGS   (65536/PAGE_SIZE + 2)
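/* Worked value (illustrative): with PAGE_SIZE == 4096 this is 65536/4096 + 2 == 18 fragments. */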
#define RX_COPY_THRESHOLD       256

#define net_ratelimit() 0
struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_watchdog(struct ifnet *);

static void show_device(struct netfront_info *sc);

static void netfront_closing(device_t dev);

static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
    int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)
/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
        struct mbuf     *xn_tx_chain[NET_TX_RING_SIZE+1];
        struct mbuf     *xn_rx_chain[NET_RX_RING_SIZE+1];
};
struct net_device_stats
{
        u_long  rx_packets;             /* total packets received       */
        u_long  tx_packets;             /* total packets transmitted    */
        u_long  rx_bytes;               /* total bytes received         */
        u_long  tx_bytes;               /* total bytes transmitted      */
        u_long  rx_errors;              /* bad packets received         */
        u_long  tx_errors;              /* packet transmit problems     */
        u_long  rx_dropped;             /* no space in receive buffers  */
        u_long  tx_dropped;             /* no space available           */
        u_long  multicast;              /* multicast packets received   */

        /* detailed rx_errors: */
        u_long  rx_length_errors;
        u_long  rx_over_errors;         /* receiver ring buff overflow  */
        u_long  rx_crc_errors;          /* recved pkt with crc error    */
        u_long  rx_frame_errors;        /* recv'd frame alignment error */
        u_long  rx_fifo_errors;         /* recv'r fifo overrun          */
        u_long  rx_missed_errors;       /* receiver missed packet       */

        /* detailed tx_errors */
        u_long  tx_aborted_errors;
        u_long  tx_carrier_errors;
        u_long  tx_fifo_errors;
        u_long  tx_heartbeat_errors;
        u_long  tx_window_errors;

        u_long  rx_compressed;
        u_long  tx_compressed;
};
struct netfront_info {
        struct ifnet            *xn_ifp;
#if __FreeBSD_version >= 700000
        struct lro_ctrl         xn_lro;
#endif

        struct net_device_stats stats;

        netif_tx_front_ring_t   tx;
        netif_rx_front_ring_t   rx;

        device_t                xbdev;
        int                     tx_ring_ref;
        int                     rx_ring_ref;
        u_int                   irq;
        u_int                   carrier;
        u_int                   tx_full;
        u_int                   copying_receiver;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
        int rx_min_target, rx_max_target, rx_target;

        /*
         * {tx,rx}_skbs store outstanding skbuffs.  The first entry in each
         * array is an index into a chain of free entries.
         */
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
        uint8_t mac[ETHER_ADDR_LEN];
        int xn_if_flags;
        struct xn_chain_data    xn_cdata;       /* mbufs */
        struct mbuf_head xn_rx_batch;           /* head of the batch queue */

        struct mtx tx_lock;
        struct mtx rx_lock;
        struct sx  sc_lock;

        struct callout  xn_stat_ch;

        u_long rx_pfn_array[NET_RX_RING_SIZE];
        multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
        mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain
#define XN_LOCK_INIT(_sc, _name) \
        mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
        mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
        sx_init(&(_sc)->sc_lock, #_name"_sc")

#define XN_RX_LOCK(_sc)         mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)       mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)         mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)       mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)            sx_xlock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)          sx_xunlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)     sx_assert(&(_sc)->sc_lock, SX_LOCKED);
#define XN_RX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)    mtx_destroy(&(_sc)->rx_lock); \
                                mtx_destroy(&(_sc)->tx_lock); \
                                sx_destroy(&(_sc)->sc_lock);
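/*
 * Locking sketch (illustrative): the two mutexes protect the TX and RX data
 * paths independently, while the sx lock serializes slower configuration
 * changes, e.g.
 *
 *      XN_TX_LOCK(sc);
 *      xn_start_locked(ifp);
 *      XN_TX_UNLOCK(sc);
 */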
struct netfront_rx_info {
        struct netif_rx_response rx;
        struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)      ((netif)->carrier = 1)
#define netfront_carrier_off(netif)     ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)      ((netif)->carrier)
/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */
static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
        KASSERT(id != 0, ("add_id_to_freelist: the head item (0) must always be free."));
        list[id] = list[0];
        list[0]  = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
        u_int id = (u_int)(u_long)list[0];

        KASSERT(id != 0, ("get_id_from_freelist: the head item (0) must always remain free."));
        list[0] = list[id];
        return (id);
}
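/*
 * Worked example (illustrative): the freelist threads spare indices through
 * the mbuf pointer array itself.  If list[0] == (void *)3 and list[3] ==
 * (void *)7, get_id_from_freelist() returns 3 and leaves list[0] ==
 * (void *)7; a later add_id_to_freelist(list, 3) pushes 3 back on the head.
 */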
static inline int
xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}
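/*
 * Note (illustrative): the mask only works because the ring sizes are powers
 * of two; with 256 slots, an index of 260 maps to slot 260 & 255 == 4.
 */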
static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np,
    RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct mbuf *m;

        m = np->rx_mbufs[i];
        np->rx_mbufs[i] = NULL;
        return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];

        np->grant_rx_ref[i] = GRANT_INVALID_REF;
        return (ref);
}
#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
/*
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
        int error, i;
        char *s, *e, *macstr;

        error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL,
            (void **) &macstr);
        if (error)
                return (error);

        s = macstr;
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mac[i] = strtoul(s, &e, 16);
                if (s == e || (e[0] != ':' && e[0] != 0)) {
                        free(macstr, M_DEVBUF);
                        return (ENOENT);
                }
                s = &e[1];
        }
        free(macstr, M_DEVBUF);
        return (0);
}
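/*
 * Example input (illustrative): the backend publishes the address in the
 * store as a string such as "00:16:3e:5a:11:2c"; each loop iteration above
 * consumes one colon-separated octet via strtoul().
 */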
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to the
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

        if (!strcmp(xenbus_get_type(dev), "vif")) {
                device_set_desc(dev, "Virtual Network Interface");
                return (0);
        }
        return (ENXIO);
}
static int
netfront_attach(device_t dev)
{
        int err;

        err = create_netdev(dev);
        if (err) {
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

#if __FreeBSD_version >= 700000
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
            &xn_enable_lro, 0, "Large Receive Offload");
#endif

        return 0;
}
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        netif_disconnect_backend(info);
        return (0);
}
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
        const char *message;
        struct xenbus_transaction xbt;
        const char *node = xenbus_get_node(dev);
        int err;

        err = xen_net_read_mac(dev, info->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
                goto out;
        }

        /* Create shared ring, alloc event channel. */
        err = setup_device(dev, info);
        if (err)
                goto out;

 again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_ring;
        }
        err = xenbus_printf(xbt, node, "tx-ring-ref","%u",
            info->tx_ring_ref);
        if (err) {
                message = "writing tx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, node, "rx-ring-ref","%u",
            info->rx_ring_ref);
        if (err) {
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, node,
            "event-channel", "%u", irq_to_evtchn_port(info->irq));
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, node, "request-rx-copy", "%u",
            info->copying_receiver);
        if (err) {
                message = "writing request-rx-copy";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1);
        if (err) {
                message = "writing feature-rx-notify";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, node, "feature-sg", "%d", 1);
        if (err) {
                message = "writing feature-sg";
                goto abort_transaction;
        }
#if __FreeBSD_version >= 700000
        err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1);
        if (err) {
                message = "writing feature-gso-tcpv4";
                goto abort_transaction;
        }
#endif

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_ring;
        }

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
        netif_free(info);
 out:
        return err;
}
static int
setup_device(device_t dev, struct netfront_info *info)
{
        netif_tx_sring_t *txs;
        netif_rx_sring_t *rxs;
        int error;

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->rx.sring = NULL;
        info->tx.sring = NULL;

        txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!txs) {
                error = ENOMEM;
                xenbus_dev_fatal(dev, error, "allocating tx ring page");
                goto fail;
        }
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
        error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
        if (error)
                goto fail;

        rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!rxs) {
                error = ENOMEM;
                xenbus_dev_fatal(dev, error, "allocating rx ring page");
                goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
        error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
        if (error)
                goto fail;

        error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
            "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);
        if (error) {
                xenbus_dev_fatal(dev, error,
                    "bind_evtchn_to_irqhandler failed");
                goto fail;
        }

        show_device(info);

        return (0);

 fail:
        netif_free(info);
        return (error);
}
/*
 * If this interface has an IPv4 address, send an ARP for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
        struct ifnet *ifp = info->xn_ifp;
        struct ifaddr *ifa;

        TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                if (ifa->ifa_addr->sa_family == AF_INET)
                        arp_ifinit(ifp, ifa);
        }
}
/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
        struct netfront_info *sc = device_get_softc(dev);

        DPRINTK("newstate=%d\n", newstate);

        switch (newstate) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
        case XenbusStateReconfigured:
        case XenbusStateReconfiguring:
                break;
        case XenbusStateInitWait:
                if (xenbus_get_state(dev) != XenbusStateInitialising)
                        break;
                if (network_connect(sc) != 0)
                        break;
                xenbus_set_state(dev, XenbusStateConnected);
                netfront_send_fake_arp(dev, sc);
                break;
        case XenbusStateClosing:
                xenbus_set_state(dev, XenbusStateClosed);
                break;
        }
}
static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                if (sc->xn_cdata.xn_rx_chain[i] != NULL) {
                        m_freem(sc->xn_cdata.xn_rx_chain[i]);
                        sc->xn_cdata.xn_rx_chain[i] = NULL;
                }
        }

        sc->xn_rx_if->req_prod = 0;
        sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}
static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                if (sc->xn_cdata.xn_tx_chain[i] != NULL) {
                        m_freem(sc->xn_cdata.xn_tx_chain[i]);
                        sc->xn_cdata.xn_tx_chain[i] = NULL;
                }
        }
#endif
}
/*
 * Do some brief math on the number of descriptors available to
 * determine how many slots are available.
 *
 * Firstly - wouldn't something with RING_FREE_REQUESTS() be more applicable?
 * Secondly - MAX_SKB_FRAGS is a Linux construct which may not apply here.
 * Thirdly - it isn't used here anyway; the magic constant '24' is possibly
 * wrong.
 *
 * The "2" is presumably to ensure there are also enough slots available for
 * the ring entries used for "options" (eg, the TSO entry before a packet
 * is queued); I'm not sure why it's 2 and not 1.  Perhaps to make sure there's
 * a "free" node in the tx mbuf list (node 0) to represent the freelist?
 *
 * This only figures out whether any xenbus ring descriptors are available;
 * it doesn't at all reflect how many tx mbuf ring descriptors are also
 * available.
 */
static inline int
netfront_tx_slot_available(struct netfront_info *np)
{
        return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
            (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2));
}
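/*
 * Worked example (illustrative): with TX_MAX_TARGET == 256 this reports a
 * slot as available while fewer than 256 - 24 - 2 == 230 requests are
 * outstanding, keeping 26 slots in reserve for fragments and option entries.
 */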
static void
netif_release_tx_bufs(struct netfront_info *np)
{
        struct mbuf *m;
        int i;

        for (i = 1; i <= NET_TX_RING_SIZE; i++) {
                m = np->xn_cdata.xn_tx_chain[i];

                if (((u_long)m) < KERNBASE)
                        continue;
                gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
                    xenbus_get_otherend_id(np->xbdev),
                    virt_to_mfn(mtod(m, vm_offset_t)),
                    GNTMAP_readonly);
                gnttab_release_grant_reference(&np->gref_tx_head,
                    np->grant_tx_ref[i]);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(np->tx_mbufs, i);
                m_freem(m);
        }
}
static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
        int otherend_id = xenbus_get_otherend_id(sc->xbdev);
        unsigned short id;
        struct mbuf *m_new;
        int i, batch_target, notify;
        RING_IDX req_prod;
        struct xen_memory_reservation reservation;
        grant_ref_t ref;
        u_long pfn;
        netif_rx_request_t *req;
        vm_offset_t vaddr;
        int nr_flips;

        req_prod = sc->rx.req_prod_pvt;

        if (unlikely(sc->carrier == 0))
                return;

        /*
         * Allocate mbufs greedily, even though we batch updates to the
         * receive ring.  This creates a less bursty demand on the memory
         * allocator, so should reduce the chance of failed allocation
         * requests both for ourselves and for other kernel subsystems.
         */
        batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
        for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        break;

                m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
                if ((m_new->m_flags & M_EXT) == 0) {
                        m_freem(m_new);
                        break;
                }
                m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

                /* queue the mbufs allocated */
                mbufq_tail(&sc->xn_rx_batch, m_new);
        }

        /* Is the batch large enough to be worthwhile? */
        if (i < (sc->rx_target/2)) {
                if (req_prod > sc->rx.sring->req_prod)
                        goto push;
                return;
        }

        /* Adjust floating fill target if we risked running out of buffers. */
        if (((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
            ((sc->rx_target *= 2) > sc->rx_max_target))
                sc->rx_target = sc->rx_max_target;
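        /*
         * Illustrative dynamics: rx_target floats between rx_min_target (32)
         * and rx_max_target, doubling 32 -> 64 -> 128 -> ... when we nearly
         * run dry here, and stepping back down one buffer at a time in
         * xn_rxeof().
         */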
        for (nr_flips = i = 0; ; i++) {
                if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
                        break;

                m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
                    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

                id = xennet_rxidx(req_prod + i);

                KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
                    ("non-NULL xn_rx_chain"));
                sc->xn_cdata.xn_rx_chain[id] = m_new;

                ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
                KASSERT((short)ref >= 0, ("negative ref"));
                sc->grant_rx_ref[id] = ref;

                vaddr = mtod(m_new, vm_offset_t);
                pfn = vtophys(vaddr) >> PAGE_SHIFT;
                req = RING_GET_REQUEST(&sc->rx, req_prod + i);

                if (sc->copying_receiver == 0) {
                        gnttab_grant_foreign_transfer_ref(ref,
                            otherend_id, pfn);
                        sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                /* Remove this page before passing
                                 * back to Xen.
                                 */
                                set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                                MULTI_update_va_mapping(&sc->rx_mcl[i],
                                    vaddr, 0, 0);
                        }
                        nr_flips++;
                } else {
                        gnttab_grant_foreign_access_ref(ref,
                            otherend_id,
                            PFNTOMFN(pfn), 0);
                }
                req->id = id;
                req->gref = ref;

                sc->rx_pfn_array[i] =
                    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
        }

        KASSERT(i, ("no mbufs processed"));     /* should have returned earlier */
        KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
        /*
         * We may have allocated buffers which have entries outstanding
         * in the page update queue -- make sure we flush those first!
         */
        PT_UPDATES_FLUSH();

        /* Tell the balloon driver what is going on. */
        balloon_update_driver_allowance(i);

        set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
        reservation.nr_extents   = i;
        reservation.extent_order = 0;
        reservation.address_bits = 0;
        reservation.domid        = DOMID_SELF;
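        /*
         * Note (illustrative): each extent is a single order-0 page, so in
         * flip mode the XENMEM_decrease_reservation call below returns `i'
         * pages from rx_pfn_array to the hypervisor; the backend hands page
         * ownership back with each packet it delivers.
         */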
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* After all PTEs have been zapped, flush the TLB. */
                sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
                    UVMF_TLB_FLUSH|UVMF_ALL;

                /* Give away a batch of pages. */
                sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
                sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
                sc->rx_mcl[i].args[1] = (u_long)&reservation;

                /* Zap PTEs and give away pages in one big multicall. */
                (void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

                /* Check return status of HYPERVISOR_memory_op(). */
                if (unlikely(sc->rx_mcl[i].result != i))
                        panic("Unable to reduce memory reservation\n");
        } else {
                if (HYPERVISOR_memory_op(
                    XENMEM_decrease_reservation, &reservation) != i)
                        panic("Unable to reduce memory "
                            "reservation\n");
        }

 push:
        /* Above is a suitable barrier to ensure backend will see requests. */
        sc->rx.req_prod_pvt = req_prod + i;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
        if (notify)
                notify_remote_via_irq(sc->irq);
}
static void
xn_rxeof(struct netfront_info *np)
{
        struct ifnet *ifp;
#if __FreeBSD_version >= 700000
        struct lro_ctrl *lro = &np->xn_lro;
        struct lro_entry *queued;
#endif
        struct netfront_rx_info rinfo;
        struct netif_rx_response *rx = &rinfo.rx;
        struct netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        multicall_entry_t *mcl;
        struct mbuf *m;
        struct mbuf_head rxq, errq;
        int err, pages_flipped = 0, work_to_do;

        XN_RX_LOCK_ASSERT(np);
        if (!netfront_carrier_ok(np))
                return;

        mbufq_init(&errq);
        mbufq_init(&rxq);

        ifp = np->xn_ifp;

        do {
                rp = np->rx.sring->rsp_prod;
                rmb();  /* Ensure we see queued responses up to 'rp'. */

                i = np->rx.rsp_cons;
                while (i != rp) {
                        memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
                        memset(extras, 0, sizeof(rinfo.extras));

                        err = xennet_get_responses(np, &rinfo, rp, &m,
                            &pages_flipped);

                        if (unlikely(err)) {
                                mbufq_tail(&errq, m);
                                np->stats.rx_errors++;
                                i = np->rx.rsp_cons;
                                continue;
                        }

                        m->m_pkthdr.rcvif = ifp;
                        if (rx->flags & NETRXF_data_validated) {
                                /* Tell the stack the checksums are okay */
                                /*
                                 * XXX this isn't necessarily the case - need to
                                 * add check
                                 */
                                m->m_pkthdr.csum_flags |=
                                    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
                                    | CSUM_PSEUDO_HDR);
                                m->m_pkthdr.csum_data = 0xffff;
                        }

                        np->stats.rx_packets++;
                        np->stats.rx_bytes += m->m_pkthdr.len;

                        mbufq_tail(&rxq, m);
                        np->rx.rsp_cons = ++i;
                }

                if (pages_flipped) {
                        /* Some pages are no longer absent... */
                        balloon_update_driver_allowance(-pages_flipped);

                        /*
                         * Do all the remapping work, and M->P updates, in
                         * one big hypercall.
                         */
                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                mcl = np->rx_mcl + pages_flipped;
                                mcl->op = __HYPERVISOR_mmu_update;
                                mcl->args[0] = (u_long)np->rx_mmu;
                                mcl->args[1] = pages_flipped;
                                mcl->args[2] = 0;
                                mcl->args[3] = DOMID_SELF;
                                (void)HYPERVISOR_multicall(np->rx_mcl,
                                    pages_flipped + 1);
                        }
                }

                while ((m = mbufq_dequeue(&errq)))
                        m_freem(m);

                /*
                 * Process all the mbufs after the remapping is complete.
                 * Break the mbuf chain first though.
                 */
                while ((m = mbufq_dequeue(&rxq)) != NULL) {
                        ifp->if_ipackets++;

                        /*
                         * Do we really need to drop the rx lock?
                         */
                        XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000
                        /* Use LRO if possible */
                        if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
                            lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
                                /*
                                 * If LRO fails, pass up to the stack
                                 * directly.
                                 */
                                (*ifp->if_input)(ifp, m);
                        }
#else
                        (*ifp->if_input)(ifp, m);
#endif
                        XN_RX_LOCK(np);
                }

                np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000
                /*
                 * Flush any outstanding LRO work
                 */
                while (!SLIST_EMPTY(&lro->lro_active)) {
                        queued = SLIST_FIRST(&lro->lro_active);
                        SLIST_REMOVE_HEAD(&lro->lro_active, next);
                        tcp_lro_flush(lro, queued);
                }
#endif

                /* If we get a callback with very few responses, reduce fill target. */
                /* NB. Note exponential increase, linear decrease. */
                if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
                    ((3*np->rx_target) / 4)) &&
                    (--np->rx_target < np->rx_min_target))
                        np->rx_target = np->rx_min_target;

                network_alloc_rx_buffers(np);

                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
        } while (work_to_do);
}
static void
xn_txeof(struct netfront_info *np)
{
        RING_IDX i, prod;
        unsigned short id;
        struct ifnet *ifp;
        struct mbuf *m;
        netif_tx_response_t *txr;

        XN_TX_LOCK_ASSERT(np);

        if (!netfront_carrier_ok(np))
                return;

        ifp = np->xn_ifp;

        do {
                prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'prod'. */

                for (i = np->tx.rsp_cons; i != prod; i++) {
                        txr = RING_GET_RESPONSE(&np->tx, i);
                        if (txr->status == NETIF_RSP_NULL)
                                continue;

                        id = txr->id;
                        m = np->xn_cdata.xn_tx_chain[id];

                        /*
                         * Increment packet count if this is the last
                         * mbuf of the chain.
                         */
                        if (!m->m_next)
                                ifp->if_opackets++;
                        KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
                        if (unlikely(gnttab_query_foreign_access(
                            np->grant_tx_ref[id]) != 0)) {
                                printf("network_tx_buf_gc: warning "
                                    "-- grant still in use by backend "
                                    "domain.\n");
                                goto out;
                        }
                        gnttab_end_foreign_access_ref(
                            np->grant_tx_ref[id]);
                        gnttab_release_grant_reference(
                            &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;

                        np->xn_cdata.xn_tx_chain[id] = NULL;
                        add_id_to_freelist(np->xn_cdata.xn_tx_chain, id);
                        m_freem(m);
                }
                np->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of
                 * tx_cons.  Note that it is essential to schedule a
                 * callback, no matter how few buffers are pending.  Even if
                 * there is space in the transmit ring, higher layers may
                 * be blocked because too much data is outstanding: in such
                 * cases notification from Xen is likely to be the only kick
                 * that we'll get.
                 */
                np->tx.sring->rsp_event =
                    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
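                /*
                 * Worked example (illustrative): with prod == 100 and
                 * req_prod == 120, rsp_event becomes 100 + (20 >> 1) + 1 ==
                 * 111, so the backend notifies us again once about half of
                 * the outstanding requests have completed.
                 */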
        } while (prod != np->tx.sring->rsp_prod);

 out:
        if (np->tx_full &&
            ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
                np->tx_full = 0;
#if 0
                if (np->user_state == UST_OPEN)
                        netif_wake_queue(dev);
#endif
        }
}
static void
xn_intr(void *xsc)
{
        struct netfront_info *np = xsc;
        struct ifnet *ifp = np->xn_ifp;

#if 0
        if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
            likely(netfront_carrier_ok(np)) &&
            ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;
#endif
        if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
                XN_TX_LOCK(np);
                xn_txeof(np);
                XN_TX_UNLOCK(np);
        }

        XN_RX_LOCK(np);
        xn_rxeof(np);
        XN_RX_UNLOCK(np);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                xn_start(ifp);
}
static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
        int new = xennet_rxidx(np->rx.req_prod_pvt);

        KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
        np->rx_mbufs[new] = m;
        np->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
        np->rx.req_prod_pvt++;
}
static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp)
{
        struct netif_extra_info *extra;
        RING_IDX cons = np->rx.rsp_cons;
        int err = 0;

        do {
                struct mbuf *m;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                WPRINTK("Missing extra info\n");
                        err = EINVAL;
                        break;
                }

                extra = (struct netif_extra_info *)
                    RING_GET_RESPONSE(&np->rx, ++cons);

                if (unlikely(!extra->type ||
                    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                WPRINTK("Invalid extra type: %d\n",
                                    extra->type);
                        err = EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
                }

                m = xennet_get_rx_mbuf(np, cons);
                ref = xennet_get_rx_ref(np, cons);
                xennet_move_rx_slot(np, m, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        np->rx.rsp_cons = cons;
        return err;
}
static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp,
    struct mbuf **list,
    int *pages_flipped_p)
{
        int pages_flipped = *pages_flipped_p;
        struct mmu_update *mmu;
        struct multicall_entry *mcl;
        struct netif_rx_response *rx = &rinfo->rx;
        struct netif_extra_info *extras = rinfo->extras;
        RING_IDX cons = np->rx.rsp_cons;
        struct mbuf *m, *m0, *m_prev;
        grant_ref_t ref = xennet_get_rx_ref(np, cons);
        int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
        int frags = 1;
        int err = 0;
        u_long ret;

        m0 = m = m_prev = xennet_get_rx_mbuf(np, cons);

        if (rx->flags & NETRXF_extra_info) {
                err = xennet_get_extras(np, extras, rp);
                cons = np->rx.rsp_cons;
        }

        if (m0 != NULL) {
                m0->m_pkthdr.len = 0;
                m0->m_next = NULL;
        }

        for (;;) {
                u_long mfn;

#if 0
                printf("rx->status=%hd rx->offset=%hu frags=%u\n",
                    rx->status, rx->offset, frags);
#endif
                if (unlikely(rx->status < 0 ||
                    rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
                                WPRINTK("rx->offset: %x, size: %u\n",
                                    rx->offset, rx->status);
                        xennet_move_rx_slot(np, m, ref);
                        err = EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver.  In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                WPRINTK("Bad rx response id %d.\n", rx->id);
                        err = EINVAL;
                        goto next;
                }

                if (!np->copying_receiver) {
                        /* Memory pressure, insufficient buffer
                         * headroom, ...
                         */
                        if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
                                if (net_ratelimit())
                                        WPRINTK("Unfulfilled rx req "
                                            "(id=%d, st=%d).\n",
                                            rx->id, rx->status);
                                xennet_move_rx_slot(np, m, ref);
                                err = ENOMEM;
                                goto next;
                        }

                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                /* Remap the page. */
                                void *vaddr = mtod(m, void *);
                                uint32_t pfn;

                                mcl = np->rx_mcl + pages_flipped;
                                mmu = np->rx_mmu + pages_flipped;

                                MULTI_update_va_mapping(mcl, (u_long)vaddr,
                                    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
                                    PG_V | PG_M | PG_A, 0);
                                pfn = (uintptr_t)m->m_ext.ext_arg1;
                                mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
                                    MMU_MACHPHYS_UPDATE;
                                mmu->val = pfn;

                                set_phys_to_machine(pfn, mfn);
                        }
                        pages_flipped++;
                } else {
                        ret = gnttab_end_foreign_access_ref(ref);
                        KASSERT(ret, ("ret != 0"));
                }

                gnttab_release_grant_reference(&np->gref_rx_head, ref);

 next:
                if (m == NULL)
                        break;

                m->m_len = rx->status;
                m->m_data += rx->offset;
                m0->m_pkthdr.len += rx->status;

                if (!(rx->flags & NETRXF_more_data))
                        break;

                if (cons + frags == rp) {
                        if (net_ratelimit())
                                WPRINTK("Need more frags\n");
                        err = ENOENT;
                        break;
                }
                m_prev = m;

                rx = RING_GET_RESPONSE(&np->rx, cons + frags);
                m = xennet_get_rx_mbuf(np, cons + frags);

                m_prev->m_next = m;
                m->m_next = NULL;

                ref = xennet_get_rx_ref(np, cons + frags);
                frags++;
        }
        *list = m0;

        if (unlikely(frags > max)) {
                if (net_ratelimit())
                        WPRINTK("Too many frags\n");
                err = E2BIG;
        }

        if (unlikely(err))
                np->rx.rsp_cons = cons + frags;

        *pages_flipped_p = pages_flipped;

        return err;
}
static void
xn_tick_locked(struct netfront_info *sc)
{
        XN_RX_LOCK_ASSERT(sc);
        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

        /* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
        struct netfront_info *sc;

        sc = xsc;
        XN_RX_LOCK(sc);
        xn_tick_locked(sc);
        XN_RX_UNLOCK(sc);
}
static void
xn_start_locked(struct ifnet *ifp)
{
        int otherend_id;
        unsigned short id;
        struct mbuf *m_head, *m;
        struct netfront_info *sc;
        netif_tx_request_t *tx;
        netif_extra_info_t *extra;
        RING_IDX i;
        grant_ref_t ref;
        u_long mfn, tx_bytes;
        int notify, nfrags;

        sc = ifp->if_softc;
        otherend_id = xenbus_get_otherend_id(sc->xbdev);
        tx_bytes = 0;

        if (!netfront_carrier_ok(sc))
                return;

        for (i = sc->tx.req_prod_pvt; TRUE; i++) {
                IF_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                /*
                 * netfront_tx_slot_available() tries to do some math to
                 * ensure that there'll be enough xenbus ring slots available
                 * for the maximum number of packet fragments (and a couple more
                 * for what I guess are TSO and other ring entry items.)
                 */
                if (!netfront_tx_slot_available(sc)) {
                        IF_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }

                /*
                 * Defragment the mbuf if necessary.
                 */
                for (m = m_head, nfrags = 0; m; m = m->m_next)
                        nfrags++;
                if (nfrags > MAX_SKB_FRAGS) {
                        m = m_defrag(m_head, M_DONTWAIT);
                        if (!m) {
                                m_freem(m_head);
                                break;
                        }
                        m_head = m;
                }

                /*
                 * Start packing the mbufs in this chain into
                 * the fragment pointers.  Stop when we run out
                 * of fragments or hit the end of the mbuf chain.
                 */
                extra = NULL;
                for (m = m_head; m; m = m->m_next) {
                        tx = RING_GET_REQUEST(&sc->tx, i);
                        id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain);
                        sc->xn_cdata.xn_tx_chain[id] = m;
                        tx->id = id;
                        ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
                        KASSERT((short)ref >= 0, ("Negative ref"));
                        mfn = virt_to_mfn(mtod(m, vm_offset_t));
                        gnttab_grant_foreign_access_ref(ref, otherend_id,
                            mfn, GNTMAP_readonly);
                        tx->gref = sc->grant_tx_ref[id] = ref;
                        tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
                        tx->flags = 0;
                        if (m == m_head) {
                                /*
                                 * The first fragment has the entire packet
                                 * size, subsequent fragments have just the
                                 * fragment size.  The backend works out the
                                 * true size of the first fragment by
                                 * subtracting the sizes of the other
                                 * fragments.
                                 */
                                tx->size = m->m_pkthdr.len;

                                /*
                                 * The first fragment contains the
                                 * checksum flags and is optionally
                                 * followed by extra data for TSO etc.
                                 */
                                if (m->m_pkthdr.csum_flags
                                    & CSUM_DELAY_DATA) {
                                        tx->flags |= (NETTXF_csum_blank
                                            | NETTXF_data_validated);
                                }
#if __FreeBSD_version >= 700000
                                if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                                        struct netif_extra_info *gso =
                                            (struct netif_extra_info *)
                                            RING_GET_REQUEST(&sc->tx, ++i);

                                        if (extra)
                                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
                                        else
                                                tx->flags |= NETTXF_extra_info;

                                        gso->u.gso.size = m->m_pkthdr.tso_segsz;
                                        gso->u.gso.type =
                                            XEN_NETIF_GSO_TYPE_TCPV4;
                                        gso->u.gso.pad = 0;
                                        gso->u.gso.features = 0;

                                        gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                                        gso->flags = 0;
                                        extra = gso;
                                }
#endif
                        } else {
                                tx->size = m->m_len;
                        }
                        if (m->m_next) {
                                tx->flags |= NETTXF_more_data;
                                i++;
                        }
                }

                BPF_MTAP(ifp, m_head);

                sc->stats.tx_bytes += m_head->m_pkthdr.len;
                sc->stats.tx_packets++;
        }

        sc->tx.req_prod_pvt = i;
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
        if (notify)
                notify_remote_via_irq(sc->irq);

        if (RING_FULL(&sc->tx)) {
                sc->tx_full = 1;
#if 0
                netif_stop_queue(dev);
#endif
        }
}
static void
xn_start(struct ifnet *ifp)
{
        struct netfront_info *sc;

        sc = ifp->if_softc;
        XN_TX_LOCK(sc);
        xn_start_locked(ifp);
        XN_TX_UNLOCK(sc);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        xn_stop(sc);

        network_alloc_rx_buffers(sc);
        sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
        struct netfront_info *sc = xsc;

        XN_LOCK(sc);
        xn_ifinit_locked(sc);
        XN_UNLOCK(sc);
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct netfront_info *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        int mask, error = 0;

        switch(cmd) {
        case SIOCSIFADDR:
        case SIOCGIFADDR:
                XN_LOCK(sc);
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                xn_ifinit_locked(sc);
                        arp_ifinit(ifp, ifa);
                        XN_UNLOCK(sc);
                } else {
                        XN_UNLOCK(sc);
                        error = ether_ioctl(ifp, cmd, data);
                }
                break;
        case SIOCSIFMTU:
                /* XXX can we alter the MTU on a VN? */
                if (ifr->ifr_mtu > XN_JUMBO_MTU) {
                        error = EINVAL;
                        break;
                }

                ifp->if_mtu = ifr->ifr_mtu;
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                xn_ifinit(sc);
                break;
        case SIOCSIFFLAGS:
                XN_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /*
                         * If only the state of the PROMISC flag changed,
                         * then just use the 'set promisc mode' command
                         * instead of reinitializing the entire NIC.  Doing
                         * a full re-init means reloading the firmware and
                         * waiting for it to start up, which may take a
                         * second or two.
                         */
#ifdef notyet
                        /* No promiscuous mode with Xen */
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            ifp->if_flags & IFF_PROMISC &&
                            !(sc->xn_if_flags & IFF_PROMISC)) {
                                XN_SETBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            !(ifp->if_flags & IFF_PROMISC) &&
                            sc->xn_if_flags & IFF_PROMISC) {
                                XN_CLRBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else
#endif
                                xn_ifinit_locked(sc);
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                xn_stop(sc);
                        }
                }
                sc->xn_if_flags = ifp->if_flags;
                XN_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        if (IFCAP_TXCSUM & ifp->if_capenable) {
                                ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
                                ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
                                    | CSUM_IP | CSUM_TSO);
                        } else {
                                ifp->if_capenable |= IFCAP_TXCSUM;
                                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
                                    | CSUM_IP);
                        }
                }
                if (mask & IFCAP_RXCSUM) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                }
#if __FreeBSD_version >= 700000
                if (mask & IFCAP_TSO4) {
                        if (IFCAP_TSO4 & ifp->if_capenable) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                ifp->if_hwassist &= ~CSUM_TSO;
                        } else if (IFCAP_TXCSUM & ifp->if_capenable) {
                                ifp->if_capenable |= IFCAP_TSO4;
                                ifp->if_hwassist |= CSUM_TSO;
                        } else {
                                DPRINTK("Xen requires tx checksum offload"
                                    " be enabled to use TSO\n");
                                error = EINVAL;
                        }
                }
                if (mask & IFCAP_LRO) {
                        ifp->if_capenable ^= IFCAP_LRO;
                }
#endif
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
#ifdef notyet
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        XN_LOCK(sc);
                        /* XXX hypothetical hook; multicast filtering is unimplemented */
                        xn_setmulti(sc);
                        XN_UNLOCK(sc);
                }
#endif
                /* FALLTHROUGH */
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = EINVAL;
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
        }

        return (error);
}
static void
xn_stop(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        callout_stop(&sc->xn_stat_ch);

        xn_free_rx_ring(sc);
        xn_free_tx_ring(sc);

        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
        int i, requeue_idx, error;
        grant_ref_t ref;
        netif_rx_request_t *req;
        u_int feature_rx_copy, feature_rx_flip;

        error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
            "feature-rx-copy", NULL, "%u", &feature_rx_copy);
        if (error)
                feature_rx_copy = 0;
        error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
            "feature-rx-flip", NULL, "%u", &feature_rx_flip);
        if (error)
                feature_rx_flip = 1;

        /*
         * Copy packets on receive path if:
         *  (a) This was requested by the user, and the backend supports it; or
         *  (b) Flipping was requested, but this is unsupported by the backend.
         */
        np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
            (MODPARM_rx_flip && !feature_rx_flip));
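        /*
         * Worked out (illustrative): with the FreeBSD defaults above
         * (MODPARM_rx_copy == 1, MODPARM_rx_flip == 0) this reduces to
         * copying_receiver = feature_rx_copy, i.e. copy whenever the
         * backend advertises support for it.
         */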
        /* Recovery procedure: */
        error = talk_to_backend(np->xbdev, np);
        if (error)
                return (error);

        /* Step 1: Reinitialise variables. */
        netif_release_tx_bufs(np);

        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                struct mbuf *m;
                u_long pfn;

                if (np->rx_mbufs[i] == NULL)
                        continue;

                m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
                req = RING_GET_REQUEST(&np->rx, requeue_idx);
                pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

                if (!np->copying_receiver) {
                        gnttab_grant_foreign_transfer_ref(ref,
                            xenbus_get_otherend_id(np->xbdev),
                            pfn);
                } else {
                        gnttab_grant_foreign_access_ref(ref,
                            xenbus_get_otherend_id(np->xbdev),
                            PFNTOMFN(pfn), 0);
                }
                req->id = requeue_idx;
                req->gref = ref;
                requeue_idx++;
        }

        np->rx.req_prod_pvt = requeue_idx;

        /*
         * Step 3: All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some buffers.
         */
        netfront_carrier_on(np);
        notify_remote_via_irq(np->irq);
        XN_TX_LOCK(np);
        xn_txeof(np);
        XN_TX_UNLOCK(np);
        network_alloc_rx_buffers(np);

        return (0);
}
static void
show_device(struct netfront_info *sc)
{
#ifdef DEBUG
        if (sc) {
                IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
                    sc->xn_ifno,
                    be_state_name[sc->xn_backend_state],
                    sc->xn_user_state ? "open" : "closed",
                    sc->xn_evtchn,
                    sc->xn_irq,
                    sc->xn_tx_if,
                    sc->xn_rx_if);
        } else {
                IPRINTK("<vif NULL>\n");
        }
#endif
}
/** Create a network device.
 * @param handle device handle
 */
int
create_netdev(device_t dev)
{
        int i;
        struct netfront_info *np;
        int err;
        struct ifnet *ifp;

        np = device_get_softc(dev);

        np->xbdev         = dev;

        XN_LOCK_INIT(np, xennetif);
        np->rx_target     = RX_MIN_TARGET;
        np->rx_min_target = RX_MIN_TARGET;
        np->rx_max_target = RX_MAX_TARGET;

        /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
        for (i = 0; i <= NET_TX_RING_SIZE; i++) {
                np->tx_mbufs[i] = (void *) ((u_long) i+1);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }
        for (i = 0; i <= NET_RX_RING_SIZE; i++) {
                np->rx_mbufs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
        }
        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
            &np->gref_tx_head) < 0) {
                printf("#### netfront can't alloc tx grant refs\n");
                err = ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
            &np->gref_rx_head) < 0) {
                printf("#### netfront can't alloc rx grant refs\n");
                gnttab_free_grant_references(np->gref_tx_head);
                err = ENOMEM;
                goto exit;
        }

        err = xen_net_read_mac(dev, np->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac",
                    xenbus_get_node(dev));
                goto out;
        }

        /* Set up ifnet structure */
        ifp = np->xn_ifp = if_alloc(IFT_ETHER);
        ifp->if_softc = np;
        if_initname(ifp, "xn",  device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = xn_ioctl;
        ifp->if_output = ether_output;
        ifp->if_start = xn_start;
#ifdef notyet
        ifp->if_watchdog = xn_watchdog;
#endif
        ifp->if_init = xn_ifinit;
        ifp->if_mtu = ETHERMTU;
        ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

        ifp->if_hwassist = XN_CSUM_FEATURES;
        ifp->if_capabilities = IFCAP_HWCSUM;
#if __FreeBSD_version >= 700000
        ifp->if_capabilities |= IFCAP_TSO4;
        if (xn_enable_lro) {
                int err = tcp_lro_init(&np->xn_lro);
                if (err) {
                        device_printf(dev, "LRO initialization failed\n");
                        goto exit;
                }
                np->xn_lro.ifp = ifp;
                ifp->if_capabilities |= IFCAP_LRO;
        }
#endif
        ifp->if_capenable = ifp->if_capabilities;

        ether_ifattach(ifp, np->mac);
        callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
        netfront_carrier_off(np);

        return (0);

 exit:
        gnttab_free_grant_references(np->gref_tx_head);
 out:
        panic("do something smart");

}
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
netfront_closing(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        DPRINTK("netfront_closing: %s removed\n", xenbus_get_node(dev));

        xenbus_set_state(dev, XenbusStateClosed);
}
static int
netfront_detach(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        DPRINTK("%s\n", xenbus_get_node(dev));

        netif_free(info);

        return 0;
}

static void
netif_free(struct netfront_info *info)
{
        netif_disconnect_backend(info);
}
static void
netif_disconnect_backend(struct netfront_info *info)
{
        netfront_carrier_off(info);

        end_access(info->tx_ring_ref, info->tx.sring);
        end_access(info->rx_ring_ref, info->rx.sring);
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->tx.sring = NULL;
        info->rx.sring = NULL;

        if (info->irq)
                unbind_from_irqhandler(info->irq);
        info->irq = 0;
}
static void
end_access(int ref, void *page)
{
        if (ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(ref, page);
}
/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         netfront_probe),
        DEVMETHOD(device_attach,        netfront_attach),
        DEVMETHOD(device_detach,        netfront_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),
        DEVMETHOD(device_suspend,       bus_generic_suspend),
        DEVMETHOD(device_resume,        netfront_resume),

        /* Xenbus interface */
        DEVMETHOD(xenbus_backend_changed, netfront_backend_changed),

        { 0, 0 }
};

static driver_t netfront_driver = {
        "xn",
        netfront_methods,
        sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0);