/*
 * Copyright (c) 2004-2006 Kip Macy
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/hypervisor.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <dev/xen/netfront/mbufq.h>
#include <machine/xen/features.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"

#define GRANT_INVALID_REF	0
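
/*
 * Each shared ring occupies exactly one page; __RING_SIZE() computes how
 * many request/response slots of the given ring type fit in that page
 * (rounded down to a power of two).
 */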
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

#define RX_COPY_THRESHOLD 256
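
/*
 * Linux rate-limits its console diagnostics with net_ratelimit(); here it
 * is stubbed out to 0, so the warnings guarded by it never print.
 */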
#define net_ratelimit() 0

struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);

static void netfront_closing(device_t dev);

static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
    int *pages_flipped_p);
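
/*
 * virt_to_mfn() maps a kernel virtual address to the machine frame number
 * backing it; INVALID_P2M_ENTRY marks a phys-to-machine slot whose page has
 * been handed back to the hypervisor while its frame is on loan (flipping).
 */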
#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct net_device_stats
{
	u_long	rx_packets;	/* total packets received	*/
	u_long	tx_packets;	/* total packets transmitted	*/
	u_long	rx_bytes;	/* total bytes received		*/
	u_long	tx_bytes;	/* total bytes transmitted	*/
	u_long	rx_errors;	/* bad packets received		*/
	u_long	tx_errors;	/* packet transmit problems	*/
	u_long	rx_dropped;	/* no space in linux buffers	*/
	u_long	tx_dropped;	/* no space available in linux	*/
	u_long	multicast;	/* multicast packets received	*/

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error	*/
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};

struct netfront_info {
	struct ifnet		*xn_ifp;

	struct net_device_stats	stats;
	u_int			tx_full;

	netif_tx_front_ring_t	tx;
	netif_rx_front_ring_t	rx;

	struct mtx		tx_lock;
	struct mtx		rx_lock;
	struct sx		sc_lock;

	u_int			irq;
	u_int			copying_receiver;
	u_int			carrier;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target, rx_max_target, rx_target;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
	 * array is an index into a chain of free entries.
	 */
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */

	int			xn_if_flags;
	struct callout		xn_stat_ch;

	u_long rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
	mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
	sx_init(&(_sc)->sc_lock, #_name"_sc")

#define XN_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)	mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)	mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)		sx_xlock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_lock, SX_LOCKED);
#define XN_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_lock); \
				mtx_destroy(&(_sc)->tx_lock); \
				sx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)
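
/*
 * The free list is threaded through the mbuf pointer array itself: a free
 * slot stores the index of the next free slot, cast to a pointer, with
 * entry 0 acting as the head of the chain.  Indices are always below
 * KERNBASE, so they can never be mistaken for real mbuf pointers (see
 * netif_release_tx_bufs()).
 */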
/* Access macros for acquiring/freeing slots in tx_skbs[]. */
static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
	list[id] = list[0];
	list[0]  = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	u_int id = (u_int)(u_long)list[0];

	list[0] = list[id];
	return (id);
}
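
/*
 * Ring indices grow without bound; xennet_rxidx() masks one down to a slot
 * in the rx mbuf/grant-ref arrays, which works because NET_RX_RING_SIZE is
 * a power of two.
 */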
static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m = np->rx_mbufs[i];

	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];

	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return (ref);
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)

static __inline struct mbuf *
makembuf(struct mbuf *buf)
{
	struct mbuf *m = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return (NULL);

	M_MOVE_PKTHDR(m, buf);

	m_cljget(m, M_DONTWAIT, MJUMPAGESIZE);
	m->m_pkthdr.len = buf->m_pkthdr.len;
	m->m_len = buf->m_len;
	m_copydata(buf, 0, buf->m_pkthdr.len, mtod(m, caddr_t));

	m->m_ext.ext_arg1 = (caddr_t *)(uintptr_t)(vtophys(mtod(m, caddr_t)) >> PAGE_SHIFT);

	return (m);
}
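
/*
 * makembuf() flattens the whole chain into one fresh cluster and stashes the
 * cluster's page frame number in m_ext.ext_arg1, mirroring the convention
 * the receive path uses to recover a buffer's PFN when remapping flipped
 * pages.
 */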

/*
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac array.
 * mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	char *s, *e;
	int i;

	char *macstr = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL);
	if (IS_ERR(macstr)) {
		return PTR_ERR(macstr);
	}
	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_DEVBUF);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_DEVBUF);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}
	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xenbus_printf(xbt, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node,
	    "event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "request-rx-copy", "%u",
	    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-no-csum-offload", "%d", 1);
	if (err) {
		message = "writing feature-no-csum-offload";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int err;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		err = ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto fail;
	info->tx_ring_ref = err;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		err = ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto fail;
	info->rx_ring_ref = err;

	network_connect(info);

	err = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, NULL);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
		    "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	show_device(info);

	return (0);

 fail:
	netif_free(info);
	return (err);
}

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
#ifdef notyet
		(void)send_fake_arp(netdev);
#endif
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_rx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_rx_chain[i]);
			sc->xn_cdata.xn_rx_chain[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_tx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_tx_chain[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}
#endif
}
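
/*
 * Keep MAX_SKB_FRAGS (24) plus two slots of headroom free so one maximally
 * fragmented packet always fits; xn_start_locked() stalls the interface
 * when this test fails.
 */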
static inline int
netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
	    (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2));
}

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	struct mbuf *m;
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		m = np->xn_cdata.xn_tx_chain[i];

		if (((u_long)m) < KERNBASE)
			continue;
		gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
		    xenbus_get_otherend_id(np->xbdev),
		    virt_to_mfn(mtod(m, vm_offset_t)),
		    GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(np->tx_mbufs, i);
		m_freem(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (unlikely(sc->carrier == 0))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			break;

		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}
	/* Adjust floating fill target if we risked running out of buffers. */
	if (((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
	    ((sc->rx_target *= 2) > sc->rx_max_target))
		sc->rx_target = sc->rx_max_target;

	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
		    ("non-NULL xn_rx_chain"));
		sc->xn_cdata.xn_rx_chain[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT((short)ref >= 0, ("negative ref"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/*
				 * Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id, PFNTOMFN(pfn), 0);
		}

		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();

	/* Tell the balloon driver what is going on. */
	balloon_update_driver_allowance(i);

	set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
	reservation.nr_extents   = i;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	reservation.domid        = DOMID_SELF;
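
	/*
	 * For flipped pages the frontend now returns the backing frames to
	 * the hypervisor with XENMEM_decrease_reservation; the backend will
	 * transfer packet-filled frames back in their place.
	 */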
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* After all PTEs have been zapped, flush the TLB. */
		sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
		    UVMF_TLB_FLUSH|UVMF_ALL;

		/* Give away a batch of pages. */
		sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
		sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
		sc->rx_mcl[i].args[1] = (u_long)&reservation;
		/* Zap PTEs and give away pages in one big multicall. */
		(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

		/* Check return status of HYPERVISOR_dom_mem_op(). */
		if (unlikely(sc->rx_mcl[i].result != i))
			panic("Unable to reduce memory reservation\n");
	} else {
		if (HYPERVISOR_memory_op(
		    XENMEM_decrease_reservation, &reservation) != i)
			panic("Unable to reduce memory reservation\n");
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);
}
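
/*
 * Harvest the receive ring: gather complete packets (and any errors) into
 * local queues, remap flipped pages in one multicall, and only then feed
 * the packets to the stack with the rx lock dropped.
 */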

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &m,
			    &pages_flipped);

			if (unlikely(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				i = np->rx.rsp_cons;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need
				 * to add check
				 */
				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
				     | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = ++i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
			balloon_update_driver_allowance(-pages_flipped);

			/*
			 * Do all the remapping work, and M->P updates, in
			 * one big hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
			(*ifp->if_input)(ifp, m);
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) &&
		    (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			id = RING_GET_RESPONSE(&np->tx, i)->id;
			m = np->xn_cdata.xn_tx_chain[id];

			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			if (unlikely(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				printf("network_tx_buf_gc: warning "
				    "-- grant still in use by backend "
				    "domain.\n");
				goto out;
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;

			np->xn_cdata.xn_tx_chain[id] = NULL;
			add_id_to_freelist(np->xn_cdata.xn_tx_chain, id);
			m_freem(m);
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons. Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();

	} while (prod != np->tx.sring->rsp_prod);

 out:
	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}
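
/*
 * One event channel interrupt serves both rings: reap completed transmits,
 * drain the receive ring, then kick the send queue if the interface is up
 * and packets are waiting.
 */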

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}
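
/*
 * Requeue a buffer the backend did not consume: hand its mbuf and grant
 * reference a fresh request slot at the private producer index.
 */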

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp)
{
	struct netif_extra_info *extra;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
				    extra->type);
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return (err);
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp,
    struct mbuf **list,
    int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	RING_IDX cons = np->rx.rsp_cons;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
		u_long mfn;

#if 0
		printf("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);
#endif
		if (unlikely(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
				    rx->offset, rx->status);
			xennet_move_rx_slot(np, m, ref);
			err = EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
			err = EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				if (net_ratelimit())
					WPRINTK("Unfulfilled rx req "
					    "(id=%d, st=%d).\n",
					    rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uint32_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

 next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			break;
		}
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		m = xennet_get_rx_mbuf(np, cons + frags);

		m_prev->m_next = m;
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}
	*list = m0;

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			WPRINTK("Too many frags\n");
		err = E2BIG;
	}

	if (err)
		np->rx.rsp_cons = cons + frags;

	*pages_flipped_p = pages_flipped;

	return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}

static void
xn_start_locked(struct ifnet *ifp)
{
	int otherend_id;
	unsigned short id;
	struct mbuf *m_head, *new_m;
	struct netfront_info *sc;
	netif_tx_request_t *tx;
	RING_IDX i;
	grant_ref_t ref;
	u_long mfn, tx_bytes;
	int notify;

	sc = ifp->if_softc;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	tx_bytes = 0;

	if (!netfront_carrier_ok(sc))
		return;

	for (i = sc->tx.req_prod_pvt; TRUE; i++) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (!netfront_tx_slot_available(sc)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain);

		/*
		 * Start packing the mbufs in this chain into
		 * the fragment pointers. Stop when we run out
		 * of fragments or hit the end of the mbuf chain.
		 */
		new_m = makembuf(m_head);
		tx = RING_GET_REQUEST(&sc->tx, i);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(new_m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->size = new_m->m_pkthdr.len;
#if 0
		tx->flags = (skb->ip_summed == CHECKSUM_HW) ? NETTXF_csum_blank : 0;
#endif
		new_m->m_next = NULL;
		new_m->m_nextpkt = NULL;

		m_freem(m_head);

		sc->xn_cdata.xn_tx_chain[id] = new_m;
		BPF_MTAP(ifp, new_m);

		sc->stats.tx_bytes += new_m->m_pkthdr.len;
		sc->stats.tx_packets++;
	}

	sc->tx.req_prod_pvt = i;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);

	xn_txeof(sc);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;

	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
			error = ether_ioctl(ifp, cmd, data);
		}
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN ? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
		}
#endif
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, err;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy, feature_rx_flip;

	err = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;
	err = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-flip", "%u", &feature_rx_flip);
	if (err != 1)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
	    (MODPARM_rx_flip && !feature_rx_flip));

	/* Recovery procedure: */
	err = talk_to_backend(np->xbdev, np);
	if (err)
		return (err);

	/* Step 1: Reinitialise variables. */
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		struct mbuf *m;

		if (np->rx_mbufs[i] == NULL)
			continue;

		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    vtophys(mtod(m, vm_offset_t)));
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    vtophys(mtod(m, vm_offset_t)), 0);
		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some buffers.
	 */
	netfront_carrier_on(np);
	notify_remote_via_irq(np->irq);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);

	return (0);
}

static void
show_device(struct netfront_info *sc)
{
#ifdef DEBUG
	if (sc) {
		IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
		    sc->xn_ifno,
		    be_state_name[sc->xn_backend_state],
		    sc->xn_user_state ? "open" : "closed",
		    sc->xn_evtchn,
		    sc->xn_irq,
		    sc->xn_tx_if,
		    sc->xn_rx_if);
	} else {
		IPRINTK("<vif NULL>\n");
	}
#endif
}

/** Create a network device.
 * @param handle device handle
 */
static int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	XN_LOCK_INIT(np, xennetif);
	np->rx_target     = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
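	/*
	 * Each free tx slot stores the index of the next free slot:
	 * tx_mbufs[i] gets (i + 1), forming the chain walked by
	 * get_id_from_freelist()/add_id_to_freelist(), with slot 0 as the
	 * head.
	 */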
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}
	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}
	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
	    &np->gref_tx_head) < 0) {
		printf("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
	    &np->gref_rx_head) < 0) {
		printf("#### netfront can't alloc rx grant refs\n");
		gnttab_free_grant_references(np->gref_tx_head);
		err = ENOMEM;
		goto exit;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac",
		    xenbus_get_node(dev));
		goto out;
	}

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
	netfront_carrier_off(np);

	return (0);

 exit:
	gnttab_free_grant_references(np->gref_tx_head);
 out:
	panic("do something smart");

}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}

static int netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return 0;
}

static void netif_free(struct netfront_info *info)
{
	netif_disconnect_backend(info);
}

static void netif_disconnect_backend(struct netfront_info *info)
{
	end_access(info->tx_ring_ref, info->tx.sring);
	end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;

	if (info->irq)
		unbind_from_irqhandler(info->irq, info->netdev);
	info->irq = 0;
}

static void end_access(int ref, void *page)
{
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, page);
}

/* ** Driver registration ** */

static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_backend_changed, netfront_backend_changed),

	{ 0, 0 }
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0);