 * Copyright (c) 2004-2006 Kip Macy
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/hypervisor.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/xenbus.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <dev/xen/netfront/mbufq.h>
#include <machine/xen/features.h>
#include <xen/interface/io/netif.h>
#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
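
/*
 * Compile-time sanity sketch, assuming the generic ring macros from
 * <xen/interface/io/ring.h>: __RING_SIZE() rounds the per-page entry count
 * down to a power of two, which is what lets xennet_rxidx() below fold a
 * free-running RING_IDX onto the fixed-size mbuf arrays with a simple mask.
 */
CTASSERT((NET_TX_RING_SIZE & (NET_TX_RING_SIZE - 1)) == 0);
CTASSERT((NET_RX_RING_SIZE & (NET_RX_RING_SIZE - 1)) == 0);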
#if 0	/* Linux-style module parameters, kept for reference only. */
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif
#define RX_COPY_THRESHOLD	256

#define net_ratelimit()		0
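
/*
 * Note (assumed intent): with net_ratelimit() stubbed to 0, every
 * "if (net_ratelimit()) WPRINTK(...)" site below compiles but never prints;
 * a real implementation would return nonzero at most a few times per
 * interval so repeated backend errors could not flood the console.
 */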
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);

static void netfront_closing(struct xenbus_device *dev);
static void netif_free(struct netfront_info *info);
static int  netfront_remove(struct xenbus_device *dev);

static int  talk_to_backend(struct xenbus_device *dev, struct netfront_info *info);
static int  create_netdev(struct xenbus_device *dev, struct ifnet **ifp);
static void netif_disconnect_backend(struct netfront_info *info);
static int  setup_device(struct xenbus_device *dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
static int  network_connect(struct ifnet *ifp);

static void xn_free_rx_ring(struct netfront_info *);
static void xn_free_tx_ring(struct netfront_info *);

static int  xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf_head *list,
    int *pages_flipped_p);
#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
#define INVALID_P2M_ENTRY (~0UL)
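
/*
 * Usage sketch: for a kernel virtual address, virt_to_mfn() yields the
 * machine frame number that the grant-table and memory-op hypercalls below
 * expect, e.g. gnttab_grant_foreign_access_ref(ref, otherend_id,
 * virt_to_mfn(mtod(m, vm_offset_t)), GNTMAP_readonly) grants the backend
 * read-only access to the frame backing an mbuf.
 */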
/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
        struct mbuf     *xn_tx_chain[NET_TX_RING_SIZE+1];
        struct mbuf     *xn_rx_chain[NET_RX_RING_SIZE+1];
};
struct net_device_stats
{
        u_long  rx_packets;             /* total packets received       */
        u_long  tx_packets;             /* total packets transmitted    */
        u_long  rx_bytes;               /* total bytes received         */
        u_long  tx_bytes;               /* total bytes transmitted      */
        u_long  rx_errors;              /* bad packets received         */
        u_long  tx_errors;              /* packet transmit problems     */
        u_long  rx_dropped;             /* no space in receive buffers  */
        u_long  tx_dropped;             /* no space available on tx     */
        u_long  multicast;              /* multicast packets received   */
        u_long  collisions;

        /* detailed rx_errors: */
        u_long  rx_length_errors;
        u_long  rx_over_errors;         /* receiver ring buff overflow  */
        u_long  rx_crc_errors;          /* recved pkt with crc error    */
        u_long  rx_frame_errors;        /* recv'd frame alignment error */
        u_long  rx_fifo_errors;         /* recv'r fifo overrun          */
        u_long  rx_missed_errors;       /* receiver missed packet       */

        /* detailed tx_errors */
        u_long  tx_aborted_errors;
        u_long  tx_carrier_errors;
        u_long  tx_fifo_errors;
        u_long  tx_heartbeat_errors;
        u_long  tx_window_errors;

        /* for cslip etc */
        u_long  rx_compressed;
        u_long  tx_compressed;
};
struct netfront_info {
        struct ifnet            *xn_ifp;

        struct net_device_stats stats;
        u_int                   tx_full;

        netif_tx_front_ring_t   tx;
        netif_rx_front_ring_t   rx;

        struct mtx              tx_lock;
        struct mtx              rx_lock;
        struct sx               sc_lock;

        u_int                   irq;
        u_int                   carrier;
        u_int                   copying_receiver;
        int                     xn_if_flags;
        void                    *netdev;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
        int                     rx_min_target, rx_max_target, rx_target;

        /*
         * {tx,rx}_mbufs store outstanding mbufs.  The first entry in each
         * array is an index into a chain of free entries.
         */
        grant_ref_t             gref_tx_head;
        grant_ref_t             grant_tx_ref[NET_TX_RING_SIZE + 1];
        grant_ref_t             gref_rx_head;
        grant_ref_t             grant_rx_ref[NET_RX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
        struct xenbus_device    *xbdev;

        uint8_t                 mac[ETHER_ADDR_LEN];
        struct xn_chain_data    xn_cdata;       /* mbufs */
        struct mbuf_head        xn_rx_batch;    /* head of the batch queue */

        struct callout          xn_stat_ch;

        u_long                  rx_pfn_array[NET_RX_RING_SIZE];
        multicall_entry_t       rx_mcl[NET_RX_RING_SIZE+1];
        mmu_update_t            rx_mmu[NET_RX_RING_SIZE];
};
#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain
#define XN_LOCK_INIT(_sc, _name)                                        \
        do {                                                            \
                mtx_init(&(_sc)->tx_lock, #_name"_tx",                  \
                    "network transmit lock", MTX_DEF);                  \
                mtx_init(&(_sc)->rx_lock, #_name"_rx",                  \
                    "network receive lock", MTX_DEF);                   \
                sx_init(&(_sc)->sc_lock, #_name"_sc");                  \
        } while (0)

#define XN_RX_LOCK(_sc)         mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)       mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)         mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)       mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)            sx_xlock(&(_sc)->sc_lock)
#define XN_UNLOCK(_sc)          sx_xunlock(&(_sc)->sc_lock)

#define XN_LOCK_ASSERT(_sc)     sx_assert(&(_sc)->sc_lock, SX_LOCKED)
#define XN_RX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rx_lock, MA_OWNED)
#define XN_TX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->tx_lock, MA_OWNED)

#define XN_LOCK_DESTROY(_sc)                                            \
        do {                                                            \
                mtx_destroy(&(_sc)->rx_lock);                           \
                mtx_destroy(&(_sc)->tx_lock);                           \
                sx_destroy(&(_sc)->sc_lock);                            \
        } while (0)
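
/*
 * Locking sketch (mirrors xn_intr() below):
 *
 *      XN_RX_LOCK(sc); xn_rxeof(sc); XN_RX_UNLOCK(sc);
 *      XN_TX_LOCK(sc); xn_txeof(sc); XN_TX_UNLOCK(sc);
 *
 * The two rings take separate mutexes so reaping one ring at interrupt time
 * never blocks on the other, while the sx lock serializes the slow ioctl
 * and attach/detach paths.
 */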
struct netfront_rx_info {
        struct netif_rx_response rx;
        struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)      ((netif)->carrier = 1)
#define netfront_carrier_off(netif)     ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)      ((netif)->carrier)
/*
 * Access functions for acquiring and freeing slots in the tx/rx mbuf
 * freelists ({tx,rx}_mbufs[]).  Slot 0 of each array is reserved as the
 * head of a chain of free ids.
 */
static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
        list[id] = list[0];
        list[0] = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
        u_int id = (u_int)(u_long)list[0];

        list[0] = list[id];
        return (id);
}
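
/*
 * Usage sketch: a slot id is popped from the freelist when a request is
 * posted and pushed back once the response naming it has been reaped, e.g.
 *
 *      id = get_id_from_freelist(sc->tx_mbufs);
 *      sc->tx_mbufs[id] = m;
 *      ...
 *      sc->tx_mbufs[id] = NULL;
 *      add_id_to_freelist(sc->tx_mbufs, id);
 *
 * This is exactly the pattern xn_start_locked() and xn_txeof() follow.
 */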
static inline int
xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}
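
/*
 * Example: with a 256-entry ring, xennet_rxidx(258) == 2.  The RING_IDX
 * counters grow without bound, and this mask folds them onto the fixed-size
 * rx_mbufs[]/grant_rx_ref[] arrays (valid because the ring size is a power
 * of two).
 */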
static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct mbuf *m = np->rx_mbufs[i];

        np->rx_mbufs[i] = NULL;
        return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];

        np->grant_rx_ref[i] = GRANT_INVALID_REF;
        return (ref);
}
#define IPRINTK(fmt, args...) \
        printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
        printf("[XEN] " fmt, ##args)
#define DPRINTK(fmt, args...) \
        printf("[XEN] " fmt, ##args)
static __inline struct mbuf *
makembuf(struct mbuf *buf)
{
        struct mbuf *m = NULL;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        M_MOVE_PKTHDR(m, buf);
        m_cljget(m, M_DONTWAIT, MJUMPAGESIZE);
        m->m_pkthdr.len = buf->m_pkthdr.len;
        m->m_len = buf->m_len;
        m_copydata(buf, 0, buf->m_pkthdr.len, mtod(m, caddr_t));
        m->m_ext.ext_arg1 =
            (caddr_t *)(uintptr_t)(vtophys(mtod(m, caddr_t)) >> PAGE_SHIFT);
        return (m);
}
/*
 * Read the 'mac' node at the given device's node in the store, and parse it
 * as colon-separated octets, placing the result in the given mac array.
 * mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or an errno on error.
 */
static int
xen_net_read_mac(struct xenbus_device *dev, uint8_t mac[])
{
        char *s, *e;
        int i;

        char *macstr = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        s = macstr;
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mac[i] = strtoul(s, &e, 16);
                if (s == e || (e[0] != ':' && e[0] != 0)) {
                        free(macstr, M_DEVBUF);
                        return ENOENT;
                }
                s = &e[1];
        }
        free(macstr, M_DEVBUF);
        return 0;
}
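
/*
 * Example input (format assumed from the Xen toolstack): a store node such
 * as "00:16:3e:5e:ab:cd" yields mac[0] == 0x00 ... mac[5] == 0xcd, with 'e'
 * left pointing at the ':' (or NUL) after each octet so the loop can both
 * validate the separator and resume at the next digit.
 */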
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to the
 * Connected state.
 */
static int
netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id)
{
        int err;
        struct ifnet *ifp;
        struct netfront_info *info;

        printf("netfront_probe()\n");

        err = create_netdev(dev, &ifp);
        if (err) {
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

        info = ifp->if_softc;
        dev->dev_driver_data = info;

        return 0;
}
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(struct xenbus_device *dev)
{
        struct netfront_info *info = dev->dev_driver_data;

        DPRINTK("%s\n", dev->nodename);

        netif_disconnect_backend(info);
        return (0);
}
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(struct xenbus_device *dev, struct netfront_info *info)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;

        err = xen_net_read_mac(dev, info->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                goto out;
        }

        /* Create shared ring, alloc event channel. */
        err = setup_device(dev, info);
        if (err)
                goto out;

 again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_ring;
        }
        err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
            info->tx_ring_ref);
        if (err) {
                message = "writing tx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
            info->rx_ring_ref);
        if (err) {
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
            "event-channel", "%u", irq_to_evtchn_port(info->irq));
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
            info->copying_receiver);
        if (err) {
                message = "writing request-rx-copy";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
        if (err) {
                message = "writing feature-rx-notify";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
            "%d", 1);
        if (err) {
                message = "writing feature-no-csum-offload";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
        if (err) {
                message = "writing feature-sg";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
        if (err) {
                message = "writing feature-gso-tcpv4";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_ring;
        }

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
        netif_free(info);
 out:
        return err;
}
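
/*
 * Resulting xenstore layout sketch (illustrative values): on success the
 * frontend's device directory ends up holding, e.g.
 *
 *      tx-ring-ref = "8"               rx-ring-ref = "9"
 *      event-channel = "11"            request-rx-copy = "1"
 *      feature-rx-notify = "1"         feature-no-csum-offload = "1"
 *      feature-sg = "1"                feature-gso-tcpv4 = "1"
 *
 * which the backend reads in order to map the rings and connect.
 */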
static int
setup_device(struct xenbus_device *dev, struct netfront_info *info)
{
        netif_tx_sring_t *txs;
        netif_rx_sring_t *rxs;
        int err;
        struct ifnet *ifp;

        ifp = info->xn_ifp;

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->rx.sring = NULL;
        info->tx.sring = NULL;

        txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!txs) {
                err = ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating tx ring page");
                goto fail;
        }
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
        err = xenbus_grant_ring(dev, virt_to_mfn(txs));
        if (err < 0)
                goto fail;
        info->tx_ring_ref = err;

        rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!rxs) {
                err = ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
                goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
        err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
        if (err < 0)
                goto fail;
        info->rx_ring_ref = err;

        network_connect(ifp);

        err = bind_listening_port_to_irqhandler(dev->otherend_id,
            "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, NULL);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                    "bind_listening_port_to_irqhandler failed");
                goto fail;
        }
        info->irq = err;

        show_device(info);

        return 0;

 fail:
        netif_free(info);
        return err;
}
/*
 * Callback received when the backend's state changes.
 */
static void
backend_changed(struct xenbus_device *dev,
    XenbusState backend_state)
{
        struct netfront_info *sc = dev->dev_driver_data;

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;
        case XenbusStateInitWait:
                if (dev->state != XenbusStateInitialising)
                        break;
                if (network_connect(sc->xn_ifp) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
#ifdef notyet
                /* Linux helper with no FreeBSD counterpart in this driver. */
                (void)send_fake_arp(netdev);
#endif
                break;
        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
        }
}
static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                if (sc->xn_cdata.xn_rx_chain[i] != NULL) {
                        m_freem(sc->xn_cdata.xn_rx_chain[i]);
                        sc->xn_cdata.xn_rx_chain[i] = NULL;
                }
        }

        sc->rx.rsp_cons = 0;
        sc->xn_rx_if->req_prod = 0;
        sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}
static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                if (sc->xn_cdata.xn_tx_chain[i] != NULL) {
                        m_freem(sc->xn_cdata.xn_tx_chain[i]);
                        sc->xn_cdata.xn_tx_chain[i] = NULL;
                }
        }
#endif
}
static inline int
netfront_tx_slot_available(struct netfront_info *np)
{
        return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
            (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2));
}
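
/*
 * Worked example, assuming TX_MAX_TARGET == 256: the test above reports a
 * free slot only while fewer than 230 requests are outstanding, i.e. it
 * always keeps 24 + 2 slots of headroom for a maximally fragmented packet
 * plus bookkeeping.
 */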
static void
netif_release_tx_bufs(struct netfront_info *np)
{
        struct mbuf *m;
        int i;

        for (i = 1; i <= NET_TX_RING_SIZE; i++) {
                m = np->xn_cdata.xn_tx_chain[i];

                /* Freelist entries are small integers, not mbuf pointers. */
                if (((u_long)m) < KERNBASE)
                        continue;
                gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
                    np->xbdev->otherend_id, virt_to_mfn(mtod(m, vm_offset_t)),
                    GNTMAP_readonly);
                gnttab_release_grant_reference(&np->gref_tx_head,
                    np->grant_tx_ref[i]);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(np->tx_mbufs, i);
                m_freem(m);
        }
}
static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
        unsigned short id;
        struct mbuf *m_new;
        int i, batch_target, notify;
        RING_IDX req_prod;
        struct xen_memory_reservation reservation;
        grant_ref_t ref;
        u_long pfn;
        vm_offset_t vaddr;
        int nr_flips;
        netif_rx_request_t *req;

        req_prod = sc->rx.req_prod_pvt;

        if (unlikely(sc->carrier == 0))
                return;

        /*
         * Allocate mbufs greedily, even though we batch updates to the
         * receive ring.  This creates a less bursty demand on the memory
         * allocator, so should reduce the chance of failed allocation
         * requests both for ourselves and for other kernel subsystems.
         */
        batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
        for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        break;

                m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
                if ((m_new->m_flags & M_EXT) == 0) {
                        m_freem(m_new);
                        break;
                }
                m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

                /* queue the mbufs allocated */
                mbufq_tail(&sc->xn_rx_batch, m_new);
        }

        /* Is the batch large enough to be worthwhile? */
        if (i < (sc->rx_target/2)) {
                if (req_prod > sc->rx.sring->req_prod)
                        goto push;
                return;
        }
        /* Adjust floating fill target if we risked running out of buffers. */
        if (((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
            ((sc->rx_target *= 2) > sc->rx_max_target))
                sc->rx_target = sc->rx_max_target;
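
        /*
         * Example: a target of 32 doubles to 64 when fewer than 8 of the
         * posted buffers remain unconsumed, then to 128 and 256 on later
         * refills, clamped to rx_max_target.
         */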
        for (nr_flips = i = 0; ; i++) {
                if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
                        break;

                m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
                    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

                id = xennet_rxidx(req_prod + i);

                KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
                    ("non-NULL xn_rx_chain"));
                sc->xn_cdata.xn_rx_chain[id] = m_new;

                ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
                KASSERT((short)ref >= 0, ("negative ref"));
                sc->grant_rx_ref[id] = ref;

                vaddr = mtod(m_new, vm_offset_t);
                pfn = vtophys(vaddr) >> PAGE_SHIFT;
                req = RING_GET_REQUEST(&sc->rx, req_prod + i);

                if (sc->copying_receiver == 0) {
                        gnttab_grant_foreign_transfer_ref(ref,
                            sc->xbdev->otherend_id, pfn);
                        sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                /*
                                 * Remove this page before passing
                                 * back to Xen.
                                 */
                                set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                                MULTI_update_va_mapping(&sc->rx_mcl[i],
                                    vaddr, 0, 0);
                        }
                        nr_flips++;
                } else {
                        gnttab_grant_foreign_access_ref(ref,
                            sc->xbdev->otherend_id,
                            PFNTOMFN(pfn), 0);
                }

                req->id = id;
                req->gref = ref;

                sc->rx_pfn_array[i] =
                    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
        }
        KASSERT(i, ("no mbufs processed"));     /* should have returned earlier */
        KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
        /*
         * We may have allocated buffers which have entries outstanding
         * in the page update queue -- make sure we flush those first!
         */
        PT_UPDATES_FLUSH();

        /* Tell the balloon driver what is going on. */
        balloon_update_driver_allowance(i);

        set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
        reservation.nr_extents   = i;
        reservation.extent_order = 0;
        reservation.address_bits = 0;
        reservation.domid        = DOMID_SELF;
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* After all PTEs have been zapped, flush the TLB. */
                sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
                    UVMF_TLB_FLUSH|UVMF_ALL;

                /* Give away a batch of pages. */
                sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
                sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
                sc->rx_mcl[i].args[1] = (u_long)&reservation;

                /* Zap PTEs and give away pages in one big multicall. */
                (void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

                /* Check return status of HYPERVISOR_memory_op(). */
                if (unlikely(sc->rx_mcl[i].result != i))
                        panic("Unable to reduce memory reservation\n");
        } else {
                if (HYPERVISOR_memory_op(
                    XENMEM_decrease_reservation, &reservation) != i)
                        panic("Unable to reduce memory reservation\n");
        }

 push:
        /* Above is a suitable barrier to ensure backend will see requests. */
        sc->rx.req_prod_pvt = req_prod + i;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
        if (notify)
                notify_remote_via_irq(sc->irq);
}
static void
xn_rxeof(struct netfront_info *np)
{
        struct ifnet *ifp;
        struct netfront_rx_info rinfo;
        struct netif_rx_response *rx = &rinfo.rx;
        struct netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        multicall_entry_t *mcl;
        struct mbuf *m;
        struct mbuf_head rxq, errq, tmpq;
        int err, pages_flipped = 0;

        XN_RX_LOCK_ASSERT(np);
        if (!netfront_carrier_ok(np))
                return;

        mbufq_init(&errq);
        mbufq_init(&rxq);
        mbufq_init(&tmpq);

        ifp = np->xn_ifp;

        rp = np->rx.sring->rsp_prod;
        rmb();  /* Ensure we see queued responses up to 'rp'. */

        i = np->rx.rsp_cons;
        while (i != rp) {
                memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(np, &rinfo, rp, &tmpq,
                    &pages_flipped);

                if (unlikely(err)) {
                        while ((m = mbufq_dequeue(&tmpq)))
                                mbufq_tail(&errq, m);
                        np->stats.rx_errors++;
                        i = np->rx.rsp_cons;
                        continue;
                }

                m = mbufq_dequeue(&tmpq);

                m->m_data += rx->offset; /* (rx->addr & PAGE_MASK); */
                m->m_pkthdr.len = m->m_len = rx->status;
                m->m_pkthdr.rcvif = ifp;

                if (rx->flags & NETRXF_data_validated) {
                        /*
                         * Tell the stack the checksums are okay.
                         * XXX this isn't necessarily the case - need to add
                         * check.
                         */
                        m->m_pkthdr.csum_flags |=
                            (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
                            | CSUM_PSEUDO_HDR);
                        m->m_pkthdr.csum_data = 0xffff;
                }

                np->stats.rx_packets++;
                np->stats.rx_bytes += rx->status;

                mbufq_tail(&rxq, m);
                np->rx.rsp_cons = ++i;
        }
        /* Some pages are no longer absent... */
        balloon_update_driver_allowance(-pages_flipped);

        /*
         * Do all the remapping work, and M->P updates, in one big
         * hypercall.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                mcl = np->rx_mcl + pages_flipped;
                mcl->op = __HYPERVISOR_mmu_update;
                mcl->args[0] = (u_long)np->rx_mmu;
                mcl->args[1] = pages_flipped;
                mcl->args[2] = 0;
                mcl->args[3] = DOMID_SELF;
                (void)HYPERVISOR_multicall(np->rx_mcl,
                    pages_flipped + 1);
        }
        while ((m = mbufq_dequeue(&errq)))
                m_freem(m);

        /*
         * Process all the mbufs after the remapping is complete.
         * Break the mbuf chain first though.
         */
        while ((m = mbufq_dequeue(&rxq)) != NULL) {
                /*
                 * Do we really need to drop the rx lock?
                 */
                XN_RX_UNLOCK(np);
                /* Pass it up. */
                (*ifp->if_input)(ifp, m);
                XN_RX_LOCK(np);
        }
        /* If we get a callback with very few responses, reduce fill target. */
        /* NB. Note exponential increase, linear decrease. */
        if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
            ((3*np->rx_target) / 4)) &&
            (--np->rx_target < np->rx_min_target))
                np->rx_target = np->rx_min_target;
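
        /*
         * Example: with well over three quarters of the posted buffers still
         * outstanding after a small batch of responses, the target decays by
         * one per callback, drifting back toward rx_min_target (contrast the
         * doubling in network_alloc_rx_buffers()).
         */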
        network_alloc_rx_buffers(np);

        np->rx.sring->rsp_event = i + 1;
}
static void
xn_txeof(struct netfront_info *np)
{
        RING_IDX i, prod;
        unsigned short id;
        struct ifnet *ifp;
        struct mbuf *m;

        XN_TX_LOCK_ASSERT(np);

        if (!netfront_carrier_ok(np))
                return;

        ifp = np->xn_ifp;

        do {
                prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'rp'. */

                for (i = np->tx.rsp_cons; i != prod; i++) {
                        id = RING_GET_RESPONSE(&np->tx, i)->id;
                        m = np->xn_cdata.xn_tx_chain[id];

                        KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
                        if (unlikely(gnttab_query_foreign_access(
                            np->grant_tx_ref[id]) != 0)) {
                                printf("network_tx_buf_gc: warning "
                                    "-- grant still in use by backend "
                                    "domain.\n");
                                break;
                        }
                        gnttab_end_foreign_access_ref(
                            np->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                            &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;

                        np->xn_cdata.xn_tx_chain[id] = NULL;
                        add_id_to_freelist(np->xn_cdata.xn_tx_chain, id);
                        m_freem(m);
                }
                np->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of
                 * tx_cons.  Note that it is essential to schedule a
                 * callback, no matter how few buffers are pending.  Even if
                 * there is space in the transmit ring, higher layers may
                 * be blocked because too much data is outstanding: in such
                 * cases notification from Xen is likely to be the only kick
                 * that we'll get.
                 */
                np->tx.sring->rsp_event =
                    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
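
                /*
                 * Worked example: with rsp_prod == 100 and req_prod == 140,
                 * rsp_event becomes 100 + (40 >> 1) + 1 == 121, so the
                 * backend notifies us again roughly halfway through the
                 * still-outstanding requests.
                 */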
        } while (prod != np->tx.sring->rsp_prod);

        if (np->tx_full &&
            ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
                np->tx_full = 0;
#ifdef notyet
                /* Linux flow-control hook; no FreeBSD equivalent here. */
                if (np->user_state == UST_OPEN)
                        netif_wake_queue(dev);
#endif
        }
}
static void
xn_intr(void *xsc)
{
        struct netfront_info *np = xsc;
        struct ifnet *ifp = np->xn_ifp;

#if 0
        if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
            likely(netfront_carrier_ok(np)) &&
            ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;
#endif
        if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
                XN_TX_LOCK(np);
                xn_txeof(np);
                XN_TX_UNLOCK(np);
        }

        XN_RX_LOCK(np);
        xn_rxeof(np);
        XN_RX_UNLOCK(np);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                xn_start(ifp);
}
static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
        int new = xennet_rxidx(np->rx.req_prod_pvt);

        KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
        np->rx_mbufs[new] = m;
        np->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
        np->rx.req_prod_pvt++;
}
static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp)
{
        struct netif_extra_info *extra;
        RING_IDX cons = np->rx.rsp_cons;
        int err = 0;

        do {
                struct mbuf *m;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                WPRINTK("Missing extra info\n");
                        err = EINVAL;
                        break;
                }

                extra = (struct netif_extra_info *)
                    RING_GET_RESPONSE(&np->rx, ++cons);

                if (unlikely(!extra->type ||
                    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                WPRINTK("Invalid extra type: %d\n",
                                    extra->type);
                        err = EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra,
                            sizeof(*extra));
                }

                m = xennet_get_rx_mbuf(np, cons);
                ref = xennet_get_rx_ref(np, cons);
                xennet_move_rx_slot(np, m, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        np->rx.rsp_cons = cons;
        return err;
}
static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp,
    struct mbuf_head *list,
    int *pages_flipped_p)
{
        int pages_flipped = *pages_flipped_p;
        struct mmu_update *mmu;
        struct multicall_entry *mcl;
        struct netif_rx_response *rx = &rinfo->rx;
        struct netif_extra_info *extras = rinfo->extras;
        RING_IDX cons = np->rx.rsp_cons;
        struct mbuf *m = xennet_get_rx_mbuf(np, cons);
        grant_ref_t ref = xennet_get_rx_ref(np, cons);
        int max = 24 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
        int frags = 1;
        int err = 0;
        u_long ret, mfn;

        if (rx->flags & NETRXF_extra_info) {
                err = xennet_get_extras(np, extras, rp);
                cons = np->rx.rsp_cons;
        }

        do {
                if (unlikely(rx->status < 0 ||
                    rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
                                WPRINTK("rx->offset: %x, size: %u\n",
                                    rx->offset, rx->status);
                        xennet_move_rx_slot(np, m, ref);
                        err = EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or
                 * in the backend driver.  In future this should flag the bad
                 * situation to the system controller, to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                WPRINTK("Bad rx response id %d.\n", rx->id);
                        err = EINVAL;
                        goto next;
                }

                if (!np->copying_receiver) {
                        /*
                         * Memory pressure, insufficient buffer
                         * headroom, ...
                         */
                        if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
                                if (net_ratelimit())
                                        WPRINTK("Unfulfilled rx req "
                                            "(id=%d, st=%d).\n",
                                            rx->id, rx->status);
                                xennet_move_rx_slot(np, m, ref);
                                err = ENOMEM;
                                goto next;
                        }

                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                /* Remap the page. */
                                void *vaddr = mtod(m, void *);
                                uint32_t pfn;

                                mcl = np->rx_mcl + pages_flipped;
                                mmu = np->rx_mmu + pages_flipped;

                                MULTI_update_va_mapping(mcl, (u_long)vaddr,
                                    (mfn << PAGE_SHIFT) | PG_RW |
                                    PG_V | PG_M | PG_A, 0);
                                pfn = (uint32_t)(uintptr_t)m->m_ext.ext_arg1;
                                mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
                                    MMU_MACHPHYS_UPDATE;
                                mmu->val = pfn;

                                set_phys_to_machine(pfn, mfn);
                        }
                        pages_flipped++;
                } else {
                        ret = gnttab_end_foreign_access_ref(ref, 0);
                        KASSERT(ret, ("ret != 0"));
                }

                gnttab_release_grant_reference(&np->gref_rx_head, ref);
                mbufq_tail(list, m);

 next:
                if (!(rx->flags & NETRXF_more_data))
                        break;

                if (cons + frags == rp) {
                        if (net_ratelimit())
                                WPRINTK("Need more frags\n");
                        err = ENOENT;
                        break;
                }

                rx = RING_GET_RESPONSE(&np->rx, cons + frags);
                m = xennet_get_rx_mbuf(np, cons + frags);
                ref = xennet_get_rx_ref(np, cons + frags);
                frags++;
        } while (1);

        if (unlikely(frags > max)) {
                if (net_ratelimit())
                        WPRINTK("Too many frags\n");
                err = E2BIG;
        }

        if (unlikely(err))
                np->rx.rsp_cons = cons + frags;

        *pages_flipped_p = pages_flipped;

        return err;
}
static void
xn_tick_locked(struct netfront_info *sc)
{
        XN_RX_LOCK_ASSERT(sc);
        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

        /* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
        struct netfront_info *sc = xsc;

        XN_RX_LOCK(sc);
        xn_tick_locked(sc);
        XN_RX_UNLOCK(sc);
}
static void
xn_start_locked(struct ifnet *ifp)
{
        unsigned short id;
        struct mbuf *m_head, *new_m;
        struct netfront_info *sc;
        netif_tx_request_t *tx;
        RING_IDX i;
        grant_ref_t ref;
        u_long mfn, tx_bytes;
        int notify;

        sc = ifp->if_softc;
        tx_bytes = 0;

        if (!netfront_carrier_ok(sc))
                return;

        for (i = sc->tx.req_prod_pvt; TRUE; i++) {
                IF_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                if (!netfront_tx_slot_available(sc)) {
                        IF_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }

                id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain);

                /*
                 * Start packing the mbufs in this chain into
                 * the fragment pointers.  Stop when we run out
                 * of fragments or hit the end of the mbuf chain.
                 */
                new_m = makembuf(m_head);
                tx = RING_GET_REQUEST(&sc->tx, i);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
                KASSERT((short)ref >= 0, ("Negative ref"));
                mfn = virt_to_mfn(mtod(new_m, vm_offset_t));
                gnttab_grant_foreign_access_ref(ref, sc->xbdev->otherend_id,
                    mfn, GNTMAP_readonly);
                tx->gref = sc->grant_tx_ref[id] = ref;
                tx->size = new_m->m_pkthdr.len;
#ifdef notyet
                /* Linux leftover: mbufs carry csum_flags, not ip_summed. */
                tx->flags = (skb->ip_summed == CHECKSUM_HW) ?
                    NETTXF_csum_blank : 0;
#endif
                new_m->m_next = NULL;
                new_m->m_nextpkt = NULL;
                m_freem(m_head);

                sc->xn_cdata.xn_tx_chain[id] = new_m;
                BPF_MTAP(ifp, new_m);

                sc->stats.tx_bytes += new_m->m_pkthdr.len;
                sc->stats.tx_packets++;
        }

        sc->tx.req_prod_pvt = i;
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
        if (notify)
                notify_remote_via_irq(sc->irq);

        xn_txeof(sc);

        if (RING_FULL(&sc->tx)) {
                sc->tx_full = 1;
#ifdef notyet
                netif_stop_queue(dev);
#endif
        }
}
static void
xn_start(struct ifnet *ifp)
{
        struct netfront_info *sc;

        sc = ifp->if_softc;
        XN_TX_LOCK(sc);
        xn_start_locked(ifp);
        XN_TX_UNLOCK(sc);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        xn_stop(sc);

        network_alloc_rx_buffers(sc);
        sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}
static void
xn_ifinit(void *xsc)
{
        struct netfront_info *sc = xsc;

        XN_LOCK(sc);
        xn_ifinit_locked(sc);
        XN_UNLOCK(sc);
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct netfront_info *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        int mask, error = 0;

        switch (cmd) {
        case SIOCSIFADDR:
                XN_LOCK(sc);
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                xn_ifinit_locked(sc);
                        arp_ifinit(ifp, ifa);
                        XN_UNLOCK(sc);
                } else {
                        XN_UNLOCK(sc);
                        error = ether_ioctl(ifp, cmd, data);
                }
                break;
        case SIOCSIFMTU:
                /* XXX can we alter the MTU on a VN ? */
                if (ifr->ifr_mtu > XN_JUMBO_MTU) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        xn_ifinit(sc);
                }
                break;
        case SIOCSIFFLAGS:
                XN_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /*
                         * If only the state of the PROMISC flag changed,
                         * then just use the 'set promisc mode' command
                         * instead of reinitializing the entire NIC.  Doing
                         * a full re-init means reloading the firmware and
                         * waiting for it to start up, which may take a
                         * second or two.
                         */
#ifdef notyet
                        /* No promiscuous mode with Xen */
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            ifp->if_flags & IFF_PROMISC &&
                            !(sc->xn_if_flags & IFF_PROMISC)) {
                                XN_SETBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                            !(ifp->if_flags & IFF_PROMISC) &&
                            sc->xn_if_flags & IFF_PROMISC) {
                                XN_CLRBIT(sc, XN_RX_MODE,
                                    XN_RXMODE_RX_PROMISC);
                        } else
#endif
                                xn_ifinit_locked(sc);
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                xn_stop(sc);
                }
                sc->xn_if_flags = ifp->if_flags;
                XN_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                }
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        /* XXX rx filter reprogramming unimplemented */
                }
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
        }

        return (error);
}
static void
xn_stop(struct netfront_info *sc)
{
        struct ifnet *ifp;

        XN_LOCK_ASSERT(sc);

        ifp = sc->xn_ifp;

        callout_stop(&sc->xn_stat_ch);

        xn_free_rx_ring(sc);
        xn_free_tx_ring(sc);

        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
/* START of Xenolinux helper functions adapted to FreeBSD */
static int
network_connect(struct ifnet *ifp)
{
        struct netfront_info *np;
        int i, requeue_idx, err;
        grant_ref_t ref;
        netif_rx_request_t *req;
        u_int feature_rx_copy, feature_rx_flip;

        printf("network_connect\n");

        np = ifp->if_softc;
        err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
            "feature-rx-copy", "%u", &feature_rx_copy);
        if (err != 1)
                feature_rx_copy = 0;
        err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
            "feature-rx-flip", "%u", &feature_rx_flip);
        if (err != 1)
                feature_rx_flip = 1;

        /*
         * Copy packets on receive path if:
         *  (a) This was requested by user, and the backend supports it; or
         *  (b) Flipping was requested, but this is unsupported by the
         *      backend.
         */
        np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
            (MODPARM_rx_flip && !feature_rx_flip));
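
        /*
         * Example outcomes with the fixed defaults above (rx_copy == 1,
         * rx_flip == 0): copying is selected whenever the backend advertises
         * feature-rx-copy; page flipping remains only as a fallback for
         * backends that do not.
         */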
        /* Recovery procedure: */
        err = talk_to_backend(np->xbdev, np);
        if (err)
                return (err);

        /* Step 1: Reinitialise variables. */
        netif_release_tx_bufs(np);

        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                struct mbuf *m;

                if (np->rx_mbufs[i] == NULL)
                        continue;

                m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
                req = RING_GET_REQUEST(&np->rx, requeue_idx);

                if (!np->copying_receiver) {
                        gnttab_grant_foreign_transfer_ref(ref,
                            np->xbdev->otherend_id,
                            vtophys(mtod(m, vm_offset_t)));
                } else {
                        gnttab_grant_foreign_access_ref(ref,
                            np->xbdev->otherend_id,
                            vtophys(mtod(m, vm_offset_t)), 0);
                }
                req->gref = ref;
                req->id = requeue_idx;

                requeue_idx++;
        }

        np->rx.req_prod_pvt = requeue_idx;

        /*
         * Step 3: All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some buffers.
         */
        netfront_carrier_on(np);
        notify_remote_via_irq(np->irq);
        XN_TX_LOCK(np);
        xn_txeof(np);
        XN_TX_UNLOCK(np);
        network_alloc_rx_buffers(np);

        return (0);
}
static void
show_device(struct netfront_info *sc)
{
#ifdef DEBUG
        if (sc) {
                IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
                    sc->xn_ifno,
                    be_state_name[sc->xn_backend_state],
                    sc->xn_user_state ? "open" : "closed",
                    sc->xn_evtchn,
                    sc->xn_irq,
                    sc->xn_tx_if,
                    sc->xn_rx_if);
        } else {
                IPRINTK("<vif NULL>\n");
        }
#endif
}
static int ifno = 0;

/** Create a network device.
 * @param handle device handle
 */
static int
create_netdev(struct xenbus_device *dev, struct ifnet **ifpp)
{
        int i;
        struct netfront_info *np;
        int err;
        struct ifnet *ifp;

        np = (struct netfront_info *)malloc(sizeof(struct netfront_info),
            M_DEVBUF, M_NOWAIT);
        if (np == NULL)
                return (ENOMEM);

        memset(np, 0, sizeof(struct netfront_info));

        np->xbdev = dev;

        XN_LOCK_INIT(np, xennetif);
        np->rx_target     = RX_MIN_TARGET;
        np->rx_min_target = RX_MIN_TARGET;
        np->rx_max_target = RX_MAX_TARGET;

        /* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
        for (i = 0; i <= NET_TX_RING_SIZE; i++) {
                np->tx_mbufs[i] = (void *)((u_long)i + 1);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }
        for (i = 0; i <= NET_RX_RING_SIZE; i++) {
                np->rx_mbufs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
        }
        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
            &np->gref_tx_head) < 0) {
                printf("#### netfront can't alloc tx grant refs\n");
                err = ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
            &np->gref_rx_head) < 0) {
                printf("#### netfront can't alloc rx grant refs\n");
                gnttab_free_grant_references(np->gref_tx_head);
                err = ENOMEM;
                goto exit;
        }

        err = xen_net_read_mac(dev, np->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                goto out;
        }

        /* Set up ifnet structure */
        *ifpp = ifp = np->xn_ifp = if_alloc(IFT_ETHER);
        ifp->if_softc = np;
        if_initname(ifp, "xn", ifno++);
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
        ifp->if_ioctl = xn_ioctl;
        ifp->if_output = ether_output;
        ifp->if_start = xn_start;
#ifdef notyet
        ifp->if_watchdog = xn_watchdog;
#endif
        ifp->if_init = xn_ifinit;
        ifp->if_mtu = ETHERMTU;
        ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

        ifp->if_hwassist = XN_CSUM_FEATURES;
        ifp->if_capabilities = IFCAP_HWCSUM;
        ifp->if_capenable = ifp->if_capabilities;

        ether_ifattach(ifp, np->mac);
        callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
        netfront_carrier_off(np);

        return (0);

 exit:
        gnttab_free_grant_references(np->gref_tx_head);
 out:
        panic("do something smart");
}
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
netfront_closing(struct xenbus_device *dev)
{
        struct netfront_info *info = dev->dev_driver_data;

        DPRINTK("netfront_closing: %s removed\n", dev->nodename);

        netif_free(info);

        xenbus_switch_state(dev, XenbusStateClosed);
}
static int
netfront_remove(struct xenbus_device *dev)
{
        struct netfront_info *info = dev->dev_driver_data;

        DPRINTK("%s\n", dev->nodename);

        netif_free(info);
        free(info, M_DEVBUF);

        return 0;
}
static void
netif_free(struct netfront_info *info)
{
        netif_disconnect_backend(info);
}
static void
netif_disconnect_backend(struct netfront_info *info)
{
        end_access(info->tx_ring_ref, info->tx.sring);
        end_access(info->rx_ring_ref, info->rx.sring);
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->tx.sring = NULL;
        info->rx.sring = NULL;

        if (info->irq)
                unbind_from_irqhandler(info->irq, info->netdev);
        info->irq = 0;
}
static void
end_access(int ref, void *page)
{
        if (ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(ref, 0, page);
}
/* ** Driver registration ** */

static struct xenbus_device_id netfront_ids[] = {
        { "vif" },
        { "" }
};

static struct xenbus_driver netfront = {
        .name = "vif",
        .ids = netfront_ids,
        .probe = netfront_probe,
        .remove = netfront_remove,
        .resume = netfront_resume,
        .otherend_changed = backend_changed,
};

static void
netif_init(void *unused)
{
        if (!is_running_on_xen())
                return;

        if (is_initial_xendomain())
                return;

        IPRINTK("Initialising virtual ethernet driver.\n");

        xenbus_register_frontend(&netfront);
}

SYSINIT(xennetif, SI_SUB_PSEUDO, SI_ORDER_SECOND, netif_init, NULL)
/*
 * Local variables:
 * c-set-style: "BSD"
 * indent-tabs-mode: t
 * End:
 */