2 * Copyright (c) 2009-2011 Spectra Logic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
31 * Alan Somers (Spectra Logic Corporation)
32 * John Suykerbuyk (Spectra Logic Corporation)
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
41 * \brief Device driver supporting the vending of network access
42 * from this FreeBSD domain to other domains.
45 #include "opt_global.h"
49 #include <sys/param.h>
50 #include <sys/kernel.h>
53 #include <sys/module.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
60 #include <net/if_arp.h>
61 #include <net/ethernet.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/if_ether.h>
69 #if __FreeBSD_version >= 700000
70 #include <netinet/tcp.h>
72 #include <netinet/ip_icmp.h>
73 #include <netinet/udp.h>
74 #include <machine/in_cksum.h>
78 #include <vm/vm_extern.h>
79 #include <vm/vm_kern.h>
81 #include <machine/_inttypes.h>
83 #include <xen/xen-os.h>
84 #include <xen/hypervisor.h>
85 #include <xen/xen_intr.h>
86 #include <xen/interface/io/netif.h>
87 #include <xen/xenbus/xenbusvar.h>
89 #include <machine/xen/xenvar.h>
91 /*--------------------------- Compile-time Tunables --------------------------*/
93 /*---------------------------------- Macros ----------------------------------*/
95 * Custom malloc type for all driver allocations.
97 static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data");
99 #define XNB_SG 1 /* netback driver supports feature-sg */
100 #define XNB_GSO_TCPV4 1 /* netback driver supports feature-gso-tcpv4 */
101 #define XNB_RX_COPY 1 /* netback driver supports feature-rx-copy */
102 #define XNB_RX_FLIP 0 /* netback driver does not support feature-rx-flip */
105 #define XNB_DEBUG /* hardcoded on during development */
108 #define DPRINTF(fmt, args...) \
109 printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
111 #define DPRINTF(fmt, args...) do {} while (0)
114 /* Default length for stack-allocated grant tables */
115 #define GNTTAB_LEN (64)
117 /* Features supported by all backends. TSO and LRO can be negotiated */
118 #define XNB_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
120 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
121 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
124 * Two argument version of the standard macro. Second argument is a tentative
127 #define RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({ \
128 unsigned int req = (_r)->sring->req_prod - cons; \
129 unsigned int rsp = RING_SIZE(_r) - \
130 (cons - (_r)->rsp_prod_pvt); \
131 req < rsp ? req : rsp; \
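/*
 * Illustrative sketch (not driver code, assuming a local tx back ring
 * pointer txb): the two-argument form lets a caller walk requests with a
 * tentative, private consumer index and commit req_cons only once a
 * complete packet has been parsed, which is how xnb_ring2pkt() and
 * xnb_recv() below use it.
 */
#if 0
	RING_IDX idx = txb->req_cons;
	while (RING_HAS_UNCONSUMED_REQUESTS_2(txb, idx)) {
		const netif_tx_request_t *tx = RING_GET_REQUEST(txb, idx);
		/* ... inspect *tx without consuming it ... */
		idx++;
	}
	txb->req_cons = idx;	/* commit only after a full packet */
#endif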
134 #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
135 #define virt_to_offset(x) ((x) & (PAGE_SIZE - 1))
138 * Predefined array type of grant table copy descriptors. Used to pass around
139 * statically allocated memory structures.
141 typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];
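/*
 * Usage note (editorial): because gnttab_copy_table is an array type, the
 * storage lives with its owner (e.g. the rx_gnttab and tx_gnttab members
 * of struct xnb_softc below) while helper functions receive it as an
 * ordinary parameter, where it decays to a struct gnttab_copy pointer to
 * the first of the GNTTAB_LEN entries.
 */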
143 /*--------------------------- Forward Declarations ---------------------------*/
147 static void xnb_attach_failed(struct xnb_softc *xnb,
148 int err, const char *fmt, ...)
150 static int xnb_shutdown(struct xnb_softc *xnb);
151 static int create_netdev(device_t dev);
152 static int xnb_detach(device_t dev);
153 static int xen_net_read_mac(device_t dev, uint8_t mac[]);
154 static int xnb_ifmedia_upd(struct ifnet *ifp);
155 static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
156 static void xnb_intr(void *arg);
157 static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
158 const struct mbuf *mbufc, gnttab_copy_table gnttab);
159 static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
160 struct mbuf **mbufc, struct ifnet *ifnet,
161 gnttab_copy_table gnttab);
162 static int xnb_ring2pkt(struct xnb_pkt *pkt,
163 const netif_tx_back_ring_t *tx_ring,
165 static void xnb_txpkt2rsp(const struct xnb_pkt *pkt,
166 netif_tx_back_ring_t *ring, int error);
167 static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
168 static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
169 const struct mbuf *mbufc,
170 gnttab_copy_table gnttab,
171 const netif_tx_back_ring_t *txb,
172 domid_t otherend_id);
173 static void xnb_update_mbufc(struct mbuf *mbufc,
174 const gnttab_copy_table gnttab, int n_entries);
175 static int xnb_mbufc2pkt(const struct mbuf *mbufc,
177 RING_IDX start, int space);
178 static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
179 const struct mbuf *mbufc,
180 gnttab_copy_table gnttab,
181 const netif_rx_back_ring_t *rxb,
182 domid_t otherend_id);
183 static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
184 const gnttab_copy_table gnttab, int n_entries,
185 netif_rx_back_ring_t *ring);
186 static void xnb_add_mbuf_cksum(struct mbuf *mbufc);
187 static void xnb_stop(struct xnb_softc*);
188 static int xnb_ioctl(struct ifnet*, u_long, caddr_t);
189 static void xnb_start_locked(struct ifnet*);
190 static void xnb_start(struct ifnet*);
191 static void xnb_ifinit_locked(struct xnb_softc*);
192 static void xnb_ifinit(void*);
194 static int xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
195 static int xnb_dump_rings(SYSCTL_HANDLER_ARGS);
197 /*------------------------------ Data Structures -----------------------------*/
201 * Representation of a xennet packet. Simplified version of a packet as
202 * stored in the Xen tx ring. Applicable to both RX and TX packets
206 * Array index of the first data-bearing (i.e., not extra info) entry
212 * Array index of the second data-bearing entry for this packet.
213 * Invalid if the packet has only one data-bearing entry. If the
214 * packet has more than two data-bearing entries, then the second
215 * through the last will be sequential modulo the ring size
220 * Optional extra info. Only valid if flags contains
221 * NETTXF_extra_info. Note that extra.type will always be
222 * XEN_NETIF_EXTRA_TYPE_GSO. Currently, no known netfront or netback
223 * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*
225 netif_extra_info_t extra;
227 /** Size of entire packet in bytes. */
230 /** The size of the first entry's data in bytes */
234 * Either NETTXF_ or NETRXF_ flags. Note that the flag values are
235 * not the same for TX and RX packets
240 * The number of valid data-bearing entries (either netif_tx_request's
241 * or netif_rx_response's) in the packet. If this is 0, it means the
242 * entire packet is invalid.
246 /** There was an error processing the packet */
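/*
 * Illustrative ring layout for a two-request packet with extra info
 * (e.g. GSO), matching how the fields above are filled in:
 *
 *   ring slot:  start        start+1       start+2
 *   contents:   1st data     extra info    2nd data
 *   xnb_pkt:    car = start  extra         cdr = start + 2
 *
 * Without extra info, cdr would be start + 1.  car_size holds the bytes
 * of the first data-bearing entry; the remaining entries together carry
 * size - car_size bytes.
 */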
250 /** xnb_pkt method: initialize it */
252 xnb_pkt_initialize(struct xnb_pkt *pxnb)
254 bzero(pxnb, sizeof(*pxnb));
257 /** xnb_pkt method: mark the packet as valid */
259 xnb_pkt_validate(struct xnb_pkt *pxnb)
264 /** xnb_pkt method: mark the packet as invalid */
266 xnb_pkt_invalidate(struct xnb_pkt *pxnb)
271 /** xnb_pkt method: Check whether the packet is valid */
273 xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
275 return (!pxnb->error);
279 /** xnb_pkt method: print the packet's contents in human-readable format*/
281 xnb_dump_pkt(const struct xnb_pkt *pkt) {
283 DPRINTF("Was passed a null pointer.\n");
286 DPRINTF("pkt address= %p\n", pkt);
287 DPRINTF("pkt->size=%d\n", pkt->size);
288 DPRINTF("pkt->car_size=%d\n", pkt->car_size);
289 DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
290 DPRINTF("pkt->list_len=%d\n", pkt->list_len);
291 /* DPRINTF("pkt->extra"); TODO */
292 DPRINTF("pkt->car=%d\n", pkt->car);
293 DPRINTF("pkt->cdr=%d\n", pkt->cdr);
294 DPRINTF("pkt->error=%d\n", pkt->error);
296 #endif /* XNB_DEBUG */
299 xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
302 DPRINTF("netif_tx_request index =%u\n", idx);
303 DPRINTF("netif_tx_request.gref =%u\n", txreq->gref);
304 DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
305 DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
306 DPRINTF("netif_tx_request.id =%hu\n", txreq->id);
307 DPRINTF("netif_tx_request.size =%hu\n", txreq->size);
313 * \brief Configuration data for a shared memory request ring
314 * used to communicate with the front-end client of this
317 struct xnb_ring_config {
319 * Runtime structures for ring access. Unfortunately, TX and RX rings
320 * use different data structures, and that cannot be changed since it
321 * is part of the interdomain protocol.
324 netif_rx_back_ring_t rx_ring;
325 netif_tx_back_ring_t tx_ring;
329 * The device bus address returned by the hypervisor when
330 * mapping the ring and required to unmap it when a connection
335 /** The pseudo-physical address where ring memory is mapped.*/
338 /** KVA address where ring memory is mapped. */
342 * Grant table handles, one per-ring page, returned by the
343 * hypervisor upon mapping of the ring and required to
344 * unmap it when a connection is torn down.
346 grant_handle_t handle;
348 /** The number of ring pages mapped for the current connection. */
352 * The grant references, one per-ring page, supplied by the
353 * front-end, allowing us to reference the ring pages in the
354 * front-end's domain and to map these pages into our own domain.
356 grant_ref_t ring_ref;
360 * Per-instance connection state flags.
364 /** Communication with the front-end has been established. */
365 XNBF_RING_CONNECTED = 0x01,
368 * Front-end requests exist in the ring and are waiting for
369 * xnb_xen_req objects to free up.
371 XNBF_RESOURCE_SHORTAGE = 0x02,
373 /** Connection teardown has started. */
374 XNBF_SHUTDOWN = 0x04,
376 /** A thread is already performing shutdown processing. */
377 XNBF_IN_SHUTDOWN = 0x08
381 * Types of rings. Used for array indices and to identify a ring's control
382 * data structure type
385 XNB_RING_TYPE_TX = 0, /* ID of TX rings, used for array indices */
386 XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */
391 * Per-instance configuration data.
394 /** NewBus device corresponding to this instance. */
397 /* Media related fields */
399 /** Generic network media state */
400 struct ifmedia sc_media;
402 /** Media carrier info */
403 struct ifnet *xnb_ifp;
405 /** Our own private carrier state */
408 /** Device MAC Address */
409 uint8_t mac[ETHER_ADDR_LEN];
411 /* Xen related fields */
414 * \brief The netif protocol ABI in effect.
416 * There are situations where the back and front ends can
417 * have different native ABIs (e.g. Intel x86_64 and
418 * 32-bit x86 domains on the same machine). The back-end
419 * always accommodates the front-end's native ABI. That
420 * value is pulled from the XenStore and recorded here.
425 * Name of the bridge to which this VIF is connected, if any
426 * This field is dynamically allocated by xenbus and must be free()ed
427 * when no longer needed
431 /** The interrupt-driven event channel used to signal ring events. */
432 evtchn_port_t evtchn;
434 /** Xen device handle.*/
437 /** Handle to the communication ring event channel. */
438 xen_intr_handle_t xen_intr_handle;
441 * \brief Cached value of the front-end's domain id.
443 * This value is used once for each mapped page in
444 * a transaction. We cache it to avoid incurring the
445 * cost of an ivar access every time this is needed.
450 * Undocumented frontend feature. Has something to do with
454 /** Undocumented frontend feature */
456 /** Undocumented frontend feature */
458 /** Can checksum TCP/UDP over IPv4 */
461 /* Implementation related fields */
463 * Preallocated grant table copy descriptor for RX operations.
464 * Access must be protected by rx_lock
466 gnttab_copy_table rx_gnttab;
469 * Preallocated grant table copy descriptor for TX operations.
470 * Access must be protected by tx_lock
472 gnttab_copy_table tx_gnttab;
476 * Resource representing allocated physical address space
477 * associated with our per-instance kva region.
479 struct resource *pseudo_phys_res;
481 /** Resource id for allocated physical address space. */
482 int pseudo_phys_res_id;
485 /** Ring mapping and interrupt configuration data. */
486 struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES];
489 * Global pool of kva used for mapping remote domain ring
490 * and I/O transaction data.
494 /** Pseudo-physical address corresponding to kva. */
495 uint64_t gnt_base_addr;
497 /** Various configuration and state bit flags. */
500 /** Mutex protecting per-instance data in the receive path. */
503 /** Mutex protecting per-instance data in the softc structure. */
506 /** Mutex protecting per-instance data in the transmit path. */
509 /** The size of the global kva pool. */
513 /*---------------------------- Debugging functions ---------------------------*/
516 xnb_dump_gnttab_copy(const struct gnttab_copy *entry)
519 printf("NULL grant table pointer\n");
523 if (entry->flags & GNTCOPY_dest_gref)
524 printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
526 printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn);
527 printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
528 printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
529 if (entry->flags & GNTCOPY_source_gref)
530 printf("gnttab source ref=\t%u\n", entry->source.u.ref);
532 printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn);
533 printf("gnttab source offset=\t%hu\n", entry->source.offset);
534 printf("gnttab source domid=\t%hu\n", entry->source.domid);
535 printf("gnttab len=\t%hu\n", entry->len);
536 printf("gnttab flags=\t%hu\n", entry->flags);
537 printf("gnttab status=\t%hd\n", entry->status);
541 xnb_dump_rings(SYSCTL_HANDLER_ARGS)
543 static char results[720];
544 struct xnb_softc const* xnb = (struct xnb_softc*)arg1;
545 netif_rx_back_ring_t const* rxb =
546 &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
547 netif_tx_back_ring_t const* txb =
548 &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
550 /* empty the result strings */
553 if (!txb || !txb->sring || !rxb || !rxb->sring)
554 return (SYSCTL_OUT(req, results, strnlen(results, 720)));
556 snprintf(results, 720,
557 "\n\t%35s %18s\n" /* TX, RX */
558 "\t%16s %18d %18d\n" /* req_cons */
559 "\t%16s %18d %18d\n" /* nr_ents */
560 "\t%16s %18d %18d\n" /* rsp_prod_pvt */
561 "\t%16s %18p %18p\n" /* sring */
562 "\t%16s %18d %18d\n" /* req_prod */
563 "\t%16s %18d %18d\n" /* req_event */
564 "\t%16s %18d %18d\n" /* rsp_prod */
565 "\t%16s %18d %18d\n", /* rsp_event */
567 "req_cons", txb->req_cons, rxb->req_cons,
568 "nr_ents", txb->nr_ents, rxb->nr_ents,
569 "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt,
570 "sring", txb->sring, rxb->sring,
571 "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod,
572 "sring->req_event", txb->sring->req_event, rxb->sring->req_event,
573 "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod,
574 "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event);
576 return (SYSCTL_OUT(req, results, strnlen(results, 720)));
580 xnb_dump_mbuf(const struct mbuf *m)
587 printf("xnb_dump_mbuf:\n");
588 if (m->m_flags & M_PKTHDR) {
589 printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, "
591 m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags,
592 m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz);
593 printf(" rcvif=%16p, len=%19d\n",
594 m->m_pkthdr.rcvif, m->m_pkthdr.len);
596 printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n",
597 m->m_next, m->m_nextpkt, m->m_data);
598 printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n",
599 m->m_len, m->m_flags, m->m_type);
602 d = mtod(m, uint8_t*);
606 for (i = 0; (i < 16) && (len > 0); i++, len--) {
607 printf("%02hhx ", *(d++));
612 #endif /* XNB_DEBUG */
614 /*------------------------ Inter-Domain Communication ------------------------*/
616 * Free dynamically allocated KVA or pseudo-physical address allocations.
618 * \param xnb Per-instance xnb configuration structure.
621 xnb_free_communication_mem(struct xnb_softc *xnb)
625 kva_free(xnb->kva, xnb->kva_size);
627 if (xnb->pseudo_phys_res != NULL) {
628 bus_release_resource(xnb->dev, SYS_RES_MEMORY,
629 xnb->pseudo_phys_res_id,
630 xnb->pseudo_phys_res);
631 xnb->pseudo_phys_res = NULL;
636 xnb->gnt_base_addr = 0;
640 * Cleanup all inter-domain communication mechanisms.
642 * \param xnb Per-instance xnb configuration structure.
645 xnb_disconnect(struct xnb_softc *xnb)
647 struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES];
651 xen_intr_unbind(xnb->xen_intr_handle);
654 * We may still have another thread currently processing requests. We
655 * must acquire the rx and tx locks to make sure those threads are done,
656 * but we can release those locks as soon as we acquire them, because no
657 * more interrupts will be arriving.
659 mtx_lock(&xnb->tx_lock);
660 mtx_unlock(&xnb->tx_lock);
661 mtx_lock(&xnb->rx_lock);
662 mtx_unlock(&xnb->rx_lock);
664 /* Free malloc'd softc member variables */
665 if (xnb->bridge != NULL)
666 free(xnb->bridge, M_XENSTORE);
668 /* All request processing has stopped, so unmap the rings */
669 for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
670 gnts[i].host_addr = xnb->ring_configs[i].gnt_addr;
671 gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr;
672 gnts[i].handle = xnb->ring_configs[i].handle;
674 error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts,
676 KASSERT(error == 0, ("Grant table unmap op failed (%d)", error));
678 xnb_free_communication_mem(xnb);
680 * Zero the ring config structs because the pointers, handles, and
681 * grant refs contained therein are no longer valid.
683 bzero(&xnb->ring_configs[XNB_RING_TYPE_TX],
684 sizeof(struct xnb_ring_config));
685 bzero(&xnb->ring_configs[XNB_RING_TYPE_RX],
686 sizeof(struct xnb_ring_config));
688 xnb->flags &= ~XNBF_RING_CONNECTED;
693 * Map a single shared memory ring into domain local address space and
694 * initialize its control structure
696 * \param xnb Per-instance xnb configuration structure
697 * \param ring_type Array index of this ring in the xnb's array of rings
701 xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type)
703 struct gnttab_map_grant_ref gnt;
704 struct xnb_ring_config *ring = &xnb->ring_configs[ring_type];
707 /* TX ring type = 0, RX = 1 */
708 ring->va = xnb->kva + ring_type * PAGE_SIZE;
709 ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE;
711 gnt.host_addr = ring->gnt_addr;
712 gnt.flags = GNTMAP_host_map;
713 gnt.ref = ring->ring_ref;
714 gnt.dom = xnb->otherend_id;
716 error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1);
718 panic("netback: Ring page grant table op failed (%d)", error);
720 if (gnt.status != 0) {
723 xenbus_dev_fatal(xnb->dev, error,
724 "Ring shared page mapping failed. "
725 "Status %d.", gnt.status);
727 ring->handle = gnt.handle;
728 ring->bus_addr = gnt.dev_bus_addr;
730 if (ring_type == XNB_RING_TYPE_TX) {
731 BACK_RING_INIT(&ring->back_ring.tx_ring,
732 (netif_tx_sring_t*)ring->va,
733 ring->ring_pages * PAGE_SIZE);
734 } else if (ring_type == XNB_RING_TYPE_RX) {
735 BACK_RING_INIT(&ring->back_ring.rx_ring,
736 (netif_rx_sring_t*)ring->va,
737 ring->ring_pages * PAGE_SIZE);
739 xenbus_dev_fatal(xnb->dev, error,
740 "Unknown ring type %d", ring_type);
748 * Setup the shared memory rings and bind an interrupt to the event channel
749 * used to notify us of ring changes.
751 * \param xnb Per-instance xnb configuration structure.
754 xnb_connect_comms(struct xnb_softc *xnb)
759 if ((xnb->flags & XNBF_RING_CONNECTED) != 0)
763 * KVA for our rings is at the tail of the region of KVA allocated
764 * by xnb_alloc_communication_mem().
766 for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
767 error = xnb_connect_ring(xnb, i);
772 xnb->flags |= XNBF_RING_CONNECTED;
774 error = xen_intr_bind_remote_port(xnb->dev,
778 xnb_intr, /*arg*/xnb,
779 INTR_TYPE_BIO | INTR_MPSAFE,
780 &xnb->xen_intr_handle);
782 (void)xnb_disconnect(xnb);
783 xenbus_dev_fatal(xnb->dev, error, "binding event channel");
787 DPRINTF("rings connected!\n");
793 * Size KVA and pseudo-physical address allocations based on negotiated
794 * values for the size and number of I/O requests, and the size of our
795 * communication ring.
797 * \param xnb Per-instance xnb configuration structure.
799 * These address spaces are used to dynamically map pages in the
800 * front-end's domain into our own.
803 xnb_alloc_communication_mem(struct xnb_softc *xnb)
808 for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
809 xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
812 xnb->kva = kva_alloc(xnb->kva_size);
815 xnb->gnt_base_addr = xnb->kva;
816 #else /* defined XENHVM */
818 * Reserve a range of pseudo physical memory that we can map
819 * into kva. These pages will only be backed by machine
820 * pages ("real memory") during the lifetime of front-end requests
821 * via grant table operations. We will map the netif tx and rx rings
824 xnb->pseudo_phys_res_id = 0;
825 xnb->pseudo_phys_res = bus_alloc_resource(xnb->dev, SYS_RES_MEMORY,
826 &xnb->pseudo_phys_res_id,
827 0, ~0, xnb->kva_size,
829 if (xnb->pseudo_phys_res == NULL) {
833 xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
834 xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
835 #endif /* !defined XENHVM */
840 * Collect information from the XenStore related to our device and its frontend
842 * \param xnb Per-instance xnb configuration structure.
845 xnb_collect_xenstore_info(struct xnb_softc *xnb)
848 * \todo Linux collects the following info. We should collect most
850 * "feature-rx-notify"
852 const char *otherend_path;
853 const char *our_path;
855 unsigned int rx_copy, bridge_len;
856 uint8_t no_csum_offload;
858 otherend_path = xenbus_get_otherend_path(xnb->dev);
859 our_path = xenbus_get_node(xnb->dev);
861 /* Collect the critical communication parameters */
862 err = xs_gather(XST_NIL, otherend_path,
863 "tx-ring-ref", "%l" PRIu32,
864 &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
865 "rx-ring-ref", "%l" PRIu32,
866 &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
867 "event-channel", "%" PRIu32, &xnb->evtchn,
870 xenbus_dev_fatal(xnb->dev, err,
871 "Unable to retrieve ring information from "
872 "frontend %s. Unable to connect.",
877 /* Collect the handle from xenstore */
878 err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
880 xenbus_dev_fatal(xnb->dev, err,
881 "Error reading handle from frontend %s. "
882 "Unable to connect.", otherend_path);
886 * Collect the bridge name, if any. We do not need bridge_len; we just
889 err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
890 (void**)&xnb->bridge);
895 * Does the frontend request that we use rx copy? If not, return an
896 * error because this driver only supports rx copy.
898 err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
899 "%" PRIu32, &rx_copy);
905 xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
910 * \todo: figure out the exact meaning of this feature, and when
911 * the frontend will set it to true. It should be set to true
915 /* return EOPNOTSUPP;*/
917 /** \todo Collect the rx notify feature */
919 /* Collect the feature-sg. */
920 if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
921 "%hhu", &xnb->can_sg) < 0)
924 /* Collect remaining frontend features */
925 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
926 "%hhu", &xnb->gso) < 0)
929 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
930 "%hhu", &xnb->gso_prefix) < 0)
933 if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
934 "%hhu", &no_csum_offload) < 0)
936 xnb->ip_csum = (no_csum_offload == 0);
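/*
 * For reference, the XenStore nodes consulted above look roughly like the
 * following (paths abbreviated and values hypothetical; an illustrative
 * example, not output of this driver):
 *
 *   <our_path>/handle                        = "0"
 *   <our_path>/bridge                        = "xenbr0"
 *   <otherend_path>/tx-ring-ref              = "768"
 *   <otherend_path>/rx-ring-ref              = "769"
 *   <otherend_path>/event-channel            = "10"
 *   <otherend_path>/request-rx-copy          = "1"
 *   <otherend_path>/feature-sg               = "1"
 *   <otherend_path>/feature-gso-tcpv4        = "1"
 *   <otherend_path>/feature-no-csum-offload  = "0"
 */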
942 * Supply information about the physical device to the frontend
945 * \param xnb Per-instance xnb configuration structure.
948 xnb_publish_backend_info(struct xnb_softc *xnb)
950 struct xs_transaction xst;
951 const char *our_path;
954 our_path = xenbus_get_node(xnb->dev);
957 error = xs_transaction_start(&xst);
959 xenbus_dev_fatal(xnb->dev, error,
960 "Error publishing backend info "
961 "(start transaction)");
965 error = xs_printf(xst, our_path, "feature-sg",
970 error = xs_printf(xst, our_path, "feature-gso-tcpv4",
971 "%d", XNB_GSO_TCPV4);
975 error = xs_printf(xst, our_path, "feature-rx-copy",
980 error = xs_printf(xst, our_path, "feature-rx-flip",
985 error = xs_transaction_end(xst, 0);
986 if (error != 0 && error != EAGAIN) {
987 xenbus_dev_fatal(xnb->dev, error, "ending transaction");
991 } while (error == EAGAIN);
997 * Connect to our netfront peer now that it has completed publishing
998 * its configuration into the XenStore.
1000 * \param xnb Per-instance xnb configuration structure.
1003 xnb_connect(struct xnb_softc *xnb)
1007 if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
1010 if (xnb_collect_xenstore_info(xnb) != 0)
1013 xnb->flags &= ~XNBF_SHUTDOWN;
1015 /* Read front end configuration. */
1017 /* Allocate resources whose size depends on front-end configuration. */
1018 error = xnb_alloc_communication_mem(xnb);
1020 xenbus_dev_fatal(xnb->dev, error,
1021 "Unable to allocate communication memory");
1026 * Connect communication channel.
1028 error = xnb_connect_comms(xnb);
1030 /* Specific errors are reported by xnb_connect_comms(). */
1035 /* Ready for I/O. */
1036 xenbus_set_state(xnb->dev, XenbusStateConnected);
1039 /*-------------------------- Device Teardown Support -------------------------*/
1041 * Perform device shutdown functions.
1043 * \param xnb Per-instance xnb configuration structure.
1045 * Mark this instance as shutting down, wait for any active requests
1046 * to drain, disconnect from the front-end, and notify any waiters (e.g.
1047 * a thread invoking our detach method) that detach can now proceed.
1050 xnb_shutdown(struct xnb_softc *xnb)
1053 * Due to the need to drop our mutex during some
1054 * xenbus operations, it is possible for two threads
1055 * to attempt to close out shutdown processing at
1056 * the same time. Tell the caller that hits this
1057 * race to try back later.
1059 if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
1062 xnb->flags |= XNBF_SHUTDOWN;
1064 xnb->flags |= XNBF_IN_SHUTDOWN;
1066 mtx_unlock(&xnb->sc_lock);
1067 /* Free the network interface */
1069 if (xnb->xnb_ifp != NULL) {
1070 ether_ifdetach(xnb->xnb_ifp);
1071 if_free(xnb->xnb_ifp);
1072 xnb->xnb_ifp = NULL;
1074 mtx_lock(&xnb->sc_lock);
1076 xnb_disconnect(xnb);
1078 mtx_unlock(&xnb->sc_lock);
1079 if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
1080 xenbus_set_state(xnb->dev, XenbusStateClosing);
1081 mtx_lock(&xnb->sc_lock);
1083 xnb->flags &= ~XNBF_IN_SHUTDOWN;
1086 /* Indicate to xnb_detach() that it is safe to proceed. */
1093 * Report an attach time error to the console and Xen, and cleanup
1094 * this instance by forcing immediate detach processing.
1096 * \param xnb Per-instance xnb configuration structure.
1097 * \param err Errno describing the error.
1098 * \param fmt Printf style format and arguments
1101 xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
1107 va_copy(ap_hotplug, ap);
1108 xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
1109 "hotplug-error", fmt, ap_hotplug);
1111 xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
1112 "hotplug-status", "error");
1114 xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
1117 xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
1119 xnb_detach(xnb->dev);
1122 /*---------------------------- NewBus Entrypoints ----------------------------*/
1124 * Inspect a XenBus device and claim it if it is of the appropriate type.
1126 * \param dev NewBus device object representing a candidate XenBus device.
1128 * \return 0 for success, errno codes for failure.
1131 xnb_probe(device_t dev)
1133 if (!strcmp(xenbus_get_type(dev), "vif")) {
1134 DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
1135 devclass_get_name(device_get_devclass(dev)));
1136 device_set_desc(dev, "Backend Virtual Network Device");
1144 * Setup sysctl variables to control various Network Back parameters.
1146 * \param xnb Xen Net Back softc.
1150 xnb_setup_sysctl(struct xnb_softc *xnb)
1152 struct sysctl_ctx_list *sysctl_ctx = NULL;
1153 struct sysctl_oid *sysctl_tree = NULL;
1155 sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
1156 if (sysctl_ctx == NULL)
1159 sysctl_tree = device_get_sysctl_tree(xnb->dev);
1160 if (sysctl_tree == NULL)
1164 SYSCTL_ADD_PROC(sysctl_ctx,
1165 SYSCTL_CHILDREN(sysctl_tree),
1167 "unit_test_results",
1168 CTLTYPE_STRING | CTLFLAG_RD,
1173 "Results of builtin unit tests");
1175 SYSCTL_ADD_PROC(sysctl_ctx,
1176 SYSCTL_CHILDREN(sysctl_tree),
1179 CTLTYPE_STRING | CTLFLAG_RD,
1184 "Xennet Back Rings");
1185 #endif /* XNB_DEBUG */
1189 * Create a network device.
1190 * \param dev NewBus device object representing this backend instance
1193 create_netdev(device_t dev)
1196 struct xnb_softc *xnb;
1199 xnb = device_get_softc(dev);
1200 mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
1201 mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
1202 mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);
1206 ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
1207 ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
1208 ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);
1210 err = xen_net_read_mac(dev, xnb->mac);
1212 /* Set up ifnet structure */
1213 ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
1214 ifp->if_softc = xnb;
1215 if_initname(ifp, "xnb", device_get_unit(dev));
1216 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1217 ifp->if_ioctl = xnb_ioctl;
1218 ifp->if_output = ether_output;
1219 ifp->if_start = xnb_start;
1221 ifp->if_watchdog = xnb_watchdog;
1223 ifp->if_init = xnb_ifinit;
1224 ifp->if_mtu = ETHERMTU;
1225 ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;
1227 ifp->if_hwassist = XNB_CSUM_FEATURES;
1228 ifp->if_capabilities = IFCAP_HWCSUM;
1229 ifp->if_capenable = IFCAP_HWCSUM;
1231 ether_ifattach(ifp, xnb->mac);
1239 * Attach to a XenBus device that has been claimed by our probe routine.
1241 * \param dev NewBus device object representing this Xen Net Back instance.
1243 * \return 0 for success, errno codes for failure.
1246 xnb_attach(device_t dev)
1248 struct xnb_softc *xnb;
1252 error = create_netdev(dev);
1254 xenbus_dev_fatal(dev, error, "creating netdev");
1258 DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
1261 * Basic initialization.
1262 * After this block it is safe to call xnb_detach()
1263 * to clean up any allocated data for this instance.
1265 xnb = device_get_softc(dev);
1266 xnb->otherend_id = xenbus_get_otherend_id(dev);
1267 for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
1268 xnb->ring_configs[i].ring_pages = 1;
1272 * Setup sysctl variables.
1274 xnb_setup_sysctl(xnb);
1276 /* Update hot-plug status to satisfy xend. */
1277 error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
1278 "hotplug-status", "connected");
1280 xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
1281 xenbus_get_node(xnb->dev));
1285 if ((error = xnb_publish_backend_info(xnb)) != 0) {
1287 * If we can't publish our data, we cannot participate
1288 * in this connection, and waiting for a front-end state
1289 * change will not help the situation.
1291 xnb_attach_failed(xnb, error,
1292 "Publishing backend status for %s",
1293 xenbus_get_node(xnb->dev));
1297 /* Tell the front end that we are ready to connect. */
1298 xenbus_set_state(dev, XenbusStateInitWait);
1304 * Detach from a net back device instance.
1306 * \param dev NewBus device object representing this Xen Net Back instance.
1308 * \return 0 for success, errno codes for failure.
1310 * \note A net back device may be detached at any time in its life-cycle,
1311 * including part way through the attach process. For this reason,
1312 * initialization order and the initialization state checks in this
1313 * routine must be carefully coupled so that attach time failures
1314 * are gracefully handled.
1317 xnb_detach(device_t dev)
1319 struct xnb_softc *xnb;
1323 xnb = device_get_softc(dev);
1324 mtx_lock(&xnb->sc_lock);
1325 while (xnb_shutdown(xnb) == EAGAIN) {
1326 msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
1329 mtx_unlock(&xnb->sc_lock);
1332 mtx_destroy(&xnb->tx_lock);
1333 mtx_destroy(&xnb->rx_lock);
1334 mtx_destroy(&xnb->sc_lock);
1339 * Prepare this net back device for suspension of this VM.
1341 * \param dev NewBus device object representing this Xen net Back instance.
1343 * \return 0 for success, errno codes for failure.
1346 xnb_suspend(device_t dev)
1352 * Perform any processing required to recover from a suspended state.
1354 * \param dev NewBus device object representing this Xen Net Back instance.
1356 * \return 0 for success, errno codes for failure.
1359 xnb_resume(device_t dev)
1365 * Handle state changes expressed via the XenStore by our front-end peer.
1367 * \param dev NewBus device object representing this Xen
1368 * Net Back instance.
1369 * \param frontend_state The new state of the front-end.
1371 * \return 0 for success, errno codes for failure.
1374 xnb_frontend_changed(device_t dev, XenbusState frontend_state)
1376 struct xnb_softc *xnb;
1378 xnb = device_get_softc(dev);
1380 DPRINTF("frontend_state=%s, xnb_state=%s\n",
1381 xenbus_strstate(frontend_state),
1382 xenbus_strstate(xenbus_get_state(xnb->dev)));
1384 switch (frontend_state) {
1385 case XenbusStateInitialising:
1387 case XenbusStateInitialised:
1388 case XenbusStateConnected:
1391 case XenbusStateClosing:
1392 case XenbusStateClosed:
1393 mtx_lock(&xnb->sc_lock);
1395 mtx_unlock(&xnb->sc_lock);
1396 if (frontend_state == XenbusStateClosed)
1397 xenbus_set_state(xnb->dev, XenbusStateClosed);
1400 xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
1407 /*---------------------------- Request Processing ----------------------------*/
1409 * Interrupt handler bound to the shared ring's event channel.
1410 * Entry point for the xennet transmit path in netback.
1411 * Transfers packets from the Xen ring to the host's generic networking stack.
1413 * \param arg Callback argument registered during event channel
1414 * binding - the xnb_softc for this instance.
1419 struct xnb_softc *xnb;
1421 netif_tx_back_ring_t *txb;
1422 RING_IDX req_prod_local;
1424 xnb = (struct xnb_softc *)arg;
1426 txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
1428 mtx_lock(&xnb->tx_lock);
1431 req_prod_local = txb->sring->req_prod;
1438 err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
1440 if (err || (mbufc == NULL))
1443 /* Send the packet to the generic network stack */
1444 (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
1447 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
1449 xen_intr_signal(xnb->xen_intr_handle);
1451 txb->sring->req_event = txb->req_cons + 1;
1453 } while (txb->sring->req_prod != req_prod_local);
1454 mtx_unlock(&xnb->tx_lock);
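/*
 * Note on the loop above (editorial): it uses the standard Xen shared
 * ring idiom for re-enabling notifications without losing a race.  After
 * draining requests, req_event is set to req_cons + 1 so the frontend
 * will raise an event for its next request, and req_prod is then
 * re-checked to catch any request that slipped in between the drain and
 * the event-enable.  xnb_start_locked() applies the same pattern to the
 * rx ring.
 */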
1461 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
1462 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
1463 * \param[out] pkt The returned packet. If there is an error building
1464 * the packet, pkt.list_len will be set to 0.
1465 * \param[in] tx_ring Pointer to the Ring that is the input to this function
1466 * \param[in] start The ring index of the first potential request
1467 * \return The number of requests consumed to build this packet
1470 xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
1476 * 2) Read the first request of the packet
1477 * 3) Read the extras
1479 * 5) Loop on the remainder of the packet
1480 * 6) Finalize pkt (stuff like car_size and list_len)
1483 int discard = 0; /* whether to discard the packet */
1484 int more_data = 0; /* there are more requests past the last one */
1485 uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */
1487 xnb_pkt_initialize(pkt);
1489 /* Read the first request */
1490 if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
1491 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
1492 pkt->size = tx->size;
1493 pkt->flags = tx->flags & ~NETTXF_more_data;
1494 more_data = tx->flags & NETTXF_more_data;
1500 /* Read the extra info */
1501 if ((pkt->flags & NETTXF_extra_info) &&
1502 RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
1503 netif_extra_info_t *ext =
1504 (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx);
1505 pkt->extra.type = ext->type;
1506 switch (pkt->extra.type) {
1507 case XEN_NETIF_EXTRA_TYPE_GSO:
1508 pkt->extra.u.gso = ext->u.gso;
1512 * The reference Linux netfront driver will
1513 * never set any other extra.type. So we don't
1514 * know what to do with it. Let's print an
1515 * error, then consume and discard the packet
1517 printf("xnb(%s:%d): Unknown extra info type %d."
1518 " Discarding packet\n",
1519 __func__, __LINE__, pkt->extra.type);
1520 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
1522 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
1528 pkt->extra.flags = ext->flags;
1529 if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
1531 * The reference linux netfront driver never sets this
1532 * flag (nor does any other known netfront). So we
1533 * will discard the packet.
1535 printf("xnb(%s:%d): Request sets "
1536 "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
1537 "that\n", __func__, __LINE__);
1538 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
1539 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
1546 /* Set cdr. If there is no more data, cdr is invalid */
1549 /* Loop on remainder of packet */
1550 while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
1551 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
1553 cdr_size += tx->size;
1554 if (tx->flags & ~NETTXF_more_data) {
1555 /* There should be no other flags set at this point */
1556 printf("xnb(%s:%d): Request sets unknown flags %d "
1557 "after the 1st request in the packet.\n",
1558 __func__, __LINE__, tx->flags);
1559 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
1560 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
1563 more_data = tx->flags & NETTXF_more_data;
1567 /* Finalize packet */
1568 if (more_data != 0) {
1569 /* The ring ran out of requests before finishing the packet */
1570 xnb_pkt_invalidate(pkt);
1571 idx = start; /* tell caller that we consumed no requests */
1573 /* Calculate car_size */
1574 pkt->car_size = pkt->size - cdr_size;
1577 xnb_pkt_invalidate(pkt);
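/*
 * Worked example (illustrative): a packet arriving as three requests
 * carrying 1000, 800, and 200 bytes of data has tx->size = 2000 in its
 * first request, because the first request's size field covers the whole
 * packet.  The loop above accumulates cdr_size = 800 + 200 = 1000, so
 * car_size = pkt->size - cdr_size = 1000, the data in the first request
 * alone.
 */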
1585 * Respond to all the requests that constituted pkt. Builds the responses and
1586 * writes them to the ring, but doesn't push them to the shared ring.
1587 * \param[in] pkt the packet that needs a response
1588 * \param[in] error true if there was an error handling the packet, such
1589 * as in the hypervisor copy op or mbuf allocation
1590 * \param[out] ring Responses go here
1593 xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
1598 * 1) Respond to the first request
1599 * 2) Respond to the extra info request
1600 * Loop through every remaining request in the packet, generating
1601 * responses that copy those requests' ids and set the status
1604 netif_tx_request_t *tx;
1605 netif_tx_response_t *rsp;
1609 status = (xnb_pkt_is_valid(pkt) == 0) || error ?
1610 NETIF_RSP_ERROR : NETIF_RSP_OKAY;
1611 KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
1612 ("Cannot respond to ring requests out of order"));
1614 if (pkt->list_len >= 1) {
1616 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
1618 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
1620 rsp->status = status;
1621 ring->rsp_prod_pvt++;
1623 if (pkt->flags & NETTXF_extra_info) {
1624 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
1625 rsp->status = NETIF_RSP_NULL;
1626 ring->rsp_prod_pvt++;
1630 for (i = 0; i < pkt->list_len - 1; i++) {
1632 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
1634 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
1636 rsp->status = status;
1637 ring->rsp_prod_pvt++;
1642 * Create an mbuf chain to represent a packet. Initializes all of the headers
1643 * in the mbuf chain, but does not copy the data. The returned chain must be
1644 * free()'d when no longer needed
1645 * \param[in] pkt A packet to model the mbuf chain after
1646 * \return A newly allocated mbuf chain, possibly with clusters attached.
1650 xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
1653 * \todo consider using a memory pool for mbufs instead of
1654 * reallocating them for every packet
1656 /** \todo handle extra data */
1659 m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);
1662 m->m_pkthdr.rcvif = ifp;
1663 if (pkt->flags & NETTXF_data_validated) {
1665 * We lie to the host OS and always tell it that the
1666 * checksums are ok, because the packet is unlikely to
1667 * get corrupted going across domains.
1669 m->m_pkthdr.csum_flags = (
1675 m->m_pkthdr.csum_data = 0xffff;
1682 * Build a gnttab_copy table that can be used to copy data from a pkt
1683 * to an mbufc. Does not actually perform the copy. Always uses grefs on
1685 * \param[in] pkt pkt's associated requests form the src for
1686 * the copy operation
1687 * \param[in] mbufc mbufc's storage forms the dest for the copy operation
1688 * \param[out] gnttab Storage for the returned grant table
1689 * \param[in] txb Pointer to the backend ring structure
1690 * \param[in] otherend_id The domain ID of the other end of the copy
1691 * \return The number of gnttab entries filled
1694 xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
1695 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
1696 domid_t otherend_id)
1699 const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
1700 int gnt_idx = 0; /* index into grant table */
1701 RING_IDX r_idx = pkt->car; /* index into tx ring buffer */
1702 int r_ofs = 0; /* offset of next data within tx request's data area */
1703 int m_ofs = 0; /* offset of next data within mbuf's data area */
1704 /* size in bytes that still needs to be represented in the table */
1705 uint16_t size_remaining = pkt->size;
1707 while (size_remaining > 0) {
1708 const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
1709 const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
1710 const size_t req_size =
1711 r_idx == pkt->car ? pkt->car_size : txq->size;
1712 const size_t pkt_space = req_size - r_ofs;
1714 * space is the largest amount of data that can be copied in the
1715 * grant table's next entry
1717 const size_t space = MIN(pkt_space, mbuf_space);
1719 /* TODO: handle this error condition without panicking */
1720 KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));
1722 gnttab[gnt_idx].source.u.ref = txq->gref;
1723 gnttab[gnt_idx].source.domid = otherend_id;
1724 gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
1725 gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
1726 mtod(mbuf, vm_offset_t) + m_ofs);
1727 gnttab[gnt_idx].dest.offset = virt_to_offset(
1728 mtod(mbuf, vm_offset_t) + m_ofs);
1729 gnttab[gnt_idx].dest.domid = DOMID_SELF;
1730 gnttab[gnt_idx].len = space;
1731 gnttab[gnt_idx].flags = GNTCOPY_source_gref;
1736 size_remaining -= space;
1737 if (req_size - r_ofs <= 0) {
1738 /* Must move to the next tx request */
1740 r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
1742 if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
1743 /* Must move to the next mbuf */
1745 mbuf = mbuf->m_next;
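/*
 * Chunking example (illustrative): copying a 3000-byte packet whose first
 * request holds 2000 bytes (car_size) and whose second holds 1000, into
 * an mbuf chain whose first mbuf has 2048 bytes of trailing space,
 * produces three table entries:
 *
 *   entry 0: 2000 bytes  (request 1 exhausted; 48 bytes left in mbuf 1)
 *   entry 1:   48 bytes  (mbuf 1 exhausted)
 *   entry 2:  952 bytes  (rest of request 2 into the next mbuf)
 *
 * Each entry copies MIN(pkt_space, mbuf_space) bytes, as computed above.
 */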
1753 * Check the status of the grant copy operations, and update the mbuf
1754 * chain's various non-data fields to reflect the data present.
1755 * \param[in,out] mbufc mbuf chain to update. The chain must be valid and of
1756 * the correct length, and data should already be present
1757 * \param[in] gnttab A grant table for a just completed copy op
1758 * \param[in] n_entries The number of valid entries in the grant table
1761 xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
1764 struct mbuf *mbuf = mbufc;
1766 size_t total_size = 0;
1768 for (i = 0; i < n_entries; i++) {
1769 KASSERT(gnttab[i].status == GNTST_okay,
1770 ("Some gnttab_copy entry had error status %hd\n",
1773 mbuf->m_len += gnttab[i].len;
1774 total_size += gnttab[i].len;
1775 if (M_TRAILINGSPACE(mbuf) <= 0) {
1776 mbuf = mbuf->m_next;
1779 mbufc->m_pkthdr.len = total_size;
1781 xnb_add_mbuf_cksum(mbufc);
1785 * Dequeue at most one packet from the shared ring
1786 * \param[in,out] txb Netif tx ring. A packet will be removed from it, and
1787 * its private indices will be updated. But the indices
1788 * will not be pushed to the shared ring.
1789 * \param[in] ifnet Interface to which the packet will be sent
1790 * \param[in] otherend Domain ID of the other end of the ring
1791 * \param[out] mbufc The assembled mbuf chain, ready to send to the generic
1793 * \param[in,out] gnttab Pointer to enough memory for a grant table. We make
1794 * this a function parameter so that we will take less
1796 * \return An error code
1799 xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
1800 struct ifnet *ifnet, gnttab_copy_table gnttab)
1803 /* number of tx requests consumed to build the last packet */
1808 num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
1809 if (num_consumed == 0)
1810 return (0); /* Nothing to receive */
1812 /* update statistics independent of errors */
1813 ifnet->if_ipackets++;
1816 * if we got here, then 1 or more requests were consumed, but the packet
1817 * is not necessarily valid.
1819 if (xnb_pkt_is_valid(&pkt) == 0) {
1820 /* got a garbage packet, respond and drop it */
1821 xnb_txpkt2rsp(&pkt, txb, 1);
1822 txb->req_cons += num_consumed;
1823 DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
1825 ifnet->if_ierrors++;
1829 *mbufc = xnb_pkt2mbufc(&pkt, ifnet);
1831 if (*mbufc == NULL) {
1833 * Couldn't allocate mbufs. Respond and drop the packet. Do
1834 * not consume the requests
1836 xnb_txpkt2rsp(&pkt, txb, 1);
1837 DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
1839 ifnet->if_iqdrops++;
1843 nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);
1846 int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
1848 KASSERT(hv_ret == 0,
1849 ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
1850 xnb_update_mbufc(*mbufc, gnttab, nr_ents);
1853 xnb_txpkt2rsp(&pkt, txb, 0);
1854 txb->req_cons += num_consumed;
1859 * Create an xnb_pkt based on the contents of an mbuf chain.
1860 * \param[in] mbufc mbuf chain to transform into a packet
1861 * \param[out] pkt Storage for the newly generated xnb_pkt
1862 * \param[in] start The ring index of the first available slot in the rx
1864 * \param[in] space The number of free slots in the rx ring
1866 * \retval EINVAL mbufc was corrupt or not convertible into a pkt
1867 * \retval EAGAIN There was not enough space in the ring to queue the
1871 xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
1872 RING_IDX start, int space)
1877 if ((mbufc == NULL) ||
1878 ((mbufc->m_flags & M_PKTHDR) == 0) ||
1879 (mbufc->m_pkthdr.len == 0)) {
1880 xnb_pkt_invalidate(pkt);
1885 xnb_pkt_validate(pkt);
1887 pkt->size = mbufc->m_pkthdr.len;
1889 pkt->car_size = mbufc->m_len;
1891 if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
1892 pkt->flags |= NETRXF_extra_info;
1893 pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
1894 pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
1895 pkt->extra.u.gso.pad = 0;
1896 pkt->extra.u.gso.features = 0;
1897 pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
1898 pkt->extra.flags = 0;
1899 pkt->cdr = start + 2;
1901 pkt->cdr = start + 1;
1903 if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
1905 (NETRXF_csum_blank | NETRXF_data_validated);
1909 * Each ring response can have up to PAGE_SIZE of data.
1910 * Assume that we can defragment the mbuf chain efficiently
1911 * into responses so that each response but the last uses all
1914 pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;
1916 if (pkt->list_len > 1) {
1917 pkt->flags |= NETRXF_more_data;
1920 slots_required = pkt->list_len +
1921 (pkt->flags & NETRXF_extra_info ? 1 : 0);
1922 if (slots_required > space) {
1923 xnb_pkt_invalidate(pkt);
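/*
 * Sizing example (illustrative): a 5000-byte TSO packet with
 * PAGE_SIZE = 4096 yields list_len = (5000 + 4096 - 1) / 4096 = 2 data
 * responses plus one extra-info slot, so slots_required = 3.  With fewer
 * than 3 free slots in the ring, the packet is invalidated and the
 * caller sees EAGAIN, letting it requeue the mbuf and retry later.
 */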
1932 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
1933 * to the frontend's shared buffers. Does not actually perform the copy.
1934 * Always uses grefs on the other end's side.
1935 * \param[in] pkt pkt's associated responses form the dest for the copy
1937 * \param[in] mbufc The source for the copy operation
1938 * \param[out] gnttab Storage for the returned grant table
1939 * \param[in] rxb Pointer to the backend ring structure
1940 * \param[in] otherend_id The domain ID of the other end of the copy
1941 * \return The number of gnttab entries filled
1944 xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
1945 gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
1946 domid_t otherend_id)
1949 const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
1950 int gnt_idx = 0; /* index into grant table */
1951 RING_IDX r_idx = pkt->car; /* index into rx ring buffer */
1952 int r_ofs = 0; /* offset of next data within rx request's data area */
1953 int m_ofs = 0; /* offset of next data within mbuf's data area */
1954 /* size in bytes that still needs to be represented in the table */
1955 uint16_t size_remaining;
1957 size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;
1959 while (size_remaining > 0) {
1960 const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
1961 const size_t mbuf_space = mbuf->m_len - m_ofs;
1962 /* Xen shared pages have an implied size of PAGE_SIZE */
1963 const size_t req_size = PAGE_SIZE;
1964 const size_t pkt_space = req_size - r_ofs;
1966 * space is the largest amount of data that can be copied in the
1967 * grant table's next entry
1969 const size_t space = MIN(pkt_space, mbuf_space);
1971 /* TODO: handle this error condition without panicking */
1972 KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));
1974 gnttab[gnt_idx].dest.u.ref = rxq->gref;
1975 gnttab[gnt_idx].dest.domid = otherend_id;
1976 gnttab[gnt_idx].dest.offset = r_ofs;
1977 gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
1978 mtod(mbuf, vm_offset_t) + m_ofs);
1979 gnttab[gnt_idx].source.offset = virt_to_offset(
1980 mtod(mbuf, vm_offset_t) + m_ofs);
1981 gnttab[gnt_idx].source.domid = DOMID_SELF;
1982 gnttab[gnt_idx].len = space;
1983 gnttab[gnt_idx].flags = GNTCOPY_dest_gref;
1989 size_remaining -= space;
1990 if (req_size - r_ofs <= 0) {
1991 /* Must move to the next rx request */
1993 r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
1995 if (mbuf->m_len - m_ofs <= 0) {
1996 /* Must move to the next mbuf */
1998 mbuf = mbuf->m_next;
2006 * Generates responses for all the requests that constituted pkt. Builds
2007 * responses and writes them to the ring, but doesn't push the shared ring
2009 * \param[in] pkt the packet that needs a response
2010 * \param[in] gnttab The grant copy table corresponding to this packet.
2011 * Used to determine how many rsp->netif_rx_response_t's to
2013 * \param[in] n_entries Number of relevant entries in the grant table
2014 * \param[out] ring Responses go here
2015 * \return The number of RX requests that were consumed to generate
2019 xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
2020 int n_entries, netif_rx_back_ring_t *ring)
2023 * This code makes the following assumptions:
2024 * * All entries in gnttab set GNTCOPY_dest_gref
2025 * * The entries in gnttab are grouped by their grefs: any two
2026 * entries with the same gref must be adjacent
2030 int n_responses = 0;
2031 grant_ref_t last_gref = GRANT_REF_INVALID;
2034 KASSERT(gnttab != NULL, ("Received a null granttable copy"));
2037 * In the event of an error, we only need to send one response to the
2038 * netfront. In that case, we mustn't write any data to the responses
2039 * after the one we send. So we must loop all the way through gnttab
2040 * looking for errors before we generate any responses
2042 * Since we're looping through the grant table anyway, we'll count the
2043 * number of different gref's in it, which will tell us how many
2044 * responses to generate
2046 for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
2047 int16_t status = gnttab[gnt_idx].status;
2048 if (status != GNTST_okay) {
2050 "Got error %d for hypervisor gnttab_copy status\n",
2055 if (gnttab[gnt_idx].dest.u.ref != last_gref) {
2057 last_gref = gnttab[gnt_idx].dest.u.ref;
2063 netif_rx_response_t *rsp;
2065 id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
2066 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
2068 rsp->status = NETIF_RSP_ERROR;
2072 const int has_extra = pkt->flags & NETRXF_extra_info;
2076 for (i = 0; i < n_responses; i++) {
2077 netif_rx_request_t rxq;
2078 netif_rx_response_t *rsp;
2080 r_idx = ring->rsp_prod_pvt + i;
2082 * We copy the structure of rxq instead of making a
2083 * pointer because it shares the same memory as rsp.
2085 rxq = *(RING_GET_REQUEST(ring, r_idx));
2086 rsp = RING_GET_RESPONSE(ring, r_idx);
2087 if (has_extra && (i == 1)) {
2088 netif_extra_info_t *ext =
2089 (netif_extra_info_t*)rsp;
2090 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
2092 ext->u.gso.size = pkt->extra.u.gso.size;
2093 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
2095 ext->u.gso.features = 0;
2098 rsp->status = GNTST_okay;
2101 if (i < pkt->list_len - 1)
2102 rsp->flags |= NETRXF_more_data;
2103 if ((i == 0) && has_extra)
2104 rsp->flags |= NETRXF_extra_info;
2106 (pkt->flags & NETRXF_data_validated)) {
2107 rsp->flags |= NETRXF_data_validated;
2108 rsp->flags |= NETRXF_csum_blank;
2111 for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
2113 rsp->status += gnttab[gnt_idx].len;
2119 ring->req_cons += n_responses;
2120 ring->rsp_prod_pvt += n_responses;
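/*
 * Note on the status accumulation above (editorial): in the netif
 * protocol, a successful rx response's status field carries the byte
 * count for that slot (negative values are the NETIF_RSP_* errors), so
 * summing the lengths of the gnttab entries aimed at the same gref
 * yields the number of bytes landed in that slot.
 */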
2125 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain. The first mbuf
2126 * in the chain must start with a struct ether_header.
2128 * XXX This function will perform incorrectly on UDP packets that are split up
2129 * into multiple ethernet frames.
2132 xnb_add_mbuf_cksum(struct mbuf *mbufc)
2134 struct ether_header *eh;
2136 uint16_t ether_type;
2138 eh = mtod(mbufc, struct ether_header*);
2139 ether_type = ntohs(eh->ether_type);
2140 if (ether_type != ETHERTYPE_IP) {
2141 /* Nothing to calculate */
2145 iph = (struct ip*)(eh + 1);
2146 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
2148 iph->ip_sum = in_cksum_hdr(iph);
2151 switch (iph->ip_p) {
2153 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
2154 size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
2155 struct tcphdr *th = (struct tcphdr*)(iph + 1);
2156 th->th_sum = in_pseudo(iph->ip_src.s_addr,
2157 iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
2158 th->th_sum = in_cksum_skip(mbufc,
2159 sizeof(struct ether_header) + ntohs(iph->ip_len),
2160 sizeof(struct ether_header) + (iph->ip_hl << 2));
2164 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
2165 size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
2166 struct udphdr *uh = (struct udphdr*)(iph + 1);
2167 uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
2168 iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
2169 uh->uh_sum = in_cksum_skip(mbufc,
2170 sizeof(struct ether_header) + ntohs(iph->ip_len),
2171 sizeof(struct ether_header) + (iph->ip_hl << 2));
2180 xnb_stop(struct xnb_softc *xnb)
2184 mtx_assert(&xnb->sc_lock, MA_OWNED);
2186 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2187 if_link_state_change(ifp, LINK_STATE_DOWN);
2191 xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2193 struct xnb_softc *xnb = ifp->if_softc;
2195 struct ifreq *ifr = (struct ifreq*) data;
2196 struct ifaddr *ifa = (struct ifaddr*)data;
2202 mtx_lock(&xnb->sc_lock);
2203 if (ifp->if_flags & IFF_UP) {
2204 xnb_ifinit_locked(xnb);
2206 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2211 * Note: netfront sets a variable named xn_if_flags
2212 * here, but that variable is never read
2214 mtx_unlock(&xnb->sc_lock);
2219 mtx_lock(&xnb->sc_lock);
2220 if (ifa->ifa_addr->sa_family == AF_INET) {
2221 ifp->if_flags |= IFF_UP;
2222 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2223 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
2225 if_link_state_change(ifp,
2227 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2228 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2229 if_link_state_change(ifp,
2232 arp_ifinit(ifp, ifa);
2233 mtx_unlock(&xnb->sc_lock);
2235 mtx_unlock(&xnb->sc_lock);
2237 error = ether_ioctl(ifp, cmd, data);
2243 mtx_lock(&xnb->sc_lock);
2244 if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
2245 ifp->if_capenable |= IFCAP_TXCSUM;
2246 ifp->if_hwassist |= XNB_CSUM_FEATURES;
2248 ifp->if_capenable &= ~(IFCAP_TXCSUM);
2249 ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
2251 if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
2252 ifp->if_capenable |= IFCAP_RXCSUM;
2254 ifp->if_capenable &= ~(IFCAP_RXCSUM);
2257 * TODO enable TSO4 and LRO once we no longer need
2258 * to calculate checksums in software
2261 if (ifr->ifr_reqcap & IFCAP_TSO4) {
2262 if (IFCAP_TXCSUM & ifp->if_capenable) {
2263 printf("xnb: Xen netif requires that "
2264 "TXCSUM be enabled in order "
2268 ifp->if_capenable |= IFCAP_TSO4;
2269 ifp->if_hwassist |= CSUM_TSO;
2272 ifp->if_capenable &= ~(IFCAP_TSO4);
2273 ifp->if_hwassist &= ~(CSUM_TSO);
2275 if (ifr->ifr_reqcap & IFCAP_LRO) {
2276 ifp->if_capenable |= IFCAP_LRO;
2278 ifp->if_capenable &= ~(IFCAP_LRO);
2281 mtx_unlock(&xnb->sc_lock);
2284 ifp->if_mtu = ifr->ifr_mtu;
2285 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2292 error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
2295 error = ether_ioctl(ifp, cmd, data);
2302 xnb_start_locked(struct ifnet *ifp)
2304 netif_rx_back_ring_t *rxb;
2305 struct xnb_softc *xnb;
2307 RING_IDX req_prod_local;
2309 xnb = ifp->if_softc;
2310 rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
2316 int out_of_space = 0;
2318 req_prod_local = rxb->sring->req_prod;
2323 IF_DEQUEUE(&ifp->if_snd, mbufc);
2326 error = xnb_send(rxb, xnb->otherend_id, mbufc,
2331 * Insufficient space in the ring.
2332 * Requeue pkt and send when space is
2335 IF_PREPEND(&ifp->if_snd, mbufc);
2337 * Perhaps the frontend missed an IRQ
2338 * and went to sleep. Notify it to wake
2345 /* OS gave a corrupt packet. Drop it. */
2349 /* Send succeeded, or packet had error.
2350 * Free the packet */
2356 if (out_of_space != 0)
2360 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
2361 if ((notify != 0) || (out_of_space != 0))
2362 xen_intr_signal(xnb->xen_intr_handle);
2363 rxb->sring->req_event = req_prod_local + 1;
2365 } while (rxb->sring->req_prod != req_prod_local);
2369 * Sends one packet to the ring. Blocks until the packet is on the ring
2370 * \param[in] mbufc Contains one packet to send. Caller must free
2371 * \param[in,out] rxb The packet will be pushed onto this ring, but the
2372 * otherend will not be notified.
2373 * \param[in] otherend The domain ID of the other end of the connection
2374 * \retval EAGAIN The ring did not have enough space for the packet.
2375 * The ring has not been modified
2376 * \param[in,out] gnttab Pointer to enough memory for a grant table. We make
2377 * this a function parameter so that we will take less
2379 * \retval EINVAL mbufc was corrupt or not convertible into a pkt
2382 xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
2383 gnttab_copy_table gnttab)
2386 int error, n_entries, n_reqs;
2389 space = ring->sring->req_prod - ring->req_cons;
2390 error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
2393 n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
2394 if (n_entries != 0) {
2395 int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
2397 KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
2401 n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);
2407 xnb_start(struct ifnet *ifp)
2409 struct xnb_softc *xnb;
2411 xnb = ifp->if_softc;
2412 mtx_lock(&xnb->rx_lock);
2413 xnb_start_locked(ifp);
2414 mtx_unlock(&xnb->rx_lock);
2417 /* equivalent of network_open() in Linux */
2419 xnb_ifinit_locked(struct xnb_softc *xnb)
2425 mtx_assert(&xnb->sc_lock, MA_OWNED);
2427 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2432 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2433 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2434 if_link_state_change(ifp, LINK_STATE_UP);
2439 xnb_ifinit(void *xsc)
2441 struct xnb_softc *xnb = xsc;
2443 mtx_lock(&xnb->sc_lock);
2444 xnb_ifinit_locked(xnb);
2445 mtx_unlock(&xnb->sc_lock);
2450 * Read the 'mac' node at the given device's node in the store, and parse that
2451 * as colon-separated octets, placing the result in the given mac array. mac
2452 * must be a preallocated array of length ETHER_ADDR_LEN (as declared in
2454 * Return 0 on success, or errno on error.
2457 xen_net_read_mac(device_t dev, uint8_t mac[])
2459 char *s, *e, *macstr;
2464 path = xenbus_get_node(dev);
2465 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
2467 xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
2470 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2471 mac[i] = strtoul(s, &e, 16);
2472 if (s == e || (e[0] != ':' && e[0] != 0)) {
2478 free(macstr, M_XENBUS);
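/*
 * Parsing example (illustrative, hypothetical address): a XenStore "mac"
 * node containing "00:16:3e:1a:2b:3c" fills mac[0] = 0x00 through
 * mac[5] = 0x3c.  Each strtoul() call leaves e pointing at the ':' (or
 * the terminating NUL) that ends the octet, which the loop validates
 * before advancing to the next octet.
 */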
2485 * Callback used by the generic networking code to tell us when our carrier
2486 * state has changed. Since we don't have a physical carrier, we don't care
2489 xnb_ifmedia_upd(struct ifnet *ifp)
2495 * Callback used by the generic networking code to ask us what our carrier
2496 * state is. Since we don't have a physical carrier, this is very simple
2499 xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2501 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2502 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2506 /*---------------------------- NewBus Registration ---------------------------*/
2507 static device_method_t xnb_methods[] = {
2508 /* Device interface */
2509 DEVMETHOD(device_probe, xnb_probe),
2510 DEVMETHOD(device_attach, xnb_attach),
2511 DEVMETHOD(device_detach, xnb_detach),
2512 DEVMETHOD(device_shutdown, bus_generic_shutdown),
2513 DEVMETHOD(device_suspend, xnb_suspend),
2514 DEVMETHOD(device_resume, xnb_resume),
2516 /* Xenbus interface */
2517 DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),
2522 static driver_t xnb_driver = {
2525 sizeof(struct xnb_softc),
2527 devclass_t xnb_devclass;
2529 DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);
2532 /*-------------------------- Unit Tests -------------------------------------*/
2534 #include "netback_unit_tests.c"