1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2012 Spectra Logic Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon
16  *    including a substantially similar Disclaimer requirement for further
17  *    binary redistribution.
18  *
19  * NO WARRANTY
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
28  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGES.
31  *
32  * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
33  *          Ken Merry           (Spectra Logic Corporation)
34  */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 /**
39  * \file blkback.c
40  *
41  * \brief Device driver supporting the vending of block storage from
42  *        a FreeBSD domain to other domains.
43  */
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49
50 #include <sys/bio.h>
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/devicestat.h>
54 #include <sys/disk.h>
55 #include <sys/fcntl.h>
56 #include <sys/filedesc.h>
57 #include <sys/kdb.h>
58 #include <sys/module.h>
59 #include <sys/namei.h>
60 #include <sys/proc.h>
61 #include <sys/rman.h>
62 #include <sys/taskqueue.h>
63 #include <sys/types.h>
64 #include <sys/vnode.h>
65 #include <sys/mount.h>
66 #include <sys/sysctl.h>
67 #include <sys/bitstring.h>
68 #include <sys/sdt.h>
69
70 #include <geom/geom.h>
71
72 #include <machine/_inttypes.h>
73
74 #include <vm/vm.h>
75 #include <vm/vm_extern.h>
76 #include <vm/vm_kern.h>
77
78 #include <xen/xen-os.h>
79 #include <xen/blkif.h>
80 #include <xen/gnttab.h>
81 #include <xen/xen_intr.h>
82
83 #include <xen/interface/event_channel.h>
84 #include <xen/interface/grant_table.h>
85
86 #include <xen/xenbus/xenbusvar.h>
87
88 /*--------------------------- Compile-time Tunables --------------------------*/
89 /**
90  * The maximum number of shared memory ring pages we will allow in a
91  * negotiated block-front/back communication channel.  Allow enough
92  * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
93  */
94 #define XBB_MAX_RING_PAGES              32
95
96 /**
97  * The maximum number of outstanding request blocks (request headers plus
98  * additional segment blocks) we will allow in a negotiated block-front/back
99  * communication channel.
100  */
101 #define XBB_MAX_REQUESTS                                        \
102         __CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)
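
/*
 * Illustrative sketch, not part of the original driver: the Xen shared ring
 * macros round entry counts down to a power of two, so the derived constant
 * above could, if desired, be sanity checked at compile time using the
 * kernel's CTASSERT() and powerof2() macros, along these lines.
 */
CTASSERT(XBB_MAX_REQUESTS > 0 && powerof2(XBB_MAX_REQUESTS));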
103
104 /**
105  * \brief Define to force all I/O to be performed on memory owned by the
106  *        backend device, with a copy-in/out to the remote domain's memory.
107  *
108  * \note  This option is currently required when this driver's domain is
109  *        operating in HVM mode on a system using an IOMMU.
110  *
111  * This driver uses Xen's grant table API to gain access to the memory of
112  * the remote domains it serves.  When our domain is operating in PV mode,
113  * the grant table mechanism directly updates our domain's page table entries
114  * to point to the physical pages of the remote domain.  This scheme guarantees
115  * that blkback and the backing devices it uses can safely perform DMA
116  * operations to satisfy requests.  In HVM mode, Xen may use a HW IOMMU to
117  * ensure that our domain cannot DMA to pages owned by another domain.  As
118  * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
119  * table API.  For this reason, in HVM mode, we must bounce all requests into
120  * memory that is mapped into our domain at domain startup and thus has
121  * valid IOMMU mappings.
122  */
123 #define XBB_USE_BOUNCE_BUFFERS
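
/*
 * Illustrative sketch, not part of the original driver: with bounce
 * buffering enabled, write data is copied from the mapped front-end pages
 * into backend-owned memory before the backing device sees it, and read
 * data is copied back out on completion (see xbb_bio_done() below).  These
 * hypothetical helpers only show the copy direction for each case.
 */
static inline void
xbb_example_bounce_in(void *bounce, const void *mapped_kva, size_t len)
{
	/* Write path: the device will DMA from backend-owned bounce memory. */
	memcpy(bounce, mapped_kva, len);
}

static inline void
xbb_example_bounce_out(void *mapped_kva, const void *bounce, size_t len)
{
	/* Read path: propagate device data back to the front-end's pages. */
	memcpy(mapped_kva, bounce, len);
}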
124
125 /**
126  * \brief Define to enable rudimentary request logging to the console.
127  */
128 #undef XBB_DEBUG
129
130 /*---------------------------------- Macros ----------------------------------*/
131 /**
132  * Custom malloc type for all driver allocations.
133  */
134 static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
135
136 #ifdef XBB_DEBUG
137 #define DPRINTF(fmt, args...)                                   \
138     printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
139 #else
140 #define DPRINTF(fmt, args...) do {} while(0)
141 #endif
142
143 /**
144  * The maximum mapped region size per request we will allow in a negotiated
145  * block-front/back communication channel.
146  * Use old default of MAXPHYS == 128K.
147  */
148 #define XBB_MAX_REQUEST_SIZE                                    \
149         MIN(128 * 1024, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
150
151 /**
152  * The maximum number of segments (within a request header and accompanying
153  * segment blocks) per request we will allow in a negotiated block-front/back
154  * communication channel.
155  */
156 #define XBB_MAX_SEGMENTS_PER_REQUEST                            \
157         (MIN(UIO_MAXIOV,                                        \
158              MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,                \
159                  (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))
160
161 /**
162  * The maximum number of segments that we can allow per request list.
163  * We limit this to the maximum number of segments per request, because
164  * that is already a reasonable number of segments to aggregate.  This
165  * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
166  * because that would leave situations where we can't dispatch even one
167  * large request.
168  */
169 #define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
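
/*
 * Illustrative sketch, not part of the original driver: the invariant
 * described above (a request list must always be able to hold at least one
 * maximally sized request) could be encoded as a compile-time assertion:
 */
CTASSERT(XBB_MAX_SEGMENTS_PER_REQLIST >= XBB_MAX_SEGMENTS_PER_REQUEST);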
170
171 /*--------------------------- Forward Declarations ---------------------------*/
172 struct xbb_softc;
173 struct xbb_xen_req;
174
175 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
176                               ...) __attribute__((format(printf, 3, 4)));
177 static int  xbb_shutdown(struct xbb_softc *xbb);
178
179 /*------------------------------ Data Structures -----------------------------*/
180
181 STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);
182
183 typedef enum {
184         XBB_REQLIST_NONE        = 0x00,
185         XBB_REQLIST_MAPPED      = 0x01
186 } xbb_reqlist_flags;
187
188 struct xbb_xen_reqlist {
189         /**
190          * Back reference to the parent block back instance for this
191          * request.  Used during bio_done handling.
192          */
193         struct xbb_softc        *xbb;
194
195         /**
196          * BLKIF_OP code for this request.
197          */
198         int                      operation;
199
200         /**
201          * Set to BLKIF_RSP_* to indicate request status.
202          *
203          * This field allows an error status to be recorded even if the
204          * delivery of this status must be deferred.  Deferred reporting
205          * is necessary, for example, when an error is detected during
206          * completion processing of one bio when other bios for this
207          * request are still outstanding.
208          */
209         int                      status;
210
211         /**
212          * Number of 512 byte sectors not transferred.
213          */
214         int                      residual_512b_sectors;
215
216         /**
217          * Starting sector number of the first request in the list.
218          */
219         off_t                    starting_sector_number;
220
221         /**
222          * If we're going to coalesce, the next contiguous sector would be
223          * this one.
224          */
225         off_t                    next_contig_sector;
226
227         /**
228          * Number of child requests in the list.
229          */
230         int                      num_children;
231
232         /**
233          * Number of I/O requests still pending on the backend.
234          */
235         int                      pendcnt;
236
237         /**
238          * Total number of segments for requests in the list.
239          */
240         int                      nr_segments;
241
242         /**
243          * Flags for this particular request list.
244          */
245         xbb_reqlist_flags        flags;
246
247         /**
248          * Kernel virtual address space reserved for this request
249          * list structure and used to map the remote domain's pages for
250          * this I/O, into our domain's address space.
251          */
252         uint8_t                 *kva;
253
254         /**
255          * Base, pseudo-physical address, corresponding to the start
256          * of this request's kva region.
257          */
258         uint64_t                 gnt_base;
259
260 #ifdef XBB_USE_BOUNCE_BUFFERS
261         /**
262          * Pre-allocated domain local memory used to proxy remote
263          * domain memory during I/O operations.
264          */
265         uint8_t                 *bounce;
266 #endif
267
268         /**
269          * Array of grant handles (one per page) used to map this request.
270          */
271         grant_handle_t          *gnt_handles;
272
273         /**
274          * Device statistics request ordering type (ordered or simple).
275          */
276         devstat_tag_type         ds_tag_type;
277
278         /**
279          * Device statistics request type (read, write, no_data).
280          */
281         devstat_trans_flags      ds_trans_type;
282
283         /**
284          * The start time for this request.
285          */
286         struct bintime           ds_t0;
287
288         /**
289          * Linked list of contiguous requests with the same operation type.
290          */
291         struct xbb_xen_req_list  contig_req_list;
292
293         /**
294          * Linked list links used to aggregate idle requests in the
295          * request list free pool (xbb->reqlist_free_stailq) and pending
296          * requests waiting for execution (xbb->reqlist_pending_stailq).
297          */
298         STAILQ_ENTRY(xbb_xen_reqlist) links;
299 };
300
301 STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);
302
303 /**
304  * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
305  */
306 struct xbb_xen_req {
307         /**
308          * Linked list links used to aggregate requests into a reqlist
309          * and to store them in the request free pool.
310          */
311         STAILQ_ENTRY(xbb_xen_req) links;
312
313         /**
314          * The remote domain's identifier for this I/O request.
315          */
316         uint64_t                  id;
317
318         /**
319          * The number of pages currently mapped for this request.
320          */
321         int                       nr_pages;
322
323         /**
324          * The number of 512 byte sectors comprising this request.
325          */
326         int                       nr_512b_sectors;
327
328         /**
329          * BLKIF_OP code for this request.
330          */
331         int                       operation;
332
333         /**
334          * Storage used for non-native ring requests.
335          */
336         blkif_request_t          ring_req_storage;
337
338         /**
339          * Pointer to the Xen request in the ring.
340          */
341         blkif_request_t         *ring_req;
342
343         /**
344          * Consumer index for this request.
345          */
346         RING_IDX                 req_ring_idx;
347
348         /**
349          * The start time for this request.
350          */
351         struct bintime           ds_t0;
352
353         /**
354          * Pointer back to our parent request list.
355          */
356         struct xbb_xen_reqlist  *reqlist;
357 };
358 SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);
359
360 /**
361  * \brief Configuration data for the shared memory request ring
362  *        used to communicate with the front-end client of this
363  *        this driver.
364  */
365 struct xbb_ring_config {
366         /** KVA address where ring memory is mapped. */
367         vm_offset_t     va;
368
369         /** The pseudo-physical address where ring memory is mapped.*/
370         uint64_t        gnt_addr;
371
372         /**
373          * Grant table handles, one per-ring page, returned by the
374          * hypervisor upon mapping of the ring and required to
375          * unmap it when a connection is torn down.
376          */
377         grant_handle_t  handle[XBB_MAX_RING_PAGES];
378
379         /**
380          * The device bus address returned by the hypervisor when
381          * mapping the ring and required to unmap it when a connection
382          * is torn down.
383          */
384         uint64_t        bus_addr[XBB_MAX_RING_PAGES];
385
386         /** The number of ring pages mapped for the current connection. */
387         u_int           ring_pages;
388
389         /**
390          * The grant references, one per-ring page, supplied by the
391          * front-end, allowing us to reference the ring pages in the
392          * front-end's domain and to map these pages into our own domain.
393          */
394         grant_ref_t     ring_ref[XBB_MAX_RING_PAGES];
395
396         /** The interrupt driven event channel used to signal ring events. */
397         evtchn_port_t   evtchn;
398 };
399
400 /**
401  * Per-instance connection state flags.
402  */
403 typedef enum
404 {
405         /**
406          * The front-end requested a read-only mount of the
407          * back-end device/file.
408          */
409         XBBF_READ_ONLY         = 0x01,
410
411         /** Communication with the front-end has been established. */
412         XBBF_RING_CONNECTED    = 0x02,
413
414         /**
415          * Front-end requests exist in the ring and are waiting for
416          * xbb_xen_req objects to free up.
417          */
418         XBBF_RESOURCE_SHORTAGE = 0x04,
419
420         /** Connection teardown in progress. */
421         XBBF_SHUTDOWN          = 0x08,
422
423         /** A thread is already performing shutdown processing. */
424         XBBF_IN_SHUTDOWN       = 0x10
425 } xbb_flag_t;
426
427 /** Backend device type.  */
428 typedef enum {
429         /** Backend type unknown. */
430         XBB_TYPE_NONE           = 0x00,
431
432         /**
433          * Backend type disk (access via cdev switch
434          * strategy routine).
435          */
436         XBB_TYPE_DISK           = 0x01,
437
438         /** Backend type file (access via vnode operations). */
439         XBB_TYPE_FILE           = 0x02
440 } xbb_type;
441
442 /**
443  * \brief Structure used to memoize information about a per-request
444  *        scatter-gather list.
445  *
446  * The chief benefit of using this data structure is it avoids having
447  * to reparse the possibly discontiguous S/G list in the original
448  * request.  Due to the way that the mapping of the memory backing an
449  * I/O transaction is handled by Xen, a second pass is unavoidable.
450  * At least this way the second walk is a simple array traversal.
451  *
452  * \note A single Scatter/Gather element in the block interface covers
453  *       at most 1 machine page.  In this context a sector (blkif
454  *       nomenclature, not what I'd choose) is a 512b aligned unit
455  *       of mapping within the machine page referenced by an S/G
456  *       element.
457  */
458 struct xbb_sg {
459         /** The number of 512b data chunks mapped in this S/G element. */
460         int16_t nsect;
461
462         /**
463          * The index (0 based) of the first 512b data chunk mapped
464          * in this S/G element.
465          */
466         uint8_t first_sect;
467
468         /**
469          * The index (0 based) of the last 512b data chunk mapped
470          * in this S/G element.
471          */
472         uint8_t last_sect;
473 };
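
/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper showing how the memoized S/G data above translates into a byte
 * range within the machine page it describes (512b chunks, so shift by 9).
 */
static inline void
xbb_example_sg_extent(const struct xbb_sg *sg, size_t *offset, size_t *length)
{
	/* Byte offset of the first mapped 512b chunk within the page. */
	*offset = (size_t)sg->first_sect << 9;
	/* Total number of bytes covered by this S/G element. */
	*length = (size_t)sg->nsect << 9;
}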
474
475 /**
476  * Character device backend specific configuration data.
477  */
478 struct xbb_dev_data {
479         /** Cdev used for device backend access.  */
480         struct cdev   *cdev;
481
482         /** Cdev switch used for device backend access.  */
483         struct cdevsw *csw;
484
485         /** Used to hold a reference on opened cdev backend devices. */
486         int            dev_ref;
487 };
488
489 /**
490  * File backend specific configuration data.
491  */
492 struct xbb_file_data {
493         /** Credentials to use for vnode backed (file based) I/O. */
494         struct ucred   *cred;
495
496         /**
497          * \brief Array of io vectors used to process file based I/O.
498          *
499          * Only a single file based request is outstanding per-xbb instance,
500          * so we only need one of these.
501          */
502         struct iovec    xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
503 #ifdef XBB_USE_BOUNCE_BUFFERS
504
505         /**
506          * \brief Array of io vectors used to handle bouncing of file reads.
507          *
508          * Vnode operations are free to modify uio data during their
509          * execution.  In the case of a read with bounce buffering active,
510          * we need some of the data from the original uio in order to
511          * bounce-out the read data.  This array serves as the temporary
512          * storage for this saved data.
513          */
514         struct iovec    saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
515
516         /**
517          * \brief Array of memoized bounce buffer kva offsets used
518          *        in the file based backend.
519          *
520          * Due to the way that the mapping of the memory backing an
521          * I/O transaction is handled by Xen, a second pass through
522          * the request sg elements is unavoidable. We memoize the computed
523          * bounce address here to reduce the cost of the second walk.
524          */
525         void            *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
526 #endif /* XBB_USE_BOUNCE_BUFFERS */
527 };
528
529 /**
530  * Collection of backend type specific data.
531  */
532 union xbb_backend_data {
533         struct xbb_dev_data  dev;
534         struct xbb_file_data file;
535 };
536
537 /**
538  * Function signature of backend specific I/O handlers.
539  */
540 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
541                               struct xbb_xen_reqlist *reqlist, int operation,
542                               int flags);
543
544 /**
545  * Per-instance configuration data.
546  */
547 struct xbb_softc {
548         /**
549          * Task-queue used to process I/O requests.
550          */
551         struct taskqueue         *io_taskqueue;
552
553         /**
554          * Single "run the request queue" task enqueued
555          * on io_taskqueue.
556          */
557         struct task               io_task;
558
559         /** Device type for this instance. */
560         xbb_type                  device_type;
561
562         /** NewBus device corresponding to this instance. */
563         device_t                  dev;
564
565         /** Backend specific dispatch routine for this instance. */
566         xbb_dispatch_t            dispatch_io;
567
568         /** The number of requests outstanding on the backend device/file. */
569         int                       active_request_count;
570
571         /** Free pool of request tracking structures. */
572         struct xbb_xen_req_list   request_free_stailq;
573
574         /** Array, sized at connection time, of request tracking structures. */
575         struct xbb_xen_req       *requests;
576
577         /** Free pool of request list structures. */
578         struct xbb_xen_reqlist_list reqlist_free_stailq;
579
580         /** List of pending request lists awaiting execution. */
581         struct xbb_xen_reqlist_list reqlist_pending_stailq;
582
583         /** Array, sized at connection time, of request list structures. */
584         struct xbb_xen_reqlist   *request_lists;
585
586         /**
587          * Global pool of kva used for mapping remote domain ring
588          * and I/O transaction data.
589          */
590         vm_offset_t               kva;
591
592         /** Pseudo-physical address corresponding to kva. */
593         uint64_t                  gnt_base_addr;
594
595         /** The size of the global kva pool. */
596         int                       kva_size;
597
598         /** The size of the KVA area used for request lists. */
599         int                       reqlist_kva_size;
600
601         /** The number of pages of KVA used for request lists */
602         int                       reqlist_kva_pages;
603
604         /** Bitmap of free KVA pages */
605         bitstr_t                 *kva_free;
606
607         /**
608          * \brief Cached value of the front-end's domain id.
609          * 
610          * This value is used at once for each mapped page in
611          * This value is used once for each mapped page in
612          * a transaction.  We cache it to avoid incurring the
613          */
614         domid_t                   otherend_id;
615
616         /**
617          * \brief The blkif protocol abi in effect.
618          *
619          * There are situations where the back and front ends can
620          * have a different, native abi (e.g. intel x86_64 and
621          * 32bit x86 domains on the same machine).  The back-end
622          * always accommodates the front-end's native abi.  That
623          * value is pulled from the XenStore and recorded here.
624          */
625         int                       abi;
626
627         /**
628          * \brief The maximum number of requests and request lists allowed
629          *        to be in flight at a time.
630          *
631          * This value is negotiated via the XenStore.
632          */
633         u_int                     max_requests;
634
635         /**
636          * \brief The maximum number of segments (1 page per segment)
637          *        that can be mapped by a request.
638          *
639          * This value is negotiated via the XenStore.
640          */
641         u_int                     max_request_segments;
642
643         /**
644          * \brief Maximum number of segments per request list.
645          *
646          * This value is derived from and will generally be larger than
647          * max_request_segments.
648          */
649         u_int                     max_reqlist_segments;
650
651         /**
652          * The maximum size of any request to this back-end
653          * device.
654          *
655          * This value is negotiated via the XenStore.
656          */
657         u_int                     max_request_size;
658
659         /**
660          * The maximum size of any request list.  This is derived directly
661          * from max_reqlist_segments.
662          */
663         u_int                     max_reqlist_size;
664
665         /** Various configuration and state bit flags. */
666         xbb_flag_t                flags;
667
668         /** Ring mapping and interrupt configuration data. */
669         struct xbb_ring_config    ring_config;
670
671         /** Runtime, cross-abi safe, structures for ring access. */
672         blkif_back_rings_t        rings;
673
674         /** IRQ mapping for the communication ring event channel. */
675         xen_intr_handle_t         xen_intr_handle;
676
677         /**
678          * \brief Backend access mode flags (e.g. write, or read-only).
679          *
680          * This value is passed to us by the front-end via the XenStore.
681          */
682         char                     *dev_mode;
683
684         /**
685          * \brief Backend device type (e.g. "disk", "cdrom", "floppy").
686          *
687          * This value is passed to us by the front-end via the XenStore.
688          * Currently unused.
689          */
690         char                     *dev_type;
691
692         /**
693          * \brief Backend device/file identifier.
694          *
695          * This value is passed to us by the front-end via the XenStore.
696          * We expect this to be a POSIX path indicating the file or
697          * device to open.
698          */
699         char                     *dev_name;
700
701         /**
702          * Vnode corresponding to the backend device node or file
703          * we are accessing.
704          */
705         struct vnode             *vn;
706
707         union xbb_backend_data    backend;
708
709         /** The native sector size of the backend. */
710         u_int                     sector_size;
711
712         /** log2 of sector_size.  */
713         u_int                     sector_size_shift;
714
715         /** Size in bytes of the backend device or file.  */
716         off_t                     media_size;
717
718         /**
719          * \brief media_size expressed in terms of the backend native
720          *        sector size.
721          *
722          * (e.g. xbb->media_size >> xbb->sector_size_shift).
723          */
724         uint64_t                  media_num_sectors;
725
726         /**
727          * \brief Array of memoized scatter gather data computed during the
728          *        conversion of blkif ring requests to internal xbb_xen_req
729          *        structures.
730          *
731          * Ring processing is serialized so we only need one of these.
732          */
733         struct xbb_sg             xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];
734
735         /**
736          * Temporary grant table map used in xbb_dispatch_io().  When
737          * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
738          * stack could cause a stack overflow.
739          */
740         struct gnttab_map_grant_ref   maps[XBB_MAX_SEGMENTS_PER_REQLIST];
741
742         /** Mutex protecting per-instance data. */
743         struct mtx                lock;
744
745         /**
746          * Resource representing allocated physical address space
747          * associated with our per-instance kva region.
748          */
749         struct resource          *pseudo_phys_res;
750
751         /** Resource id for allocated physical address space. */
752         int                       pseudo_phys_res_id;
753
754         /**
755          * I/O statistics from BlockBack dispatch down.  These are
756          * coalesced requests, and we start them right before execution.
757          */
758         struct devstat           *xbb_stats;
759
760         /**
761          * I/O statistics coming into BlockBack.  These are the requests as
762          * we get them from BlockFront.  They are started as soon as we
763          * receive a request, and completed when the I/O is complete.
764          */
765         struct devstat           *xbb_stats_in;
766
767         /** Disable sending flush to the backend */
768         int                       disable_flush;
769
770         /** Send a real flush for every N flush requests */
771         int                       flush_interval;
772
773         /** Count of flush requests in the interval */
774         int                       flush_count;
775
776         /** Don't coalesce requests if this is set */
777         int                       no_coalesce_reqs;
778
779         /** Number of requests we have received */
780         uint64_t                  reqs_received;
781
782         /** Number of requests we have completed*/
783         uint64_t                  reqs_completed;
784
785         /** Number of requests we queued but not pushed*/
786         uint64_t                  reqs_queued_for_completion;
787
788         /** Number of requests we completed with an error status*/
789         uint64_t                  reqs_completed_with_error;
790
791         /** How many forced dispatches (i.e. without coalescing) have happened */
792         uint64_t                  forced_dispatch;
793
794         /** How many normal dispatches have happened */
795         uint64_t                  normal_dispatch;
796
797         /** How many total dispatches have happened */
798         uint64_t                  total_dispatch;
799
800         /** How many times we have run out of KVA */
801         uint64_t                  kva_shortages;
802
803         /** How many times we have run out of request structures */
804         uint64_t                  request_shortages;
805
806         /** Watch to wait for hotplug script execution */
807         struct xs_watch           hotplug_watch;
808
809         /** Got the needed data from hotplug scripts? */
810         bool                      hotplug_done;
811 };
812
813 /*---------------------------- Request Processing ----------------------------*/
814 /**
815  * Allocate an internal transaction tracking structure from the free pool.
816  *
817  * \param xbb  Per-instance xbb configuration structure.
818  *
819  * \return  On success, a pointer to the allocated xbb_xen_req structure.
820  *          Otherwise NULL.
821  */
822 static inline struct xbb_xen_req *
823 xbb_get_req(struct xbb_softc *xbb)
824 {
825         struct xbb_xen_req *req;
826
827         req = NULL;
828
829         mtx_assert(&xbb->lock, MA_OWNED);
830
831         if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
832                 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
833                 xbb->active_request_count++;
834         }
835
836         return (req);
837 }
838
839 /**
840  * Return an allocated transaction tracking structure to the free pool.
841  *
842  * \param xbb  Per-instance xbb configuration structure.
843  * \param req  The request structure to free.
844  */
845 static inline void
846 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
847 {
848         mtx_assert(&xbb->lock, MA_OWNED);
849
850         STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
851         xbb->active_request_count--;
852
853         KASSERT(xbb->active_request_count >= 0,
854                 ("xbb_release_req: negative active count"));
855 }
856
857 /**
858  * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
859  *
860  * \param xbb       Per-instance xbb configuration structure.
861  * \param req_list  The list of requests to free.
862  * \param nreqs     The number of items in the list.
863  */
864 static inline void
865 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
866                  int nreqs)
867 {
868         mtx_assert(&xbb->lock, MA_OWNED);
869
870         STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
871         xbb->active_request_count -= nreqs;
872
873         KASSERT(xbb->active_request_count >= 0,
874                 ("xbb_release_reqs: negative active count"));
875 }
876
877 /**
878  * Given a page index and 512b sector offset within that page,
879  * calculate an offset into a request's kva region.
880  *
881  * \param reqlist The request structure whose kva region will be accessed.
882  * \param pagenr  The page index used to compute the kva offset.
883  * \param sector  The 512b sector index used to compute the page relative
884  *                kva offset.
885  *
886  * \return  The computed global KVA offset.
887  */
888 static inline uint8_t *
889 xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
890 {
891         return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
892 }
893
894 #ifdef XBB_USE_BOUNCE_BUFFERS
895 /**
896  * Given a page index and 512b sector offset within that page,
897  * calculate an offset into a request's local bounce memory region.
898  *
899  * \param reqlist The request structure whose bounce region will be accessed.
900  * \param pagenr  The page index used to compute the bounce offset.
901  * \param sector  The 512b sector index used to compute the page relative
902  *                bounce offset.
903  *
904  * \return  The computed global bounce buffer address.
905  */
906 static inline uint8_t *
907 xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
908 {
909         return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
910 }
911 #endif
912
913 /**
914  * Given a page number and 512b sector offset within that page,
915  * calculate an offset into the request's memory region that the
916  * underlying backend device/file should use for I/O.
917  *
918  * \param reqlist The request structure whose I/O region will be accessed.
919  * \param pagenr  The page index used to compute the I/O offset.
920  * \param sector  The 512b sector index used to compute the page relative
921  *                I/O offset.
922  *
923  * \return  The computed global I/O address.
924  *
925  * Depending on configuration, this will either be a local bounce buffer
926  * or a pointer to the memory mapped in from the front-end domain for
927  * this request.
928  */
929 static inline uint8_t *
930 xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
931 {
932 #ifdef XBB_USE_BOUNCE_BUFFERS
933         return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
934 #else
935         return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
936 #endif
937 }
938
939 /**
940  * Given a page index and 512b sector offset within that page, calculate
941  * an offset into the local pseudo-physical address space used to map a
942  * front-end's request data into a request.
943  *
944  * \param reqlist The request list structure whose pseudo-physical region
945  *                will be accessed.
946  * \param pagenr  The page index used to compute the pseudo-physical offset.
947  * \param sector  The 512b sector index used to compute the page relative
948  *                pseudo-physical offset.
949  *
950  * \return  The computed global pseudo-physical address.
951  *
952  * This address always falls within the pseudo-physical region that backs
953  * reqlist->kva, regardless of whether bounce buffers are used for the
954  * actual I/O data.
955  */
956 static inline uintptr_t
957 xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
958 {
959         struct xbb_softc *xbb;
960
961         xbb = reqlist->xbb;
962
963         return ((uintptr_t)(xbb->gnt_base_addr +
964                 (uintptr_t)(reqlist->kva - xbb->kva) +
965                 (PAGE_SIZE * pagenr) + (sector << 9)));
966 }
967
968 /**
969  * Get Kernel Virtual Address space for mapping requests.
970  *
971  * \param xbb         Per-instance xbb configuration structure.
972  * \param nr_pages    Number of pages needed.
975  *
976  * \return  On success, a pointer to the allocated KVA region.  Otherwise NULL.
977  *
978  * Note:  This should be unnecessary once we have either chaining or
979  * scatter/gather support for struct bio.  At that point we'll be able to
980  * put multiple addresses and lengths in one bio/bio chain and won't need
981  * to map everything into one virtual segment.
982  */
983 static uint8_t *
984 xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
985 {
986         int first_clear;
987         int num_clear;
988         uint8_t *free_kva;
989         int      i;
990
991         KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));
992
993         first_clear = 0;
994         free_kva = NULL;
995
996         mtx_lock(&xbb->lock);
997
998         /*
999          * Look for the first available page.  If there are none, we're done.
1000          */
1001         bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
1002
1003         if (first_clear == -1)
1004                 goto bailout;
1005
1006         /*
1007          * Starting at the first available page, look for consecutive free
1008          * pages that will satisfy the user's request.
1009          */
1010         for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
1011                 /*
1012                  * If this is true, the page is used, so we have to reset
1013                  * the number of clear pages and the first clear page
1014                  * (since it pointed to a region with an insufficient number
1015                  * of clear pages).
1016                  */
1017                 if (bit_test(xbb->kva_free, i)) {
1018                         num_clear = 0;
1019                         first_clear = -1;
1020                         continue;
1021                 }
1022
1023                 if (first_clear == -1)
1024                         first_clear = i;
1025
1026                 /*
1027                  * If this is true, we've found a large enough free region
1028                  * to satisfy the request.
1029                  */
1030                 if (++num_clear == nr_pages) {
1031                         bit_nset(xbb->kva_free, first_clear,
1032                                  first_clear + nr_pages - 1);
1033
1034                         free_kva = xbb->kva +
1035                                 (uint8_t *)((intptr_t)first_clear * PAGE_SIZE);
1036
1037                         KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1038                                 free_kva + (nr_pages * PAGE_SIZE) <=
1039                                 (uint8_t *)xbb->ring_config.va,
1040                                 ("Free KVA %p len %d out of range, "
1041                                  "kva = %#jx, ring VA = %#jx\n", free_kva,
1042                                  nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1043                                  (uintmax_t)xbb->ring_config.va));
1044                         break;
1045                 }
1046         }
1047
1048 bailout:
1049
1050         if (free_kva == NULL) {
1051                 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1052                 xbb->kva_shortages++;
1053         }
1054
1055         mtx_unlock(&xbb->lock);
1056
1057         return (free_kva);
1058 }
1059
1060 /**
1061  * Free allocated KVA.
1062  *
1063  * \param xbb       Per-instance xbb configuration structure.
1064  * \param kva_ptr   Pointer to allocated KVA region.  
1065  * \param nr_pages  Number of pages in the KVA region.
1066  */
1067 static void
1068 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
1069 {
1070         intptr_t start_page;
1071
1072         mtx_assert(&xbb->lock, MA_OWNED);
1073
1074         start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
1075         bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
1076
1077 }
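
/*
 * Illustrative usage sketch, not part of the original driver: a
 * hypothetical caller pairing the two routines above.  xbb_get_kva()
 * acquires xbb->lock internally and records a shortage on failure, while
 * xbb_free_kva() expects the caller to already hold the lock.
 */
static inline void
xbb_example_kva_usage(struct xbb_softc *xbb, int nr_pages)
{
	uint8_t *kva;

	kva = xbb_get_kva(xbb, nr_pages);
	if (kva == NULL)
		return;		/* XBBF_RESOURCE_SHORTAGE already noted. */

	/* ... map front-end grants into this KVA range and perform I/O ... */

	mtx_lock(&xbb->lock);
	xbb_free_kva(xbb, kva, nr_pages);
	mtx_unlock(&xbb->lock);
}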
1078
1079 /**
1080  * Unmap the front-end pages associated with this I/O request.
1081  *
1082  * \param reqlist  The request list structure to unmap.
1083  */
1084 static void
1085 xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1086 {
1087         struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
1088         u_int                         i;
1089         u_int                         invcount;
1090         int                           error;
1091
1092         invcount = 0;
1093         for (i = 0; i < reqlist->nr_segments; i++) {
1094                 if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1095                         continue;
1096
1097                 unmap[invcount].host_addr    = xbb_get_gntaddr(reqlist, i, 0);
1098                 unmap[invcount].dev_bus_addr = 0;
1099                 unmap[invcount].handle       = reqlist->gnt_handles[i];
1100                 reqlist->gnt_handles[i]      = GRANT_REF_INVALID;
1101                 invcount++;
1102         }
1103
1104         error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
1105                                           unmap, invcount);
1106         KASSERT(error == 0, ("Grant table operation failed"));
1107 }
1108
1109 /**
1110  * Allocate an internal transaction tracking structure from the free pool.
1111  *
1112  * \param xbb  Per-instance xbb configuration structure.
1113  *
1114  * \return  On success, a pointer to the allocated xbb_xen_reqlist structure.
1115  *          Otherwise NULL.
1116  */
1117 static inline struct xbb_xen_reqlist *
1118 xbb_get_reqlist(struct xbb_softc *xbb)
1119 {
1120         struct xbb_xen_reqlist *reqlist;
1121
1122         reqlist = NULL;
1123
1124         mtx_assert(&xbb->lock, MA_OWNED);
1125
1126         if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1127                 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1128                 reqlist->flags = XBB_REQLIST_NONE;
1129                 reqlist->kva = NULL;
1130                 reqlist->status = BLKIF_RSP_OKAY;
1131                 reqlist->residual_512b_sectors = 0;
1132                 reqlist->num_children = 0;
1133                 reqlist->nr_segments = 0;
1134                 STAILQ_INIT(&reqlist->contig_req_list);
1135         }
1136
1137         return (reqlist);
1138 }
1139
1140 /**
1141  * Return an allocated transaction tracking structure to the free pool.
1142  *
1143  * \param xbb        Per-instance xbb configuration structure.
1144  * \param reqlist    The request list structure to free.
1145  * \param wakeup     If set, wakeup the work thread if freeing this reqlist
1146  *                   during a resource shortage condition.
1147  */
1148 static inline void
1149 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1150                     int wakeup)
1151 {
1152
1153         mtx_assert(&xbb->lock, MA_OWNED);
1154
1155         if (wakeup) {
1156                 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1157                 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1158         }
1159
1160         if (reqlist->kva != NULL)
1161                 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1162
1163         xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1164
1165         STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1166
1167         if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1168                 /*
1169                  * Shutdown is in progress.  See if we can
1170                  * progress further now that one more request
1171                  * has completed and been returned to the
1172                  * free pool.
1173                  */
1174                 xbb_shutdown(xbb);
1175         }
1176
1177         if (wakeup != 0)
1178                 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); 
1179 }
1180
1181 /**
1182  * Request resources and do basic request setup.
1183  *
1184  * \param xbb          Per-instance xbb configuration structure.
1185  * \param reqlist      Pointer to reqlist pointer.
1186  * \param ring_req     Pointer to a block ring request.
1187  * \param ring_idx     The ring index of this request.
1188  *
1189  * \return  0 for success, non-zero for failure.
1190  */
1191 static int
1192 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1193                   blkif_request_t *ring_req, RING_IDX ring_idx)
1194 {
1195         struct xbb_xen_reqlist *nreqlist;
1196         struct xbb_xen_req     *nreq;
1197
1198         nreqlist = NULL;
1199         nreq     = NULL;
1200
1201         mtx_lock(&xbb->lock);
1202
1203         /*
1204          * We don't allow new resources to be allocated if we're in the
1205          * process of shutting down.
1206          */
1207         if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1208                 mtx_unlock(&xbb->lock);
1209                 return (1);
1210         }
1211
1212         /*
1213          * Allocate a reqlist if the caller doesn't have one already.
1214          */
1215         if (*reqlist == NULL) {
1216                 nreqlist = xbb_get_reqlist(xbb);
1217                 if (nreqlist == NULL)
1218                         goto bailout_error;
1219         }
1220
1221         /* We always allocate a request. */
1222         nreq = xbb_get_req(xbb);
1223         if (nreq == NULL)
1224                 goto bailout_error;
1225
1226         mtx_unlock(&xbb->lock);
1227
1228         if (*reqlist == NULL) {
1229                 *reqlist = nreqlist;
1230                 nreqlist->operation = ring_req->operation;
1231                 nreqlist->starting_sector_number = ring_req->sector_number;
1232                 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1233                                    links);
1234         }
1235
1236         nreq->reqlist = *reqlist;
1237         nreq->req_ring_idx = ring_idx;
1238         nreq->id = ring_req->id;
1239         nreq->operation = ring_req->operation;
1240
1241         if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1242                 bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
1243                 nreq->ring_req = &nreq->ring_req_storage;
1244         } else {
1245                 nreq->ring_req = ring_req;
1246         }
1247
1248         binuptime(&nreq->ds_t0);
1249         devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1250         STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1251         (*reqlist)->num_children++;
1252         (*reqlist)->nr_segments += ring_req->nr_segments;
1253
1254         return (0);
1255
1256 bailout_error:
1257
1258         /*
1259          * We're out of resources, so set the shortage flag.  The next time
1260          * a request is released, we'll try waking up the work thread to
1261          * see if we can allocate more resources.
1262          */
1263         xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1264         xbb->request_shortages++;
1265
1266         if (nreq != NULL)
1267                 xbb_release_req(xbb, nreq);
1268
1269         if (nreqlist != NULL)
1270                 xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1271
1272         mtx_unlock(&xbb->lock);
1273
1274         return (1);
1275 }
1276
1277 /**
1278  * Create and queue a response to a blkif request.
1279  * 
1280  * \param xbb     Per-instance xbb configuration structure.
1281  * \param req     The request structure to which to respond.
1282  * \param status  The status code to report.  See BLKIF_RSP_*
1283  *                in sys/xen/interface/io/blkif.h.
1284  */
1285 static void
1286 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1287 {
1288         blkif_response_t *resp;
1289
1290         /*
1291          * The mutex is required here, and should be held across this call
1292          * until after the subsequent call to xbb_push_responses().  This
1293          * is to guarantee that another context won't queue responses and
1294          * push them while we're active.
1295          *
1296          * That could lead to the other end being notified of responses
1297          * before the resources have been freed on this end.  The other end
1298          * would then be able to queue additional I/O, and we may run out
1299          * of resources because we haven't freed them all yet.
1300          */
1301         mtx_assert(&xbb->lock, MA_OWNED);
1302
1303         /*
1304          * Place on the response ring for the relevant domain.
1305          * For now, only the spacing between entries is different
1306          * in the different ABIs, not the response entry layout.
1307          */
1308         switch (xbb->abi) {
1309         case BLKIF_PROTOCOL_NATIVE:
1310                 resp = RING_GET_RESPONSE(&xbb->rings.native,
1311                                          xbb->rings.native.rsp_prod_pvt);
1312                 break;
1313         case BLKIF_PROTOCOL_X86_32:
1314                 resp = (blkif_response_t *)
1315                     RING_GET_RESPONSE(&xbb->rings.x86_32,
1316                                       xbb->rings.x86_32.rsp_prod_pvt);
1317                 break;
1318         case BLKIF_PROTOCOL_X86_64:
1319                 resp = (blkif_response_t *)
1320                     RING_GET_RESPONSE(&xbb->rings.x86_64,
1321                                       xbb->rings.x86_64.rsp_prod_pvt);
1322                 break;
1323         default:
1324                 panic("Unexpected blkif protocol ABI.");
1325         }
1326
1327         resp->id        = req->id;
1328         resp->operation = req->operation;
1329         resp->status    = status;
1330
1331         if (status != BLKIF_RSP_OKAY)
1332                 xbb->reqs_completed_with_error++;
1333
1334         xbb->rings.common.rsp_prod_pvt++;
1335
1336         xbb->reqs_queued_for_completion++;
1337
1338 }
1339
1340 /**
1341  * Send queued responses to blkif requests.
1342  * 
1343  * \param xbb            Per-instance xbb configuration structure.
1344  * \param run_taskqueue  Flag that is set to 1 if the taskqueue
1345  *                       should be run, 0 if it does not need to be run.
1346  * \param notify         Flag that is set to 1 if the other end should be
1347  *                       notified via irq, 0 if the other end should not be
1348  *                       notified.
1349  */
1350 static void
1351 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1352 {
1353         int more_to_do;
1354
1355         /*
1356          * The mutex is required here.
1357          */
1358         mtx_assert(&xbb->lock, MA_OWNED);
1359
1360         more_to_do = 0;
1361
1362         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1363
1364         if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1365                 /*
1366                  * Tail check for pending requests. Allows frontend to avoid
1367                  * notifications if requests are already in flight (lower
1368                  * overheads and promotes batching).
1369                  */
1370                 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1371         } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1372                 more_to_do = 1;
1373         }
1374
1375         xbb->reqs_completed += xbb->reqs_queued_for_completion;
1376         xbb->reqs_queued_for_completion = 0;
1377
1378         *run_taskqueue = more_to_do;
1379 }
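
/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper showing the locking discipline described above, where responses
 * are queued and pushed under a single hold of xbb->lock so the front-end
 * cannot observe a response before this end has released its resources.
 * xbb_complete_reqlist() below follows this same pattern.
 */
static inline void
xbb_example_respond_one(struct xbb_softc *xbb, struct xbb_xen_req *req,
			int status)
{
	int notify, run_taskqueue;

	mtx_lock(&xbb->lock);
	xbb_queue_response(xbb, req, status);
	xbb_push_responses(xbb, &run_taskqueue, &notify);
	mtx_unlock(&xbb->lock);

	if (run_taskqueue)
		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
	if (notify)
		xen_intr_signal(xbb->xen_intr_handle);
}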
1380
1381 /**
1382  * Complete a request list.
1383  *
1384  * \param xbb        Per-instance xbb configuration structure.
1385  * \param reqlist    Allocated internal request list structure.
1386  */
1387 static void
1388 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1389 {
1390         struct xbb_xen_req *nreq;
1391         off_t               sectors_sent;
1392         int                 notify, run_taskqueue;
1393
1394         sectors_sent = 0;
1395
1396         if (reqlist->flags & XBB_REQLIST_MAPPED)
1397                 xbb_unmap_reqlist(reqlist);
1398
1399         mtx_lock(&xbb->lock);
1400
1401         /*
1402          * All I/O is done, send the response. A lock is not necessary
1403          * to protect the request list, because all requests have
1404          * completed.  Therefore this is the only context accessing this
1405          * reqlist right now.  However, in order to make sure that no one
1406          * else queues responses onto the queue or pushes them to the other
1407          * side while we're active, we need to hold the lock across the
1408          * calls to xbb_queue_response() and xbb_push_responses().
1409          */
1410         STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1411                 off_t cur_sectors_sent;
1412
1413                 /* Put this response on the ring, but don't push yet */
1414                 xbb_queue_response(xbb, nreq, reqlist->status);
1415
1416                 /* We don't report bytes sent if there is an error. */
1417                 if (reqlist->status == BLKIF_RSP_OKAY)
1418                         cur_sectors_sent = nreq->nr_512b_sectors;
1419                 else
1420                         cur_sectors_sent = 0;
1421
1422                 sectors_sent += cur_sectors_sent;
1423
1424                 devstat_end_transaction(xbb->xbb_stats_in,
1425                                         /*bytes*/cur_sectors_sent << 9,
1426                                         reqlist->ds_tag_type,
1427                                         reqlist->ds_trans_type,
1428                                         /*now*/NULL,
1429                                         /*then*/&nreq->ds_t0);
1430         }
1431
1432         /*
1433          * Take out any sectors not sent.  If we wind up negative (which
1434          * might happen if an error is reported as well as a residual), just
1435          * report 0 sectors sent.
1436          */
1437         sectors_sent -= reqlist->residual_512b_sectors;
1438         if (sectors_sent < 0)
1439                 sectors_sent = 0;
1440
1441         devstat_end_transaction(xbb->xbb_stats,
1442                                 /*bytes*/ sectors_sent << 9,
1443                                 reqlist->ds_tag_type,
1444                                 reqlist->ds_trans_type,
1445                                 /*now*/NULL,
1446                                 /*then*/&reqlist->ds_t0);
1447
1448         xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1449
1450         xbb_push_responses(xbb, &run_taskqueue, &notify);
1451
1452         mtx_unlock(&xbb->lock);
1453
1454         if (run_taskqueue)
1455                 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); 
1456
1457         if (notify)
1458                 xen_intr_signal(xbb->xen_intr_handle);
1459 }
1460
1461 /**
1462  * Completion handler for buffer I/O requests issued by the device
1463  * backend driver.
1464  *
1465  * \param bio  The buffer I/O request on which to perform completion
1466  *             processing.
1467  */
1468 static void
1469 xbb_bio_done(struct bio *bio)
1470 {
1471         struct xbb_softc       *xbb;
1472         struct xbb_xen_reqlist *reqlist;
1473
1474         reqlist = bio->bio_caller1;
1475         xbb     = reqlist->xbb;
1476
1477         reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1478
1479         /*
1480          * This is a bit imprecise.  With aggregated I/O a single
1481          * request list can contain multiple front-end requests and
1482          * multiple bios may point to a single request.  By carefully
1483          * walking the request list, we could map residuals and errors
1484          * back to the original front-end request, but the interface
1485          * isn't sufficiently rich for us to properly report the error.
1486          * So, we just treat the entire request list as having failed if an
1487          * error occurs on any part.  And, if an error occurs, we treat
1488          * the amount of data transferred as 0.
1489          *
1490          * For residuals, we report it on the overall aggregated device,
1491          * but not on the individual requests, since we don't currently
1492          * do the work to determine which front-end request to which the
1493          * residual applies.
1494          */
1495         if (bio->bio_error) {
1496                 DPRINTF("BIO returned error %d for operation on device %s\n",
1497                         bio->bio_error, xbb->dev_name);
1498                 reqlist->status = BLKIF_RSP_ERROR;
1499
1500                 if (bio->bio_error == ENXIO
1501                  && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1502                         /*
1503                          * Backend device has disappeared.  Signal the
1504                          * front-end that we (the device proxy) want to
1505                          * go away.
1506                          */
1507                         xenbus_set_state(xbb->dev, XenbusStateClosing);
1508                 }
1509         }
1510
1511 #ifdef XBB_USE_BOUNCE_BUFFERS
1512         if (bio->bio_cmd == BIO_READ) {
1513                 vm_offset_t kva_offset;
1514
1515                 kva_offset = (vm_offset_t)bio->bio_data
1516                            - (vm_offset_t)reqlist->bounce;
1517                 memcpy((uint8_t *)reqlist->kva + kva_offset,
1518                        bio->bio_data, bio->bio_bcount);
1519         }
1520 #endif /* XBB_USE_BOUNCE_BUFFERS */
1521
1522         /*
1523          * Decrement the pending count for the request list.  When we're
1524          * done with the requests, send status back for all of them.
1525          */
1526         if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1527                 xbb_complete_reqlist(xbb, reqlist);
1528
1529         g_destroy_bio(bio);
1530 }
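
/*
 * A sketch of the completion-counting idiom used by xbb_bio_done(), for
 * illustration only; struct example_batch and example_retire_one() are
 * hypothetical names.  atomic_fetchadd_int() returns the value held
 * before the add, so observing 1 while adding -1 means the caller just
 * retired the last outstanding bio and owns final completion.
 */
#if 0
struct example_batch {
	volatile u_int	pending;	/* outstanding sub-I/Os */
};

static inline int
example_retire_one(struct example_batch *batch)
{

	/* Hypothetical example; non-zero means the caller finalizes. */
	return (atomic_fetchadd_int(&batch->pending, -1) == 1);
}
#endif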
1531
1532 /**
1533  * Parse a blkif request into an internal request structure and send
1534  * it to the backend for processing.
1535  *
1536  * \param xbb       Per-instance xbb configuration structure.
1537  * \param reqlist   Allocated internal request list structure.
1538  *
1539  * \return          On success, 0.  For resource shortages, non-zero.
1540  *
1541  * This routine performs the aspects of request parsing common to all
1542  * backends: compiling an internal request structure, parsing the S/G
1543  * list (and any secondary ring requests in which its segments may
1544  * reside), and mapping front-end I/O pages into our domain.
1545  */
1546 static int
1547 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1548 {
1549         struct xbb_sg                *xbb_sg;
1550         struct gnttab_map_grant_ref  *map;
1551         struct blkif_request_segment *sg;
1552         struct blkif_request_segment *last_block_sg;
1553         struct xbb_xen_req           *nreq;
1554         u_int                         nseg;
1555         u_int                         seg_idx;
1556         u_int                         block_segs;
1557         int                           nr_sects;
1558         int                           total_sects;
1559         int                           operation;
1560         uint8_t                       bio_flags;
1561         int                           error;
1562
1563         reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1564         bio_flags            = 0;
1565         total_sects          = 0;
1566         nr_sects             = 0;
1567
1568         /*
1569          * First determine whether we have enough free KVA to satisfy this
1570          * request list.  If not, tell xbb_run_queue() so it can go to
1571          * sleep until we have more KVA.
1572          */
1573         reqlist->kva = NULL;
1574         if (reqlist->nr_segments != 0) {
1575                 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1576                 if (reqlist->kva == NULL) {
1577                         /*
1578                          * If we're out of KVA, return ENOMEM.
1579                          */
1580                         return (ENOMEM);
1581                 }
1582         }
1583
1584         binuptime(&reqlist->ds_t0);
1585         devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1586
1587         switch (reqlist->operation) {
1588         case BLKIF_OP_WRITE_BARRIER:
1589                 bio_flags       |= BIO_ORDERED;
1590                 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1591                 /* FALLTHROUGH */
1592         case BLKIF_OP_WRITE:
1593                 operation = BIO_WRITE;
1594                 reqlist->ds_trans_type = DEVSTAT_WRITE;
1595                 if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1596                         DPRINTF("Attempt to write to read only device %s\n",
1597                                 xbb->dev_name);
1598                         reqlist->status = BLKIF_RSP_ERROR;
1599                         goto send_response;
1600                 }
1601                 break;
1602         case BLKIF_OP_READ:
1603                 operation = BIO_READ;
1604                 reqlist->ds_trans_type = DEVSTAT_READ;
1605                 break;
1606         case BLKIF_OP_FLUSH_DISKCACHE:
1607                 /*
1608                  * If this is true, the user has requested that we disable
1609                  * flush support.  So we just complete the requests
1610                  * successfully.
1611                  */
1612                 if (xbb->disable_flush != 0) {
1613                         goto send_response;
1614                 }
1615
1616                 /*
1617                  * The user has requested that we only send a real flush
1618                  * for every N flush requests.  So keep count, and either
1619                  * complete the request immediately or queue it for the
1620                  * backend.
1621                  */
1622                 if (xbb->flush_interval != 0) {
1623                         if (++(xbb->flush_count) < xbb->flush_interval) {
1624                                 goto send_response;
1625                         } else
1626                                 xbb->flush_count = 0;
1627                 }
1628
1629                 operation = BIO_FLUSH;
1630                 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1631                 reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1632                 goto do_dispatch;
1633                 /*NOTREACHED*/
1634         default:
1635                 DPRINTF("error: unknown block io operation [%d]\n",
1636                         reqlist->operation);
1637                 reqlist->status = BLKIF_RSP_ERROR;
1638                 goto send_response;
1639         }
1640
1641         reqlist->xbb  = xbb;
1642         xbb_sg        = xbb->xbb_sgs;
1643         map           = xbb->maps;
1644         seg_idx       = 0;
1645
1646         STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1647                 blkif_request_t         *ring_req;
1648                 RING_IDX                 req_ring_idx;
1649                 u_int                    req_seg_idx;
1650
1651                 ring_req              = nreq->ring_req;
1652                 req_ring_idx          = nreq->req_ring_idx;
1653                 nr_sects              = 0;
1654                 nseg                  = ring_req->nr_segments;
1655                 nreq->nr_pages        = nseg;
1656                 nreq->nr_512b_sectors = 0;
1657                 req_seg_idx           = 0;
1658                 sg                    = NULL;
1659
1660                 /* Check that number of segments is sane. */
1661                 if (__predict_false(nseg == 0)
1662                  || __predict_false(nseg > xbb->max_request_segments)) {
1663                         DPRINTF("Bad number of segments in request (%d)\n",
1664                                 nseg);
1665                         reqlist->status = BLKIF_RSP_ERROR;
1666                         goto send_response;
1667                 }
1668
1669                 block_segs    = nseg;
1670                 sg            = ring_req->seg;
1671                 last_block_sg = sg + block_segs;
1672
1673                 while (sg < last_block_sg) {
1674                         KASSERT(seg_idx <
1675                                 XBB_MAX_SEGMENTS_PER_REQLIST,
1676                                 ("seg_idx %d is too large, max "
1677                                 "segs %d\n", seg_idx,
1678                                 XBB_MAX_SEGMENTS_PER_REQLIST));
1679
1680                         xbb_sg->first_sect = sg->first_sect;
1681                         xbb_sg->last_sect  = sg->last_sect;
1682                         xbb_sg->nsect =
1683                             (int8_t)(sg->last_sect -
1684                             sg->first_sect + 1);
1685
1686                         if ((sg->last_sect >= (PAGE_SIZE >> 9))
1687                          || (xbb_sg->nsect <= 0)) {
1688                                 reqlist->status = BLKIF_RSP_ERROR;
1689                                 goto send_response;
1690                         }
1691
1692                         nr_sects += xbb_sg->nsect;
1693                         map->host_addr = xbb_get_gntaddr(reqlist,
1694                                                 seg_idx, /*sector*/0);
1695                         KASSERT(map->host_addr + PAGE_SIZE <=
1696                                 xbb->ring_config.gnt_addr,
1697                                 ("Host address %#jx len %d overlaps "
1698                                  "ring address %#jx\n",
1699                                 (uintmax_t)map->host_addr, PAGE_SIZE,
1700                                 (uintmax_t)xbb->ring_config.gnt_addr));
1701
1702                         map->flags     = GNTMAP_host_map;
1703                         map->ref       = sg->gref;
1704                         map->dom       = xbb->otherend_id;
1705                         if (operation == BIO_WRITE)
1706                                 map->flags |= GNTMAP_readonly;
1707                         sg++;
1708                         map++;
1709                         xbb_sg++;
1710                         seg_idx++;
1711                         req_seg_idx++;
1712                 }
1713
1714                 /* Convert to the disk's sector size */
1715                 nreq->nr_512b_sectors = nr_sects;
1716                 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1717                 total_sects += nr_sects;
1718
1719                 if ((nreq->nr_512b_sectors &
1720                     ((xbb->sector_size >> 9) - 1)) != 0) {
1721                         device_printf(xbb->dev, "%s: I/O size (%d) is not "
1722                                       "a multiple of the backing store sector "
1723                                       "size (%d)\n", __func__,
1724                                       nreq->nr_512b_sectors << 9,
1725                                       xbb->sector_size);
1726                         reqlist->status = BLKIF_RSP_ERROR;
1727                         goto send_response;
1728                 }
1729         }
1730
1731         error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
1732                                           xbb->maps, reqlist->nr_segments);
1733         if (error != 0)
1734                 panic("Grant table operation failed (%d)", error);
1735
1736         reqlist->flags |= XBB_REQLIST_MAPPED;
1737
1738         for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1739              seg_idx++, map++){
1740                 if (__predict_false(map->status != 0)) {
1741                         DPRINTF("invalid buffer -- could not remap "
1742                                 "it (%d)\n", map->status);
1743                         DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
1744                                 "0x%x ref 0x%x, dom %d\n", seg_idx,
1745                                 map->host_addr, map->flags, map->ref,
1746                                 map->dom);
1747                         reqlist->status = BLKIF_RSP_ERROR;
1748                         goto send_response;
1749                 }
1750
1751                 reqlist->gnt_handles[seg_idx] = map->handle;
1752         }
1753         if (reqlist->starting_sector_number + total_sects >
1754             xbb->media_num_sectors) {
1755                 DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
1756                         "extends past end of device %s\n",
1757                         operation == BIO_READ ? "read" : "write",
1758                         reqlist->starting_sector_number,
1759                         reqlist->starting_sector_number + total_sects,
1760                         xbb->dev_name); 
1761                 reqlist->status = BLKIF_RSP_ERROR;
1762                 goto send_response;
1763         }
1764
1765 do_dispatch:
1766
1767         error = xbb->dispatch_io(xbb,
1768                                  reqlist,
1769                                  operation,
1770                                  bio_flags);
1771
1772         if (error != 0) {
1773                 reqlist->status = BLKIF_RSP_ERROR;
1774                 goto send_response;
1775         }
1776
1777         return (0);
1778
1779 send_response:
1780
1781         xbb_complete_reqlist(xbb, reqlist);
1782
1783         return (0);
1784 }
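
/*
 * A sketch of the per-segment arithmetic used in xbb_dispatch_io(), for
 * illustration only; example_segment_bytes() is a hypothetical helper.
 * Each blkif segment addresses a sub-range of one granted page in
 * 512-byte sectors: first_sect/last_sect of 0/7 on a 4KiB page covers
 * the whole page, while 2/3 covers bytes 1024-2047.  Note also that the
 * grant is mapped with GNTMAP_readonly when servicing a BIO_WRITE,
 * since the backend only needs to read the front-end's pages then.
 */
#if 0
static inline int
example_segment_bytes(uint8_t first_sect, uint8_t last_sect)
{
	int nsect;

	/* Hypothetical example; mirrors the validation done above. */
	nsect = (int8_t)(last_sect - first_sect + 1);
	if (last_sect >= (PAGE_SIZE >> 9) || nsect <= 0)
		return (-1);		/* malformed segment */
	return (nsect << 9);		/* bytes covered by the segment */
}
#endif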
1785
1786 static __inline int
1787 xbb_count_sects(blkif_request_t *ring_req)
1788 {
1789         int i;
1790         int cur_size = 0;
1791
1792         for (i = 0; i < ring_req->nr_segments; i++) {
1793                 int nsect;
1794
1795                 nsect = (int8_t)(ring_req->seg[i].last_sect -
1796                         ring_req->seg[i].first_sect + 1);
1797                 if (nsect <= 0)
1798                         break;
1799
1800                 cur_size += nsect;
1801         }
1802
1803         return (cur_size);
1804 }
1805
1806 /**
1807  * Process incoming requests from the shared communication ring in response
1808  * to a signal on the ring's event channel.
1809  *
1810  * \param context  Callback argument registered during task initialization -
1811  *                 the xbb_softc for this instance.
1812  * \param pending  The number of taskqueue_enqueue events that have
1813  *                 occurred since this handler was last run.
1814  */
1815 static void
1816 xbb_run_queue(void *context, int pending)
1817 {
1818         struct xbb_softc       *xbb;
1819         blkif_back_rings_t     *rings;
1820         RING_IDX                rp;
1821         uint64_t                cur_sector;
1822         int                     cur_operation;
1823         struct xbb_xen_reqlist *reqlist;
1824
1825         xbb   = (struct xbb_softc *)context;
1826         rings = &xbb->rings;
1827
1828         /*
1829          * Work gather and dispatch loop.  Note that we have a bias here
1830          * towards gathering I/O sent by blockfront.  We first gather up
1831          * everything in the ring, as long as we have resources.  Then we
1832          * dispatch one request, and then attempt to gather up any
1833          * additional requests that have come in while we were dispatching
1834          * the request.
1835          *
1836          * This allows us to get a clearer picture (via devstat) of how
1837          * many requests blockfront is queueing to us at any given time.
1838          */
1839         for (;;) {
1840                 int retval;
1841
1842                 /*
1843                  * Initialize reqlist to the last element in the pending
1844                  * queue, if there is one.  This allows us to add more
1845                  * requests to that request list, if we have room.
1846                  */
1847                 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1848                                       xbb_xen_reqlist, links);
1849                 if (reqlist != NULL) {
1850                         cur_sector = reqlist->next_contig_sector;
1851                         cur_operation = reqlist->operation;
1852                 } else {
1853                         cur_operation = 0;
1854                         cur_sector    = 0;
1855                 }
1856
1857                 /*
1858                  * Cache req_prod to avoid accessing a cache line shared
1859                  * with the frontend.
1860                  */
1861                 rp = rings->common.sring->req_prod;
1862
1863                 /* Ensure we see queued requests up to 'rp'. */
1864                 rmb();
1865
1866                 /**
1867                  * Run so long as there is work to consume and the generation
1868                  * of a response will not overflow the ring.
1869                  *
1870                  * @note There's a 1 to 1 relationship between requests and
1871                  *       responses, so an overflow should never occur.  This
1872                  *       test is to protect our domain from digesting bogus
1873                  *       data.  Shouldn't we log this?
1874                  */
1875                 while (rings->common.req_cons != rp
1876                     && RING_REQUEST_CONS_OVERFLOW(&rings->common,
1877                                                   rings->common.req_cons) == 0){
1878                         blkif_request_t         ring_req_storage;
1879                         blkif_request_t        *ring_req;
1880                         int                     cur_size;
1881
1882                         switch (xbb->abi) {
1883                         case BLKIF_PROTOCOL_NATIVE:
1884                                 ring_req = RING_GET_REQUEST(&xbb->rings.native,
1885                                     rings->common.req_cons);
1886                                 break;
1887                         case BLKIF_PROTOCOL_X86_32:
1888                         {
1889                                 struct blkif_x86_32_request *ring_req32;
1890
1891                                 ring_req32 = RING_GET_REQUEST(
1892                                     &xbb->rings.x86_32, rings->common.req_cons);
1893                                 blkif_get_x86_32_req(&ring_req_storage,
1894                                                      ring_req32);
1895                                 ring_req = &ring_req_storage;
1896                                 break;
1897                         }
1898                         case BLKIF_PROTOCOL_X86_64:
1899                         {
1900                                 struct blkif_x86_64_request *ring_req64;
1901
1902                                 ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64,
1903                                     rings->common.req_cons);
1904                                 blkif_get_x86_64_req(&ring_req_storage,
1905                                                      ring_req64);
1906                                 ring_req = &ring_req_storage;
1907                                 break;
1908                         }
1909                         default:
1910                                 panic("Unexpected blkif protocol ABI.");
1911                                 /* NOTREACHED */
1912                         } 
1913
1914                         /*
1915                          * Check for situations that would require closing
1916                          * off this I/O for further coalescing:
1917                          *  - Coalescing is turned off.
1918                          *  - Current I/O is out of sequence with the previous
1919                          *    I/O.
1920                          *  - Coalesced I/O would be too large.
1921                          */
1922                         if ((reqlist != NULL)
1923                          && ((xbb->no_coalesce_reqs != 0)
1924                           || ((xbb->no_coalesce_reqs == 0)
1925                            && ((ring_req->sector_number != cur_sector)
1926                             || (ring_req->operation != cur_operation)
1927                             || ((ring_req->nr_segments + reqlist->nr_segments) >
1928                                  xbb->max_reqlist_segments))))) {
1929                                 reqlist = NULL;
1930                         }
1931
1932                         /*
1933                          * Grab and check for all resources in one shot.
1934                          * If we can't get all of the resources we need,
1935                          * the shortage is noted and the thread will get
1936                          * woken up when more resources are available.
1937                          */
1938                         retval = xbb_get_resources(xbb, &reqlist, ring_req,
1939                                                    xbb->rings.common.req_cons);
1940
1941                         if (retval != 0) {
1942                                 /*
1943                                  * Resource shortage has been recorded.
1944                                  * We'll be scheduled to run once a request
1945                                  * object frees up due to a completion.
1946                                  */
1947                                 break;
1948                         }
1949
1950                         /*
1951                          * Signify that we can overwrite this request with
1952                          * a response by incrementing our consumer index.
1953                          * The response won't be generated until after
1954                          * we've already consumed all necessary data out
1955                          * of the version of the request in the ring buffer
1956                          * (for native mode).  We must update the consumer
1957                          * index before issuing back-end I/O so there is
1958                          * no possibility that it will complete and a
1959                          * response be generated before we make room in 
1960                          * the queue for that response.
1961                          */
1962                         xbb->rings.common.req_cons++;
1963                         xbb->reqs_received++;
1964
1965                         cur_size = xbb_count_sects(ring_req);
1966                         cur_sector = ring_req->sector_number + cur_size;
1967                         reqlist->next_contig_sector = cur_sector;
1968                         cur_operation = ring_req->operation;
1969                 }
1970
1971                 /* Check for I/O to dispatch */
1972                 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1973                 if (reqlist == NULL) {
1974                         /*
1975                          * We're out of work to do, put the task queue to
1976                          * sleep.
1977                          */
1978                         break;
1979                 }
1980
1981                 /*
1982                  * Grab the first request off the queue and attempt
1983                  * to dispatch it.
1984                  */
1985                 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1986
1987                 retval = xbb_dispatch_io(xbb, reqlist);
1988                 if (retval != 0) {
1989                         /*
1990                          * xbb_dispatch_io() returns non-zero only when
1991                          * there is a resource shortage.  If that's the
1992                          * case, re-queue this request on the head of the
1993                          * queue, and go to sleep until we have more
1994                          * resources.
1995                          */
1996                         STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
1997                                            reqlist, links);
1998                         break;
1999                 } else {
2000                         /*
2001                          * If we still have anything on the queue after
2002                          * removing the head entry, that is because we
2003                          * met one of the criteria to create a new
2004                          * request list (outlined above), and we'll call
2005                          * that a forced dispatch for statistical purposes.
2006                          *
2007                          * Otherwise, if there is only one element on the
2008                          * queue, we coalesced everything available on
2009                          * the ring and we'll call that a normal dispatch.
2010                          */
2011                         reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2012
2013                         if (reqlist != NULL)
2014                                 xbb->forced_dispatch++;
2015                         else
2016                                 xbb->normal_dispatch++;
2017
2018                         xbb->total_dispatch++;
2019                 }
2020         }
2021 }
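
/*
 * A condensed sketch of the Xen shared-ring consumer pattern used by
 * xbb_run_queue(), for illustration only; "ring" stands in for
 * &xbb->rings.common and the fragment is not compiled.  The producer
 * index is sampled once to avoid repeatedly touching a cache line
 * shared with the front end, a read barrier orders that load before
 * the request reads, and req_cons is only advanced once the request
 * has been fully consumed, because that slot may immediately be
 * reused for a response.
 */
#if 0
	RING_IDX rp;

	rp = ring->sring->req_prod;
	rmb();
	while (ring->req_cons != rp &&
	    RING_REQUEST_CONS_OVERFLOW(ring, ring->req_cons) == 0) {
		blkif_request_t *req;

		req = RING_GET_REQUEST(ring, ring->req_cons);
		/* ... parse or copy *req before advancing ... */
		ring->req_cons++;
	}
#endif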
2022
2023 /**
2024  * Interrupt handler bound to the shared ring's event channel.
2025  *
2026  * \param arg  Callback argument registered during event channel
2027  *             binding - the xbb_softc for this instance.
2028  */
2029 static int
2030 xbb_filter(void *arg)
2031 {
2032         struct xbb_softc *xbb;
2033
2034         /* Defer to taskqueue thread. */
2035         xbb = (struct xbb_softc *)arg;
2036         taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); 
2037
2038         return (FILTER_HANDLED);
2039 }
2040
2041 SDT_PROVIDER_DEFINE(xbb);
2042 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2043 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2044                   "uint64_t");
2045 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2046                   "uint64_t", "uint64_t");
2047
2048 /*----------------------------- Backend Handlers -----------------------------*/
2049 /**
2050  * Backend handler for character device access.
2051  *
2052  * \param xbb        Per-instance xbb configuration structure.
2053  * \param reqlist    Allocated internal request list structure.
2054  * \param operation  BIO_* I/O operation code.
2055  * \param bio_flags  Additional bio_flag data to pass to any generated
2056  *                   bios (e.g. BIO_ORDERED).
2057  *
2058  * \return  0 for success, errno codes for failure.
2059  */
2060 static int
2061 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2062                  int operation, int bio_flags)
2063 {
2064         struct xbb_dev_data *dev_data;
2065         struct bio          *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
2066         off_t                bio_offset;
2067         struct bio          *bio;
2068         struct xbb_sg       *xbb_sg;
2069         u_int                nbio;
2070         u_int                bio_idx;
2071         u_int                nseg;
2072         u_int                seg_idx;
2073         int                  error;
2074
2075         dev_data   = &xbb->backend.dev;
2076         bio_offset = (off_t)reqlist->starting_sector_number
2077                    << xbb->sector_size_shift;
2078         error      = 0;
2079         nbio       = 0;
2080         bio_idx    = 0;
2081
2082         if (operation == BIO_FLUSH) {
2083                 bio = g_new_bio();
2084                 if (__predict_false(bio == NULL)) {
2085                         DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
2086                         error = ENOMEM;
2087                         return (error);
2088                 }
2089
2090                 bio->bio_cmd     = BIO_FLUSH;
2091                 bio->bio_flags  |= BIO_ORDERED;
2092                 bio->bio_dev     = dev_data->cdev;
2093                 bio->bio_offset  = 0;
2094                 bio->bio_data    = 0;
2095                 bio->bio_done    = xbb_bio_done;
2096                 bio->bio_caller1 = reqlist;
2097                 bio->bio_pblkno  = 0;
2098
2099                 reqlist->pendcnt = 1;
2100
2101                 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2102                            device_get_unit(xbb->dev));
2103
2104                 (*dev_data->csw->d_strategy)(bio);
2105
2106                 return (0);
2107         }
2108
2109         xbb_sg = xbb->xbb_sgs;
2110         bio    = NULL;
2111         nseg = reqlist->nr_segments;
2112
2113         for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2114                 /*
2115                  * KVA will not be contiguous, so any additional
2116                  * I/O will need to be represented in a new bio.
2117                  */
2118                 if ((bio != NULL)
2119                  && (xbb_sg->first_sect != 0)) {
2120                         if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2121                                 printf("%s: Discontiguous I/O request "
2122                                        "from domain %d ends on "
2123                                        "non-sector boundary\n",
2124                                        __func__, xbb->otherend_id);
2125                                 error = EINVAL;
2126                                 goto fail_free_bios;
2127                         }
2128                         bio = NULL;
2129                 }
2130
2131                 if (bio == NULL) {
2132                         /*
2133                          * Make sure that the start of this bio is
2134                          * aligned to a device sector.
2135                          */
2136                         if ((bio_offset & (xbb->sector_size - 1)) != 0){
2137                                 printf("%s: Misaligned I/O request "
2138                                        "from domain %d\n", __func__,
2139                                        xbb->otherend_id);
2140                                 error = EINVAL;
2141                                 goto fail_free_bios;
2142                         }
2143
2144                         bio = bios[nbio++] = g_new_bio();
2145                         if (__predict_false(bio == NULL)) {
2146                                 error = ENOMEM;
2147                                 goto fail_free_bios;
2148                         }
2149                         bio->bio_cmd     = operation;
2150                         bio->bio_flags  |= bio_flags;
2151                         bio->bio_dev     = dev_data->cdev;
2152                         bio->bio_offset  = bio_offset;
2153                         bio->bio_data    = xbb_reqlist_ioaddr(reqlist, seg_idx,
2154                                                 xbb_sg->first_sect);
2155                         bio->bio_done    = xbb_bio_done;
2156                         bio->bio_caller1 = reqlist;
2157                         bio->bio_pblkno  = bio_offset >> xbb->sector_size_shift;
2158                 }
2159
2160                 bio->bio_length += xbb_sg->nsect << 9;
2161                 bio->bio_bcount  = bio->bio_length;
2162                 bio_offset      += xbb_sg->nsect << 9;
2163
2164                 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {
2165                         if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2166                                 printf("%s: Discontiguous I/O request "
2167                                        "from domain %d ends on "
2168                                        "non-sector boundary\n",
2169                                        __func__, xbb->otherend_id);
2170                                 error = EINVAL;
2171                                 goto fail_free_bios;
2172                         }
2173                         /*
2174                          * KVA will not be contiguous, so any additional
2175                          * I/O will need to be represented in a new bio.
2176                          */
2177                         bio = NULL;
2178                 }
2179         }
2180
2181         reqlist->pendcnt = nbio;
2182
2183         for (bio_idx = 0; bio_idx < nbio; bio_idx++)
2184         {
2185 #ifdef XBB_USE_BOUNCE_BUFFERS
2186                 vm_offset_t kva_offset;
2187
2188                 kva_offset = (vm_offset_t)bios[bio_idx]->bio_data
2189                            - (vm_offset_t)reqlist->bounce;
2190                 if (operation == BIO_WRITE) {
2191                         memcpy(bios[bio_idx]->bio_data,
2192                                (uint8_t *)reqlist->kva + kva_offset,
2193                                bios[bio_idx]->bio_bcount);
2194                 }
2195 #endif
2196                 if (operation == BIO_READ) {
2197                         SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2198                                    device_get_unit(xbb->dev),
2199                                    bios[bio_idx]->bio_offset,
2200                                    bios[bio_idx]->bio_length);
2201                 } else if (operation == BIO_WRITE) {
2202                         SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2203                                    device_get_unit(xbb->dev),
2204                                    bios[bio_idx]->bio_offset,
2205                                    bios[bio_idx]->bio_length);
2206                 }
2207                 (*dev_data->csw->d_strategy)(bios[bio_idx]);
2208         }
2209
2210         return (error);
2211
2212 fail_free_bios:
2213         for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
2214                 g_destroy_bio(bios[bio_idx]);
2215
2216         return (error);
2217 }
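
/*
 * A minimal sketch of the sector-alignment test applied repeatedly in
 * xbb_dispatch_dev(), for illustration only; example_is_sector_aligned()
 * is a hypothetical helper.  sector_size is a power of two, so masking
 * with (sector_size - 1) isolates the sub-sector remainder; a non-zero
 * remainder means the bio would start or end off a device sector
 * boundary and the request must be rejected.
 */
#if 0
static inline int
example_is_sector_aligned(off_t offset, u_int sector_size)
{

	/* Hypothetical example; non-zero means properly aligned. */
	return ((offset & (sector_size - 1)) == 0);
}
#endif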
2218
2219 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2220 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2221                   "uint64_t");
2222 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2223                   "uint64_t", "uint64_t");
2224
2225 /**
2226  * Backend handler for file access.
2227  *
2228  * \param xbb        Per-instance xbb configuration structure.
2229  * \param reqlist    Allocated internal request list.
2230  * \param operation  BIO_* I/O operation code.
2231  * \param flags      Additional bio_flag data to pass to any generated bios
2232  *                   (e.g. BIO_ORDERED).
2233  *
2234  * \return  0 for success, errno codes for failure.
2235  */
2236 static int
2237 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2238                   int operation, int flags)
2239 {
2240         struct xbb_file_data *file_data;
2241         u_int                 seg_idx;
2242         u_int                 nseg;
2243         struct uio            xuio;
2244         struct xbb_sg        *xbb_sg;
2245         struct iovec         *xiovec;
2246 #ifdef XBB_USE_BOUNCE_BUFFERS
2247         void                **p_vaddr;
2248         int                   saved_uio_iovcnt;
2249 #endif /* XBB_USE_BOUNCE_BUFFERS */
2250         int                   error;
2251
2252         file_data = &xbb->backend.file;
2253         error = 0;
2254         bzero(&xuio, sizeof(xuio));
2255
2256         switch (operation) {
2257         case BIO_READ:
2258                 xuio.uio_rw = UIO_READ;
2259                 break;
2260         case BIO_WRITE:
2261                 xuio.uio_rw = UIO_WRITE;
2262                 break;
2263         case BIO_FLUSH: {
2264                 struct mount *mountpoint;
2265
2266                 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2267                            device_get_unit(xbb->dev));
2268
2269                 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2270
2271                 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2272                 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2273                 VOP_UNLOCK(xbb->vn);
2274
2275                 vn_finished_write(mountpoint);
2276
2277                 goto bailout_send_response;
2278                 /* NOTREACHED */
2279         }
2280         default:
2281                 panic("invalid operation %d", operation);
2282                 /* NOTREACHED */
2283         }
2284         xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2285                         << xbb->sector_size_shift;
2286         xuio.uio_segflg = UIO_SYSSPACE;
2287         xuio.uio_iov = file_data->xiovecs;
2288         xuio.uio_iovcnt = 0;
2289         xbb_sg = xbb->xbb_sgs;
2290         nseg = reqlist->nr_segments;
2291
2292         for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2293                 /*
2294                  * If the first sector is not 0, the KVA will
2295                  * not be contiguous and we'll need to go on
2296                  * to another segment.
2297                  */
2298                 if (xbb_sg->first_sect != 0)
2299                         xiovec = NULL;
2300
2301                 if (xiovec == NULL) {
2302                         xiovec = &file_data->xiovecs[xuio.uio_iovcnt];
2303                         xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2304                             seg_idx, xbb_sg->first_sect);
2305 #ifdef XBB_USE_BOUNCE_BUFFERS
2306                         /*
2307                          * Store the address of the incoming
2308                          * buffer at this particular offset
2309                          * as well, so we can do the copy
2310                          * later without having to do more
2311                          * work to recalculate this address.
2312                          */
2313                         p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt];
2314                         *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
2315                             xbb_sg->first_sect);
2316 #endif /* XBB_USE_BOUNCE_BUFFERS */
2317                         xiovec->iov_len = 0;
2318                         xuio.uio_iovcnt++;
2319                 }
2320
2321                 xiovec->iov_len += xbb_sg->nsect << 9;
2322
2323                 xuio.uio_resid += xbb_sg->nsect << 9;
2324
2325                 /*
2326                  * If the last sector is not the full page
2327                  * size count, the next segment will not be
2328                  * contiguous in KVA and we need a new iovec.
2329                  */
2330                 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9)
2331                         xiovec = NULL;
2332         }
2333
2334         xuio.uio_td = curthread;
2335
2336 #ifdef XBB_USE_BOUNCE_BUFFERS
2337         saved_uio_iovcnt = xuio.uio_iovcnt;
2338
2339         if (operation == BIO_WRITE) {
2340                 /* Copy the write data to the local buffer. */
2341                 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2342                      xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt;
2343                      seg_idx++, xiovec++, p_vaddr++) {
2344                         memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len);
2345                 }
2346         } else {
2347                 /*
2348                  * We only need to save off the iovecs in the case of a
2349                  * read, because the copy for the read happens after the
2350                  * VOP_READ().  (The uio will get modified in that call
2351                  * sequence.)
2352                  */
2353                 memcpy(file_data->saved_xiovecs, xuio.uio_iov,
2354                        xuio.uio_iovcnt * sizeof(xuio.uio_iov[0]));
2355         }
2356 #endif /* XBB_USE_BOUNCE_BUFFERS */
2357
2358         switch (operation) {
2359         case BIO_READ:
2360
2361                 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2362                            device_get_unit(xbb->dev), xuio.uio_offset,
2363                            xuio.uio_resid);
2364
2365                 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2366
2367                 /*
2368                  * UFS pays attention to IO_DIRECT for reads.  If the
2369                  * DIRECTIO option is configured into the kernel, it calls
2370                  * ffs_rawread().  But that only works for single-segment
2371                  * uios with user space addresses.  In our case, with a
2372                  * kernel uio, it still reads into the buffer cache, but it
2373                  * will just try to release the buffer from the cache later
2374                  * on in ffs_read().
2375                  *
2376                  * ZFS does not pay attention to IO_DIRECT for reads.
2377                  *
2378                  * UFS does not pay attention to IO_SYNC for reads.
2379                  *
2380                  * ZFS pays attention to IO_SYNC (which translates into the
2381                  * Solaris define FRSYNC for zfs_read()) for reads.  It
2382                  * attempts to sync the file before reading.
2383                  *
2384                  * So, to attempt to provide some barrier semantics in the
2385                  * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.  
2386                  */
2387                 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 
2388                                  (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
2389
2390                 VOP_UNLOCK(xbb->vn);
2391                 break;
2392         case BIO_WRITE: {
2393                 struct mount *mountpoint;
2394
2395                 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2396                            device_get_unit(xbb->dev), xuio.uio_offset,
2397                            xuio.uio_resid);
2398
2399                 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2400
2401                 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2402
2403                 /*
2404                  * UFS pays attention to IO_DIRECT for writes.  The write
2405                  * get put into the cache.)
2406                  * get put into cache.
2407                  *
2408                  * UFS pays attention to IO_SYNC for writes.  It will
2409                  * attempt to write the buffer out synchronously if that
2410                  * flag is set.
2411                  *
2412                  * ZFS does not pay attention to IO_DIRECT for writes.
2413                  *
2414                  * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
2415                  * for writes.  It will flush the transaction from the
2416                  * cache before returning.
2417                  *
2418                  * So if we've got the BIO_ORDERED flag set, we want
2419                  * IO_SYNC in either the UFS or ZFS case.
2420                  */
2421                 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2422                                   IO_SYNC : 0, file_data->cred);
2423                 VOP_UNLOCK(xbb->vn);
2424
2425                 vn_finished_write(mountpoint);
2426
2427                 break;
2428         }
2429         default:
2430                 panic("invalid operation %d", operation);
2431                 /* NOTREACHED */
2432         }
2433
2434 #ifdef XBB_USE_BOUNCE_BUFFERS
2435         /* We only need to copy here for read operations */
2436         if (operation == BIO_READ) {
2437                 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2438                      xiovec = file_data->saved_xiovecs;
2439                      seg_idx < saved_uio_iovcnt; seg_idx++,
2440                      xiovec++, p_vaddr++) {
2441                         /*
2442                          * Note that we have to use the copy of the 
2443                          * io vector we made above.  uiomove() modifies
2444                          * the uio and its referenced vector as uiomove
2445                          * performs the copy, so we can't rely on any
2446                          * state from the original uio.
2447                          */
2448                         memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len);
2449                 }
2450         }
2451 #endif /* XBB_USE_BOUNCE_BUFFERS */
2452
2453 bailout_send_response:
2454
2455         if (error != 0)
2456                 reqlist->status = BLKIF_RSP_ERROR;
2457
2458         xbb_complete_reqlist(xbb, reqlist);
2459
2460         return (0);
2461 }
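
/*
 * A condensed sketch of the kind of single-segment kernel uio that
 * xbb_dispatch_file() builds, for illustration only; vp, kbuf, len,
 * offset, ordered, and cred are hypothetical stand-ins and the
 * fragment is not compiled.  UIO_SYSSPACE marks the buffer as a kernel
 * address, uio_resid carries the total transfer length, and a
 * BIO_ORDERED request maps onto IO_SYNC so the filesystem flushes the
 * write before returning.
 */
#if 0
	struct iovec iov;
	struct uio auio;
	int error;

	iov.iov_base = kbuf;
	iov.iov_len = len;
	bzero(&auio, sizeof(auio));
	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_WRITE(vp, &auio, ordered ? IO_SYNC : 0, cred);
	VOP_UNLOCK(vp);
#endif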
2462
2463 /*--------------------------- Backend Configuration --------------------------*/
2464 /**
2465  * Close and cleanup any backend device/file specific state for this
2466  * block back instance. 
2467  *
2468  * \param xbb  Per-instance xbb configuration structure.
2469  */
2470 static void
2471 xbb_close_backend(struct xbb_softc *xbb)
2472 {
2473         DROP_GIANT();
2474         DPRINTF("closing dev=%s\n", xbb->dev_name);
2475         if (xbb->vn) {
2476                 int flags = FREAD;
2477
2478                 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2479                         flags |= FWRITE;
2480
2481                 switch (xbb->device_type) {
2482                 case XBB_TYPE_DISK:
2483                         if (xbb->backend.dev.csw) {
2484                                 dev_relthread(xbb->backend.dev.cdev,
2485                                               xbb->backend.dev.dev_ref);
2486                                 xbb->backend.dev.csw  = NULL;
2487                                 xbb->backend.dev.cdev = NULL;
2488                         }
2489                         break;
2490                 case XBB_TYPE_FILE:
2491                         break;
2492                 case XBB_TYPE_NONE:
2493                 default:
2494                         panic("Unexpected backend type.");
2495                         break;
2496                 }
2497
2498                 (void)vn_close(xbb->vn, flags, NOCRED, curthread);
2499                 xbb->vn = NULL;
2500
2501                 switch (xbb->device_type) {
2502                 case XBB_TYPE_DISK:
2503                         break;
2504                 case XBB_TYPE_FILE:
2505                         if (xbb->backend.file.cred != NULL) {
2506                                 crfree(xbb->backend.file.cred);
2507                                 xbb->backend.file.cred = NULL;
2508                         }
2509                         break;
2510                 case XBB_TYPE_NONE:
2511                 default:
2512                         panic("Unexpected backend type.");
2513                         break;
2514                 }
2515         }
2516         PICKUP_GIANT();
2517 }
2518
2519 /**
2520  * Open a character device to be used for backend I/O.
2521  *
2522  * \param xbb  Per-instance xbb configuration structure.
2523  *
2524  * \return  0 for success, errno codes for failure.
2525  */
2526 static int
2527 xbb_open_dev(struct xbb_softc *xbb)
2528 {
2529         struct vattr   vattr;
2530         struct cdev   *dev;
2531         struct cdevsw *devsw;
2532         int            error;
2533
2534         xbb->device_type = XBB_TYPE_DISK;
2535         xbb->dispatch_io = xbb_dispatch_dev;
2536         xbb->backend.dev.cdev = xbb->vn->v_rdev;
2537         xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2538                                              &xbb->backend.dev.dev_ref);
2539         if (xbb->backend.dev.csw == NULL)
2540                 panic("Unable to retrieve device switch");
2541
2542         error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2543         if (error) {
2544                 xenbus_dev_fatal(xbb->dev, error, "error getting "
2545                                  "vnode attributes for device %s",
2546                                  xbb->dev_name);
2547                 return (error);
2548         }
2549
2550         dev = xbb->vn->v_rdev;
2551         devsw = dev->si_devsw;
2552         if (!devsw->d_ioctl) {
2553                 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2554                                  "device %s!", xbb->dev_name);
2555                 return (ENODEV);
2556         }
2557
2558         error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
2559                                (caddr_t)&xbb->sector_size, FREAD,
2560                                curthread);
2561         if (error) {
2562                 xenbus_dev_fatal(xbb->dev, error,
2563                                  "error calling ioctl DIOCGSECTORSIZE "
2564                                  "for device %s", xbb->dev_name);
2565                 return (error);
2566         }
2567
2568         error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
2569                                (caddr_t)&xbb->media_size, FREAD,
2570                                curthread);
2571         if (error) {
2572                 xenbus_dev_fatal(xbb->dev, error,
2573                                  "error calling ioctl DIOCGMEDIASIZE "
2574                                  "for device %s", xbb->dev_name);
2575                 return (error);
2576         }
2577
2578         return (0);
2579 }
2580
2581 /**
2582  * Open a file to be used for backend I/O.
2583  *
2584  * \param xbb  Per-instance xbb configuration structure.
2585  *
2586  * \return  0 for success, errno codes for failure.
2587  */
2588 static int
2589 xbb_open_file(struct xbb_softc *xbb)
2590 {
2591         struct xbb_file_data *file_data;
2592         struct vattr          vattr;
2593         int                   error;
2594
2595         file_data = &xbb->backend.file;
2596         xbb->device_type = XBB_TYPE_FILE;
2597         xbb->dispatch_io = xbb_dispatch_file;
2598         error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2599         if (error != 0) {
2600                 xenbus_dev_fatal(xbb->dev, error,
2601                                  "error calling VOP_GETATTR() "
2602                                  "for file %s", xbb->dev_name);
2603                 return (error);
2604         }
2605
2606         /*
2607          * Verify that we have the ability to upgrade to exclusive
2608          * access on this file so we can trap errors at open instead
2609          * of reporting them during first access.
2610          */
2611         if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2612                 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2613                 if (VN_IS_DOOMED(xbb->vn)) {
2614                         error = EBADF;
2615                         xenbus_dev_fatal(xbb->dev, error,
2616                                          "error locking file %s",
2617                                          xbb->dev_name);
2618
2619                         return (error);
2620                 }
2621         }
2622
2623         file_data->cred = crhold(curthread->td_ucred);
2624         xbb->media_size = vattr.va_size;
2625
2626         /*
2627          * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
2628          * With ZFS, it is 131072 bytes.  Block sizes that large don't work
2629          * with disklabel and UFS on FreeBSD at least.  Large block sizes
2630          * may not work with other OSes as well.  So just export a sector
2631          * size of 512 bytes, which should work with any OS or
2632          * application.  Since our backing is a file, any block size will
2633          * work fine for the backing store.
2634          */
2635 #if 0
2636         xbb->sector_size = vattr.va_blocksize;
2637 #endif
2638         xbb->sector_size = 512;
2639
2640         /*
2641          * Sanity check.  The media size has to be at least one
2642          * sector long.
2643          */
2644         if (xbb->media_size < xbb->sector_size) {
2645                 error = EINVAL;
2646                 xenbus_dev_fatal(xbb->dev, error,
2647                                  "file %s size %ju < block size %u",
2648                                  xbb->dev_name,
2649                                  (uintmax_t)xbb->media_size,
2650                                  xbb->sector_size);
2651         }
2652         return (error);
2653 }
2654
2655 /**
2656  * Open the backend provider for this connection.
2657  *
2658  * \param xbb  Per-instance xbb configuration structure.
2659  *
2660  * \return  0 for success, errno codes for failure.
2661  */
2662 static int
2663 xbb_open_backend(struct xbb_softc *xbb)
2664 {
2665         struct nameidata nd;
2666         int              flags;
2667         int              error;
2668
2669         flags = FREAD;
2670         error = 0;
2671
2672         DPRINTF("opening dev=%s\n", xbb->dev_name);
2673
2674         if (rootvnode == NULL) {
2675                 xenbus_dev_fatal(xbb->dev, ENOENT,
2676                                  "Root file system not mounted");
2677                 return (ENOENT);
2678         }
2679
2680         if ((xbb->flags & XBBF_READ_ONLY) == 0)
2681                 flags |= FWRITE;
2682
2683         pwd_ensure_dirs();
2684
2685  again:
2686         NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
2687         error = vn_open(&nd, &flags, 0, NULL);
2688         if (error) {
2689                 /*
2690                  * This is the only reasonable guess we can make as to the
2691                  * path if the user doesn't give us a fully qualified path.
2692                  * If they want to specify a file, they need to specify the
2693                  * full path.
2694                  */
2695                 if (xbb->dev_name[0] != '/') {
2696                         char *dev_path = "/dev/";
2697                         char *dev_name;
2698
2699                         /* Try adding device path at beginning of name */
2700                         dev_name = malloc(strlen(xbb->dev_name)
2701                                         + strlen(dev_path) + 1,
2702                                           M_XENBLOCKBACK, M_NOWAIT);
2703                         if (dev_name) {
2704                                 sprintf(dev_name, "%s%s", dev_path,
2705                                         xbb->dev_name);
2706                                 free(xbb->dev_name, M_XENBLOCKBACK);
2707                                 xbb->dev_name = dev_name;
2708                                 goto again;
2709                         }
2710                 }
2711                 xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2712                                  xbb->dev_name);
2713                 return (error);
2714         }
2715
2716         NDFREE(&nd, NDF_ONLY_PNBUF);
2717                 
2718         xbb->vn = nd.ni_vp;
2719
2720         /* We only support disks and files. */
2721         if (vn_isdisk_error(xbb->vn, &error)) {
2722                 error = xbb_open_dev(xbb);
2723         } else if (xbb->vn->v_type == VREG) {
2724                 error = xbb_open_file(xbb);
2725         } else {
2726                 error = EINVAL;
2727                 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2728                                  "or file", xbb->dev_name);
2729         }
2730         VOP_UNLOCK(xbb->vn);
2731
2732         if (error != 0) {
2733                 xbb_close_backend(xbb);
2734                 return (error);
2735         }
2736
2737         xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2738         xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2739
2740         DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n",
2741                 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2742                 xbb->dev_name, xbb->sector_size, xbb->media_size);
2743
2744         return (0);
2745 }
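
/*
 * A worked sketch of the sector-size math at the end of
 * xbb_open_backend(), for illustration only; the local variables are
 * hypothetical.  fls() returns the 1-based index of the most
 * significant set bit, so for power-of-two sector sizes fls(512) == 10
 * yields a shift of 9 and fls(4096) == 13 yields a shift of 12;
 * shifting the media size right by that amount gives the sector count.
 */
#if 0
	u_int sector_size = 512;
	u_int shift = fls(sector_size) - 1;		/* 9 */
	off_t media_size = 10 * 1024 * 1024;		/* 10 MiB backing store */
	off_t num_sectors = media_size >> shift;	/* 20480 sectors */
#endif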
2746
2747 /*------------------------ Inter-Domain Communication ------------------------*/
2748 /**
2749  * Free dynamically allocated KVA or pseudo-physical address allocations.
2750  *
2751  * \param xbb  Per-instance xbb configuration structure.
2752  */
2753 static void
2754 xbb_free_communication_mem(struct xbb_softc *xbb)
2755 {
2756         if (xbb->kva != 0) {
2757                 if (xbb->pseudo_phys_res != NULL) {
2758                         xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2759                             xbb->pseudo_phys_res);
2760                         xbb->pseudo_phys_res = NULL;
2761                 }
2762         }
2763         xbb->kva = 0;
2764         xbb->gnt_base_addr = 0;
2765         if (xbb->kva_free != NULL) {
2766                 free(xbb->kva_free, M_XENBLOCKBACK);
2767                 xbb->kva_free = NULL;
2768         }
2769 }
2770
2771 /**
2772  * Cleanup all inter-domain communication mechanisms.
2773  *
2774  * \param xbb  Per-instance xbb configuration structure.
2775  */
2776 static int
2777 xbb_disconnect(struct xbb_softc *xbb)
2778 {
2779         struct gnttab_unmap_grant_ref  ops[XBB_MAX_RING_PAGES];
2780         struct gnttab_unmap_grant_ref *op;
2781         u_int                          ring_idx;
2782         int                            error;
2783
2784         DPRINTF("\n");
2785
2786         if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
2787                 return (0);
2788
2789         mtx_unlock(&xbb->lock);
2790         xen_intr_unbind(&xbb->xen_intr_handle);
2791         taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); 
2792         mtx_lock(&xbb->lock);
2793
2794         /*
2795          * No new interrupts can generate work, but we must wait
2796          * for all currently active requests to drain.
2797          */
2798         if (xbb->active_request_count != 0)
2799                 return (EAGAIN);
2800
2801         for (ring_idx = 0, op = ops;
2802              ring_idx < xbb->ring_config.ring_pages;
2803              ring_idx++, op++) {
2804                 op->host_addr    = xbb->ring_config.gnt_addr
2805                                  + (ring_idx * PAGE_SIZE);
2806                 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2807                 op->handle       = xbb->ring_config.handle[ring_idx];
2808         }
2809
2810         error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops,
2811                                           xbb->ring_config.ring_pages);
2812         if (error != 0)
2813                 panic("Grant table op failed (%d)", error);
2814
2815         xbb_free_communication_mem(xbb);
2816
2817         if (xbb->requests != NULL) {
2818                 free(xbb->requests, M_XENBLOCKBACK);
2819                 xbb->requests = NULL;
2820         }
2821
2822         if (xbb->request_lists != NULL) {
2823                 struct xbb_xen_reqlist *reqlist;
2824                 int i;
2825
2826                         /* There is one request list for every allocated request. */
2827                 for (i = 0, reqlist = xbb->request_lists;
2828                      i < xbb->max_requests; i++, reqlist++){
2829 #ifdef XBB_USE_BOUNCE_BUFFERS
2830                         if (reqlist->bounce != NULL) {
2831                                 free(reqlist->bounce, M_XENBLOCKBACK);
2832                                 reqlist->bounce = NULL;
2833                         }
2834 #endif
2835                         if (reqlist->gnt_handles != NULL) {
2836                                 free(reqlist->gnt_handles, M_XENBLOCKBACK);
2837                                 reqlist->gnt_handles = NULL;
2838                         }
2839                 }
2840                 free(xbb->request_lists, M_XENBLOCKBACK);
2841                 xbb->request_lists = NULL;
2842         }
2843
2844         xbb->flags &= ~XBBF_RING_CONNECTED;
2845         return (0);
2846 }
2847
2848 /**
2849  * Map shared memory ring into domain local address space, initialize
2850  * ring control structures, and bind an interrupt to the event channel
2851  * used to notify us of ring changes.
2852  *
2853  * \param xbb  Per-instance xbb configuration structure.
2854  */
2855 static int
2856 xbb_connect_ring(struct xbb_softc *xbb)
2857 {
2858         struct gnttab_map_grant_ref  gnts[XBB_MAX_RING_PAGES];
2859         struct gnttab_map_grant_ref *gnt;
2860         u_int                        ring_idx;
2861         int                          error;
2862
2863         if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2864                 return (0);
2865
2866         /*
2867          * Kva for our ring is at the tail of the region of kva allocated
2868          * by xbb_alloc_communication_mem().
2869          */
2870         xbb->ring_config.va = xbb->kva
2871                             + (xbb->kva_size
2872                              - (xbb->ring_config.ring_pages * PAGE_SIZE));
2873         xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2874                                   + (xbb->kva_size
2875                                    - (xbb->ring_config.ring_pages * PAGE_SIZE));
2876
2877         for (ring_idx = 0, gnt = gnts;
2878              ring_idx < xbb->ring_config.ring_pages;
2879              ring_idx++, gnt++) {
2880                 gnt->host_addr = xbb->ring_config.gnt_addr
2881                                + (ring_idx * PAGE_SIZE);
2882                 gnt->flags     = GNTMAP_host_map;
2883                 gnt->ref       = xbb->ring_config.ring_ref[ring_idx];
2884                 gnt->dom       = xbb->otherend_id;
2885         }
2886
2887         error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts,
2888                                           xbb->ring_config.ring_pages);
2889         if (error)
2890                 panic("blkback: Ring page grant table op failed (%d)", error);
2891
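             /*
              * Record the grant handle and bus address for each mapped ring
              * page; a non-zero per-page status means the hypervisor rejected
              * that mapping.
              */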
2892         for (ring_idx = 0, gnt = gnts;
2893              ring_idx < xbb->ring_config.ring_pages;
2894              ring_idx++, gnt++) {
2895                 if (gnt->status != 0) {
2896                         xbb->ring_config.va = 0;
2897                         xenbus_dev_fatal(xbb->dev, EACCES,
2898                                          "Ring shared page mapping failed. "
2899                                          "Status %d.", gnt->status);
2900                         return (EACCES);
2901                 }
2902                 xbb->ring_config.handle[ring_idx]   = gnt->handle;
2903                 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2904         }
2905
2906         /* Initialize the ring based on ABI. */
2907         switch (xbb->abi) {
2908         case BLKIF_PROTOCOL_NATIVE:
2909         {
2910                 blkif_sring_t *sring;
2911                 sring = (blkif_sring_t *)xbb->ring_config.va;
2912                 BACK_RING_INIT(&xbb->rings.native, sring,
2913                                xbb->ring_config.ring_pages * PAGE_SIZE);
2914                 break;
2915         }
2916         case BLKIF_PROTOCOL_X86_32:
2917         {
2918                 blkif_x86_32_sring_t *sring_x86_32;
2919                 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2920                 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2921                                xbb->ring_config.ring_pages * PAGE_SIZE);
2922                 break;
2923         }
2924         case BLKIF_PROTOCOL_X86_64:
2925         {
2926                 blkif_x86_64_sring_t *sring_x86_64;
2927                 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2928                 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2929                                xbb->ring_config.ring_pages * PAGE_SIZE);
2930                 break;
2931         }
2932         default:
2933                 panic("Unexpected blkif protocol ABI.");
2934         }
2935
2936         xbb->flags |= XBBF_RING_CONNECTED;
2937
2938         error = xen_intr_bind_remote_port(xbb->dev,
2939                                           xbb->otherend_id,
2940                                           xbb->ring_config.evtchn,
2941                                           xbb_filter,
2942                                           /*ithread_handler*/NULL,
2943                                           /*arg*/xbb,
2944                                           INTR_TYPE_BIO | INTR_MPSAFE,
2945                                           &xbb->xen_intr_handle);
2946         if (error) {
2947                 (void)xbb_disconnect(xbb);
2948                 xenbus_dev_fatal(xbb->dev, error, "binding event channel");
2949                 return (error);
2950         }
2951
2952         DPRINTF("rings connected!\n");
2953
2954         return 0;
2955 }
2956
2957 /**
2958  * Size KVA and pseudo-physical address allocations based on negotiated
2959  * values for the size and number of I/O requests, and the size of our
2960  * communication ring.
2961  *
2962  * \param xbb  Per-instance xbb configuration structure.
2963  *
2964  * These address spaces are used to dynamically map pages in the
2965  * front-end's domain into our own.
2966  */
2967 static int
2968 xbb_alloc_communication_mem(struct xbb_softc *xbb)
2969 {
2970         xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2971         xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2972         xbb->kva_size = xbb->reqlist_kva_size +
2973                         (xbb->ring_config.ring_pages * PAGE_SIZE);
2974
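             /*
              * The KVA region holds the per-request segment mappings followed
              * by the shared ring pages at its tail.  kva_free tracks only the
              * request-list portion, one bit per page.
              */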
2975         xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
2976         if (xbb->kva_free == NULL)
2977                 return (ENOMEM);
2978
2979         DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
2980                 device_get_nameunit(xbb->dev), xbb->kva_size,
2981                 xbb->reqlist_kva_size);
2982         /*
2983          * Reserve a range of pseudo physical memory that we can map
2984          * into kva.  These pages will only be backed by machine
2985          * pages ("real memory") during the lifetime of front-end requests
2986          * via grant table operations.
2987          */
2988         xbb->pseudo_phys_res_id = 0;
2989         xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
2990             xbb->kva_size);
2991         if (xbb->pseudo_phys_res == NULL) {
2992                 xbb->kva = 0;
2993                 return (ENOMEM);
2994         }
2995         xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
2996         xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
2997
2998         DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
2999                 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3000                 (uintmax_t)xbb->gnt_base_addr); 
3001         return (0);
3002 }
3003
3004 /**
3005  * Collect front-end information from the XenStore.
3006  *
3007  * \param xbb  Per-instance xbb configuration structure.
3008  */
3009 static int
3010 xbb_collect_frontend_info(struct xbb_softc *xbb)
3011 {
3012         char        protocol_abi[64];
3013         const char *otherend_path;
3014         int         error;
3015         u_int       ring_idx;
3016         u_int       ring_page_order;
3017         size_t      ring_size;
3018
3019         otherend_path = xenbus_get_otherend_path(xbb->dev);
3020
3021         /*
3022          * Protocol defaults valid even if all negotiation fails.
3023          */
3024         xbb->ring_config.ring_pages = 1;
3025         xbb->max_request_segments   = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3026         xbb->max_request_size       = xbb->max_request_segments * PAGE_SIZE;
3027
3028         /*
3029          * Mandatory data (used in all versions of the protocol) first.
3030          */
3031         error = xs_scanf(XST_NIL, otherend_path,
3032                          "event-channel", NULL, "%" PRIu32,
3033                          &xbb->ring_config.evtchn);
3034         if (error != 0) {
3035                 xenbus_dev_fatal(xbb->dev, error,
3036                                  "Unable to retrieve event-channel information "
3037                                  "from frontend %s.  Unable to connect.",
3038                                  xenbus_get_otherend_path(xbb->dev));
3039                 return (error);
3040         }
3041
3042         /*
3043          * These fields are initialized to legacy protocol defaults
3044          * so we only need to fail if reading the updated value succeeds
3045          * and the new value is outside of its allowed range.
3046          *
3047          * \note xs_gather() returns on the first encountered error, so
3048          *       we must use independent calls in order to guarantee
3049  *       we don't miss information in a sparsely populated front-end
3050          *       tree.
3051          *
3052          * \note xs_scanf() does not update variables for unmatched
3053          *       fields.
3054          */
3055         ring_page_order = 0;
3056         xbb->max_requests = 32;
3057
3058         (void)xs_scanf(XST_NIL, otherend_path,
3059                        "ring-page-order", NULL, "%u",
3060                        &ring_page_order);
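             /*
              * The negotiated ring size determines how many request slots the
              * shared ring provides and therefore how many requests can ever
              * be outstanding at once.
              */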
3061         xbb->ring_config.ring_pages = 1 << ring_page_order;
3062         ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3063         xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3064
3065         if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
3066                 xenbus_dev_fatal(xbb->dev, EINVAL,
3067                                  "Front-end specified ring-pages of %u "
3068                                  "exceeds backend limit of %u.  "
3069                                  "Unable to connect.",
3070                                  xbb->ring_config.ring_pages,
3071                                  XBB_MAX_RING_PAGES);
3072                 return (EINVAL);
3073         }
3074
3075         if (xbb->ring_config.ring_pages == 1) {
3076                 error = xs_gather(XST_NIL, otherend_path,
3077                                   "ring-ref", "%" PRIu32,
3078                                   &xbb->ring_config.ring_ref[0],
3079                                   NULL);
3080                 if (error != 0) {
3081                         xenbus_dev_fatal(xbb->dev, error,
3082                                          "Unable to retrieve ring information "
3083                                          "from frontend %s.  Unable to "
3084                                          "connect.",
3085                                          xenbus_get_otherend_path(xbb->dev));
3086                         return (error);
3087                 }
3088         } else {
3089                 /* Multi-page ring format. */
3090                 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
3091                      ring_idx++) {
3092                         char ring_ref_name[] = "ring_refXX";
3093
3094                         snprintf(ring_ref_name, sizeof(ring_ref_name),
3095                                  "ring-ref%u", ring_idx);
3096                         error = xs_scanf(XST_NIL, otherend_path,
3097                                          ring_ref_name, NULL, "%" PRIu32,
3098                                          &xbb->ring_config.ring_ref[ring_idx]);
3099                         if (error != 0) {
3100                                 xenbus_dev_fatal(xbb->dev, error,
3101                                                  "Failed to retrieve grant "
3102                                                  "reference for page %u of "
3103                                                  "shared ring.  Unable "
3104                                                  "to connect.", ring_idx);
3105                                 return (error);
3106                         }
3107                 }
3108         }
3109
3110         error = xs_gather(XST_NIL, otherend_path,
3111                           "protocol", "%63s", protocol_abi,
3112                           NULL); 
3113         if (error != 0
3114          || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) {
3115                 /*
3116                  * Assume native if the frontend has not
3117                  * published ABI data or it has published and
3118                  * published ABI data or has published data that
3119                  */
3120                 xbb->abi = BLKIF_PROTOCOL_NATIVE;
3121         } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) {
3122                 xbb->abi = BLKIF_PROTOCOL_X86_32;
3123         } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) {
3124                 xbb->abi = BLKIF_PROTOCOL_X86_64;
3125         } else {
3126                 xenbus_dev_fatal(xbb->dev, EINVAL,
3127                                  "Unknown protocol ABI (%s) published by "
3128                                  "frontend.  Unable to connect.", protocol_abi);
3129                 return (EINVAL);
3130         }
3131         return (0);
3132 }
3133
3134 /**
3135  * Allocate per-request data structures given request size and number
3136  * information negotiated with the front-end.
3137  *
3138  * \param xbb  Per-instance xbb configuration structure.
3139  */
3140 static int
3141 xbb_alloc_requests(struct xbb_softc *xbb)
3142 {
3143         struct xbb_xen_req *req;
3144         struct xbb_xen_req *last_req;
3145
3146         /*
3147          * Allocate request bookkeeping data structures.
3148          */
3149         xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3150                                M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3151         if (xbb->requests == NULL) {
3152                 xenbus_dev_fatal(xbb->dev, ENOMEM, 
3153                                   "Unable to allocate request structures");
3154                 return (ENOMEM);
3155         }
3156
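             /* Thread every request onto the free list for later allocation. */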
3157         req      = xbb->requests;
3158         last_req = &xbb->requests[xbb->max_requests - 1];
3159         STAILQ_INIT(&xbb->request_free_stailq);
3160         while (req <= last_req) {
3161                 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3162                 req++;
3163         }
3164         return (0);
3165 }
3166
3167 static int
3168 xbb_alloc_request_lists(struct xbb_softc *xbb)
3169 {
3170         struct xbb_xen_reqlist *reqlist;
3171         int                     i;
3172
3173         /*
3174          * If no requests can be merged, we need one request list per
3175          * in-flight request.
3176          */
3177         xbb->request_lists = malloc(xbb->max_requests *
3178                 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3179         if (xbb->request_lists == NULL) {
3180                 xenbus_dev_fatal(xbb->dev, ENOMEM, 
3181                                   "Unable to allocate request list structures");
3182                 return (ENOMEM);
3183         }
3184
3185         STAILQ_INIT(&xbb->reqlist_free_stailq);
3186         STAILQ_INIT(&xbb->reqlist_pending_stailq);
3187         for (i = 0; i < xbb->max_requests; i++) {
3188                 int seg;
3189
3190                 reqlist      = &xbb->request_lists[i];
3191
3192                 reqlist->xbb = xbb;
3193
3194 #ifdef XBB_USE_BOUNCE_BUFFERS
3195                 reqlist->bounce = malloc(xbb->max_reqlist_size,
3196                                          M_XENBLOCKBACK, M_NOWAIT);
3197                 if (reqlist->bounce == NULL) {
3198                         xenbus_dev_fatal(xbb->dev, ENOMEM, 
3199                                          "Unable to allocate request "
3200                                          "bounce buffers");
3201                         return (ENOMEM);
3202                 }
3203 #endif /* XBB_USE_BOUNCE_BUFFERS */
3204
3205                 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3206                                               sizeof(*reqlist->gnt_handles),
3207                                               M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3208                 if (reqlist->gnt_handles == NULL) {
3209                         xenbus_dev_fatal(xbb->dev, ENOMEM,
3210                                           "Unable to allocate request "
3211                                           "grant references");
3212                         return (ENOMEM);
3213                 }
3214
3215                 for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3216                         reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3217
3218                 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3219         }
3220         return (0);
3221 }
3222
3223 /**
3224  * Supply information about the physical device to the frontend
3225  * via XenBus.
3226  *
3227  * \param xbb  Per-instance xbb configuration structure.
3228  */
3229 static int
3230 xbb_publish_backend_info(struct xbb_softc *xbb)
3231 {
3232         struct xs_transaction xst;
3233         const char           *our_path;
3234         const char           *leaf;
3235         int                   error;
3236
3237         our_path = xenbus_get_node(xbb->dev);
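             /*
              * Standard XenStore transaction retry loop: if the commit fails
              * with EAGAIN another writer raced with us and all of the updates
              * are retried from the top.
              */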
3238         while (1) {
3239                 error = xs_transaction_start(&xst);
3240                 if (error != 0) {
3241                         xenbus_dev_fatal(xbb->dev, error,
3242                                          "Error publishing backend info "
3243                                          "(start transaction)");
3244                         return (error);
3245                 }
3246
3247                 leaf = "sectors";
3248                 error = xs_printf(xst, our_path, leaf,
3249                                   "%"PRIu64, xbb->media_num_sectors);
3250                 if (error != 0)
3251                         break;
3252
3253                 /* XXX Support all VBD attributes here. */
3254                 leaf = "info";
3255                 error = xs_printf(xst, our_path, leaf, "%u",
3256                                   xbb->flags & XBBF_READ_ONLY
3257                                 ? VDISK_READONLY : 0);
3258                 if (error != 0)
3259                         break;
3260
3261                 leaf = "sector-size";
3262                 error = xs_printf(xst, our_path, leaf, "%u",
3263                                   xbb->sector_size);
3264                 if (error != 0)
3265                         break;
3266
3267                 error = xs_transaction_end(xst, 0);
3268                 if (error == 0) {
3269                         return (0);
3270                 } else if (error != EAGAIN) {
3271                         xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3272                         return (error);
3273                 }
3274         }
3275
3276         xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3277                         our_path, leaf);
3278         xs_transaction_end(xst, 1);
3279         return (error);
3280 }
3281
3282 /**
3283  * Connect to our blkfront peer now that it has completed publishing
3284  * its configuration into the XenStore.
3285  *
3286  * \param xbb  Per-instance xbb configuration structure.
3287  */
3288 static void
3289 xbb_connect(struct xbb_softc *xbb)
3290 {
3291         int error;
3292
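             /*
              * Do nothing until the hotplug scripts have completed and we are
              * in InitWait; if collecting the front-end configuration fails,
              * a later front-end state change will retry the connection.
              */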
3293         if (!xbb->hotplug_done ||
3294             (xenbus_get_state(xbb->dev) != XenbusStateInitWait) ||
3295             (xbb_collect_frontend_info(xbb) != 0))
3296                 return;
3297
3298         xbb->flags &= ~XBBF_SHUTDOWN;
3299
3300         /*
3301          * We limit the maximum number of reqlist segments to the maximum
3302          * number of segments in the ring, or our absolute maximum,
3303          * whichever is smaller.
3304          */
3305         xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3306                 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3307
3308         /*
3309          * The maximum size is simply a function of the number of segments
3310          * we can handle.
3311          */
3312         xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3313
3314         /* Allocate resources whose size depends on front-end configuration. */
3315         error = xbb_alloc_communication_mem(xbb);
3316         if (error != 0) {
3317                 xenbus_dev_fatal(xbb->dev, error,
3318                                  "Unable to allocate communication memory");
3319                 return;
3320         }
3321
3322         error = xbb_alloc_requests(xbb);
3323         if (error != 0) {
3324                 /* Specific errors are reported by xbb_alloc_requests(). */
3325                 return;
3326         }
3327
3328         error = xbb_alloc_request_lists(xbb);
3329         if (error != 0) {
3330                 /* Specific errors are reported by xbb_alloc_request_lists(). */
3331                 return;
3332         }
3333
3334         /*
3335          * Connect communication channel.
3336          */
3337         error = xbb_connect_ring(xbb);
3338         if (error != 0) {
3339                 /* Specific errors are reported by xbb_connect_ring(). */
3340                 return;
3341         }
3342
3343         if (xbb_publish_backend_info(xbb) != 0) {
3344                 /*
3345                  * If we can't publish our data, we cannot participate
3346                  * in this connection, and waiting for a front-end state
3347                  * change will not help the situation.
3348                  */
3349                 (void)xbb_disconnect(xbb);
3350                 return;
3351         }
3352
3353         /* Ready for I/O. */
3354         xenbus_set_state(xbb->dev, XenbusStateConnected);
3355 }
3356
3357 /*-------------------------- Device Teardown Support -------------------------*/
3358 /**
3359  * Perform device shutdown functions.
3360  *
3361  * \param xbb  Per-instance xbb configuration structure.
3362  *
3363  * Mark this instance as shutting down, wait for any active I/O on the
3364  * backend device/file to drain, disconnect from the front-end, and notify
3365  * any waiters (e.g. a thread invoking our detach method) that detach can
3366  * now proceed.
3367  */
3368 static int
3369 xbb_shutdown(struct xbb_softc *xbb)
3370 {
3371         XenbusState frontState;
3372         int         error;
3373
3374         DPRINTF("\n");
3375
3376         /*
3377          * Due to the need to drop our mutex during some
3378          * xenbus operations, it is possible for two threads
3379          * to attempt to close out shutdown processing at
3380          * the same time.  Tell the caller that hits this
3381          * race to try back later. 
3382          */
3383         if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3384                 return (EAGAIN);
3385
3386         xbb->flags |= XBBF_IN_SHUTDOWN;
3387         mtx_unlock(&xbb->lock);
3388
3389         if (xbb->hotplug_watch.node != NULL) {
3390                 xs_unregister_watch(&xbb->hotplug_watch);
3391                 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3392                 xbb->hotplug_watch.node = NULL;
3393         }
3394         xbb->hotplug_done = false;
3395
3396         if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3397                 xenbus_set_state(xbb->dev, XenbusStateClosing);
3398
3399         frontState = xenbus_get_otherend_state(xbb->dev);
3400         mtx_lock(&xbb->lock);
3401         xbb->flags &= ~XBBF_IN_SHUTDOWN;
3402
3403         /* Wait for the frontend to disconnect (if it's connected). */
3404         if (frontState == XenbusStateConnected)
3405                 return (EAGAIN);
3406
3407         DPRINTF("\n");
3408
3409         /* Indicate shutdown is in progress. */
3410         xbb->flags |= XBBF_SHUTDOWN;
3411
3412         /* Disconnect from the front-end. */
3413         error = xbb_disconnect(xbb);
3414         if (error != 0) {
3415                 /*
3416                  * Requests still outstanding.  We'll be called again
3417                  * once they complete.
3418                  */
3419                 KASSERT(error == EAGAIN,
3420                         ("%s: Unexpected xbb_disconnect() failure %d",
3421                          __func__, error));
3422
3423                 return (error);
3424         }
3425
3426         DPRINTF("\n");
3427
3428         /* Indicate to xbb_detach() that it is safe to proceed. */
3429         wakeup(xbb);
3430
3431         return (0);
3432 }
3433
3434 /**
3435  * Report an attach-time error to the console and Xen, and clean up
3436  * this instance by forcing immediate detach processing.
3437  *
3438  * \param xbb  Per-instance xbb configuration structure.
3439  * \param err  Errno describing the error.
3440  * \param fmt  Printf-style format string and arguments.
3441  */
3442 static void
3443 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3444 {
3445         va_list ap;
3446         va_list ap_hotplug;
3447
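             /*
              * The argument list is consumed twice (once for the hotplug-error
              * node and once for xenbus_dev_vfatal()), so work from a copy for
              * the first use.
              */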
3448         va_start(ap, fmt);
3449         va_copy(ap_hotplug, ap);
3450         xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3451                   "hotplug-error", fmt, ap_hotplug);
3452         va_end(ap_hotplug);
3453         xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3454                   "hotplug-status", "error");
3455
3456         xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3457         va_end(ap);
3458
3459         xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3460                   "online", "0");
3461         mtx_lock(&xbb->lock);
3462         xbb_shutdown(xbb);
3463         mtx_unlock(&xbb->lock);
3464 }
3465
3466 /*---------------------------- NewBus Entrypoints ----------------------------*/
3467 /**
3468  * Inspect a XenBus device and claim it if it is of the appropriate type.
3469  * 
3470  * \param dev  NewBus device object representing a candidate XenBus device.
3471  *
3472  * \return  0 for success, errno codes for failure.
3473  */
3474 static int
3475 xbb_probe(device_t dev)
3476 {
3477
3478         if (!strcmp(xenbus_get_type(dev), "vbd")) {
3479                 device_set_desc(dev, "Backend Virtual Block Device");
3480                 device_quiet(dev);
3481                 return (0);
3482         }
3483
3484         return (ENXIO);
3485 }
3486
3487 /**
3488  * Set up sysctl variables to control various Block Back parameters.
3489  *
3490  * \param xbb  Xen Block Back softc.
3491  *
3492  */
3493 static void
3494 xbb_setup_sysctl(struct xbb_softc *xbb)
3495 {
3496         struct sysctl_ctx_list *sysctl_ctx = NULL;
3497         struct sysctl_oid      *sysctl_tree = NULL;
3498
3499         sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3500         if (sysctl_ctx == NULL)
3501                 return;
3502
3503         sysctl_tree = device_get_sysctl_tree(xbb->dev);
3504         if (sysctl_tree == NULL)
3505                 return;
3506
3507         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3508                        "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3509                        "fake the flush command");
3510
3511         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3512                        "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3513                        "send a real flush for N flush requests");
3514
3515         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3516                        "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3517                        "Don't coalesce contiguous requests");
3518
3519         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3520                          "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3521                          "how many I/O requests we have received");
3522
3523         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3524                          "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3525                          "how many I/O requests have been completed");
3526
3527         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3528                          "reqs_queued_for_completion", CTLFLAG_RW,
3529                          &xbb->reqs_queued_for_completion,
3530                          "how many I/O requests queued but not yet pushed");
3531
3532         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3533                          "reqs_completed_with_error", CTLFLAG_RW,
3534                          &xbb->reqs_completed_with_error,
3535                          "how many I/O requests completed with error status");
3536
3537         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3538                          "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3539                          "how many I/O dispatches were forced");
3540
3541         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3542                          "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3543                          "how many I/O dispatches were normal");
3544
3545         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3546                          "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3547                          "total number of I/O dispatches");
3548
3549         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3550                          "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3551                          "how many times we have run out of KVA");
3552
3553         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3554                          "request_shortages", CTLFLAG_RW,
3555                          &xbb->request_shortages,
3556                          "how many times we have run out of requests");
3557
3558         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3559                         "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3560                         "maximum outstanding requests (negotiated)");
3561
3562         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3563                         "max_request_segments", CTLFLAG_RD,
3564                         &xbb->max_request_segments, 0,
3565                         "maximum number of pages per request (negotiated)");
3566
3567         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3568                         "max_request_size", CTLFLAG_RD,
3569                         &xbb->max_request_size, 0,
3570                         "maximum size in bytes of a request (negotiated)");
3571
3572         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3573                         "ring_pages", CTLFLAG_RD,
3574                         &xbb->ring_config.ring_pages, 0,
3575                         "communication channel pages (negotiated)");
3576 }
3577
3578 static void
3579 xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int len)
3580 {
3581         device_t                 dev;
3582         struct xbb_softc        *xbb;
3583         int                      error;
3584
3585         dev = (device_t) watch->callback_data;
3586         xbb = device_get_softc(dev);
3587
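             /*
              * The hotplug script publishes physical-device-path once it has
              * finished; until that node exists simply wait for the next watch
              * callback.
              */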
3588         error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path",
3589             NULL, &xbb->dev_name, NULL);
3590         if (error != 0)
3591                 return;
3592
3593         xs_unregister_watch(watch);
3594         free(watch->node, M_XENBLOCKBACK);
3595         watch->node = NULL;
3596
3597         /* Collect physical device information. */
3598         error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3599                           "device-type", NULL, &xbb->dev_type,
3600                           NULL);
3601         if (error != 0)
3602                 xbb->dev_type = NULL;
3603
3604         error = xs_gather(XST_NIL, xenbus_get_node(dev),
3605                           "mode", NULL, &xbb->dev_mode,
3606                           NULL);
3607         if (error != 0) {
3608                 xbb_attach_failed(xbb, error, "reading backend fields at %s",
3609                                   xenbus_get_node(dev));
3610                 return;
3611         }
3612
3613         /* Parse fopen style mode flags. */
3614         if (strchr(xbb->dev_mode, 'w') == NULL)
3615                 xbb->flags |= XBBF_READ_ONLY;
3616
3617         /*
3618          * Verify the physical device is present and can support
3619          * the desired I/O mode.
3620          */
3621         error = xbb_open_backend(xbb);
3622         if (error != 0) {
3623                 xbb_attach_failed(xbb, error, "Unable to open %s",
3624                                   xbb->dev_name);
3625                 return;
3626         }
3627
3628         /* Use devstat(9) for recording statistics. */
3629         xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3630                                            xbb->sector_size,
3631                                            DEVSTAT_ALL_SUPPORTED,
3632                                            DEVSTAT_TYPE_DIRECT
3633                                          | DEVSTAT_TYPE_IF_OTHER,
3634                                            DEVSTAT_PRIORITY_OTHER);
3635
3636         xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3637                                               xbb->sector_size,
3638                                               DEVSTAT_ALL_SUPPORTED,
3639                                               DEVSTAT_TYPE_DIRECT
3640                                             | DEVSTAT_TYPE_IF_OTHER,
3641                                               DEVSTAT_PRIORITY_OTHER);
3642         /*
3643          * Set up sysctl variables.
3644          */
3645         xbb_setup_sysctl(xbb);
3646
3647         /*
3648          * Create a taskqueue for doing work that must occur from a
3649          * thread context.
3650          */
3651         xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3652                                                   M_NOWAIT,
3653                                                   taskqueue_thread_enqueue,
3654                                                   /*context*/&xbb->io_taskqueue);
3655         if (xbb->io_taskqueue == NULL) {
3656                 xbb_attach_failed(xbb, error, "Unable to create taskqueue");
3657                 return;
3658         }
3659
3660         taskqueue_start_threads(&xbb->io_taskqueue,
3661                                 /*num threads*/1,
3662                                 /*priority*/PWAIT,
3663                                 /*thread name*/
3664                                 "%s taskq", device_get_nameunit(dev));
3665
3666         /* Update hot-plug status to satisfy xend. */
3667         error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3668                           "hotplug-status", "connected");
3669         if (error) {
3670                 xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3671                                   xenbus_get_node(xbb->dev));
3672                 return;
3673         }
3674
3675         xbb->hotplug_done = true;
3676
3677         /* The front end might be waiting for the backend; connect if so. */
3678         if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised)
3679                 xbb_connect(xbb);
3680 }
3681
3682 /**
3683  * Attach to a XenBus device that has been claimed by our probe routine.
3684  *
3685  * \param dev  NewBus device object representing this Xen Block Back instance.
3686  *
3687  * \return  0 for success, errno codes for failure.
3688  */
3689 static int
3690 xbb_attach(device_t dev)
3691 {
3692         struct xbb_softc        *xbb;
3693         int                      error;
3694         u_int                    max_ring_page_order;
3695         struct sbuf             *watch_path;
3696
3697         DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
3698
3699         /*
3700          * Basic initialization.
3701          * After this block it is safe to call xbb_detach()
3702          * to clean up any allocated data for this instance.
3703          */
3704         xbb = device_get_softc(dev);
3705         xbb->dev = dev;
3706         xbb->otherend_id = xenbus_get_otherend_id(dev);
3707         TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3708         mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3709
3710         /*
3711          * Publish protocol capabilities for consumption by the
3712          * front-end.
3713          */
3714         error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3715                           "feature-barrier", "1");
3716         if (error) {
3717                 xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3718                                   xenbus_get_node(xbb->dev));
3719                 return (error);
3720         }
3721
3722         error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3723                           "feature-flush-cache", "1");
3724         if (error) {
3725                 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3726                                   xenbus_get_node(xbb->dev));
3727                 return (error);
3728         }
3729
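             /*
              * Advertise the largest multi-page ring we support;
              * flsl(XBB_MAX_RING_PAGES) - 1 is log2 of the page count, which is
              * the form max-ring-page-order expects.
              */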
3730         max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1;
3731         error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3732                           "max-ring-page-order", "%u", max_ring_page_order);
3733         if (error) {
3734                 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3735                                   xenbus_get_node(xbb->dev));
3736                 return (error);
3737         }
3738
3739         /*
3740          * We need to wait for hotplug script execution before
3741          * moving forward.
3742          */
3743         KASSERT(!xbb->hotplug_done, ("Hotplug scripts already executed"));
3744         watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
3745         xbb->hotplug_watch.callback_data = (uintptr_t)dev;
3746         xbb->hotplug_watch.callback = xbb_attach_disk;
3747         KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
3748         xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
3749         /*
3750          * We don't care about which path was updated, just about value changes
3751          * on that single node, hence there's no need to queue more than one
3752          * event.
3753          */
3754         xbb->hotplug_watch.max_pending = 1;
3755         sbuf_delete(watch_path);
3756         error = xs_register_watch(&xbb->hotplug_watch);
3757         if (error != 0) {
3758                 xbb_attach_failed(xbb, error, "failed to create watch on %s",
3759                     xbb->hotplug_watch.node);
3760                 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3761                 return (error);
3762         }
3763
3764         /* Tell the toolstack blkback has attached. */
3765         xenbus_set_state(dev, XenbusStateInitWait);
3766
3767         return (0);
3768 }
3769
3770 /**
3771  * Detach from a block back device instance.
3772  *
3773  * \param dev  NewBus device object representing this Xen Block Back instance.
3774  *
3775  * \return  0 for success, errno codes for failure.
3776  * 
3777  * \note A block back device may be detached at any time in its life-cycle,
3778  *       including part way through the attach process.  For this reason,
3779  *       initialization order and the initialization state checks in this
3780  *       routine must be carefully coupled so that attach time failures
3781  *       are gracefully handled.
3782  */
3783 static int
3784 xbb_detach(device_t dev)
3785 {
3786         struct xbb_softc *xbb;
3787
3788         DPRINTF("\n");
3789
3790         xbb = device_get_softc(dev);
3791         mtx_lock(&xbb->lock);
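             /*
              * xbb_shutdown() returns EAGAIN while the front-end is still
              * connected or requests remain outstanding; sleep until it signals
              * that teardown has completed.
              */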
3792         while (xbb_shutdown(xbb) == EAGAIN) {
3793                 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3794                        "xbb_shutdown", 0);
3795         }
3796         mtx_unlock(&xbb->lock);
3797
3798         DPRINTF("\n");
3799
3800         if (xbb->io_taskqueue != NULL)
3801                 taskqueue_free(xbb->io_taskqueue);
3802
3803         if (xbb->xbb_stats != NULL)
3804                 devstat_remove_entry(xbb->xbb_stats);
3805
3806         if (xbb->xbb_stats_in != NULL)
3807                 devstat_remove_entry(xbb->xbb_stats_in);
3808
3809         xbb_close_backend(xbb);
3810
3811         if (xbb->dev_mode != NULL) {
3812                 free(xbb->dev_mode, M_XENSTORE);
3813                 xbb->dev_mode = NULL;
3814         }
3815
3816         if (xbb->dev_type != NULL) {
3817                 free(xbb->dev_type, M_XENSTORE);
3818                 xbb->dev_type = NULL;
3819         }
3820
3821         if (xbb->dev_name != NULL) {
3822                 free(xbb->dev_name, M_XENSTORE);
3823                 xbb->dev_name = NULL;
3824         }
3825
3826         mtx_destroy(&xbb->lock);
3827         return (0);
3828 }
3829
3830 /**
3831  * Prepare this block back device for suspension of this VM.
3832  * 
3833  * \param dev  NewBus device object representing this Xen Block Back instance.
3834  *
3835  * \return  0 for success, errno codes for failure.
3836  */
3837 static int
3838 xbb_suspend(device_t dev)
3839 {
3840 #ifdef NOT_YET
3841         struct xbb_softc *sc = device_get_softc(dev);
3842
3843         /* Prevent new requests being issued until we fix things up. */
3844         mtx_lock(&sc->xb_io_lock);
3845         sc->connected = BLKIF_STATE_SUSPENDED;
3846         mtx_unlock(&sc->xb_io_lock);
3847 #endif
3848
3849         return (0);
3850 }
3851
3852 /**
3853  * Perform any processing required to recover from a suspended state.
3854  * 
3855  * \param dev  NewBus device object representing this Xen Block Back instance.
3856  *
3857  * \return  0 for success, errno codes for failure.
3858  */
3859 static int
3860 xbb_resume(device_t dev)
3861 {
3862         return (0);
3863 }
3864
3865 /**
3866  * Handle state changes expressed via the XenStore by our front-end peer.
3867  *
3868  * \param dev             NewBus device object representing this Xen
3869  *                        Block Back instance.
3870  * \param frontend_state  The new state of the front-end.
3873  */
3874 static void
3875 xbb_frontend_changed(device_t dev, XenbusState frontend_state)
3876 {
3877         struct xbb_softc *xbb = device_get_softc(dev);
3878
3879         DPRINTF("frontend_state=%s, xbb_state=%s\n",
3880                 xenbus_strstate(frontend_state),
3881                 xenbus_strstate(xenbus_get_state(xbb->dev)));
3882
3883         switch (frontend_state) {
3884         case XenbusStateInitialising:
3885                 break;
3886         case XenbusStateInitialised:
3887         case XenbusStateConnected:
3888                 xbb_connect(xbb);
3889                 break;
3890         case XenbusStateClosing:
3891         case XenbusStateClosed:
3892                 mtx_lock(&xbb->lock);
3893                 xbb_shutdown(xbb);
3894                 mtx_unlock(&xbb->lock);
3895                 if (frontend_state == XenbusStateClosed)
3896                         xenbus_set_state(xbb->dev, XenbusStateClosed);
3897                 break;
3898         default:
3899                 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
3900                                  frontend_state);
3901                 break;
3902         }
3903 }
3904
3905 /*---------------------------- NewBus Registration ---------------------------*/
3906 static device_method_t xbb_methods[] = {
3907         /* Device interface */
3908         DEVMETHOD(device_probe,         xbb_probe),
3909         DEVMETHOD(device_attach,        xbb_attach),
3910         DEVMETHOD(device_detach,        xbb_detach),
3911         DEVMETHOD(device_shutdown,      bus_generic_shutdown),
3912         DEVMETHOD(device_suspend,       xbb_suspend),
3913         DEVMETHOD(device_resume,        xbb_resume),
3914
3915         /* Xenbus interface */
3916         DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed),
3917         { 0, 0 }
3918 };
3919
3920 static driver_t xbb_driver = {
3921         "xbbd",
3922         xbb_methods,
3923         sizeof(struct xbb_softc),
3924 };
3925 devclass_t xbb_devclass;
3926
3927 DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0);