2 * XenBSD block device driver
4 * Copyright (c) 2010-2013 Spectra Logic Corporation
5 * Copyright (c) 2009 Scott Long, Yahoo!
6 * Copyright (c) 2009 Frank Suchomel, Citrix
7 * Copyright (c) 2009 Doug F. Rabson, Citrix
8 * Copyright (c) 2005 Kip Macy
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to
15 * deal in the Software without restriction, including without limitation the
16 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
17 * sell copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
44 #include <sys/module.h>
45 #include <sys/sysctl.h>
47 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <machine/intr_machdep.h>
51 #include <machine/vmparam.h>
53 #include <xen/xen-os.h>
54 #include <xen/hypervisor.h>
55 #include <xen/xen_intr.h>
56 #include <xen/gnttab.h>
57 #include <xen/interface/grant_table.h>
58 #include <xen/interface/io/protocols.h>
59 #include <xen/xenbus/xenbusvar.h>
61 #include <machine/_inttypes.h>
63 #include <geom/geom_disk.h>
65 #include <dev/xen/blkfront/block.h>
67 #include "xenbus_if.h"
69 /*--------------------------- Forward Declarations ---------------------------*/
70 static void xbd_closing(device_t);
71 static void xbd_startio(struct xbd_softc *sc);
73 /*---------------------------------- Macros ----------------------------------*/
75 #define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
77 #define DPRINTK(fmt, args...)
80 #define XBD_SECTOR_SHFT 9
82 /*---------------------------- Global Static Data ----------------------------*/
83 static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
85 static int xbd_enable_indirect = 1;
86 SYSCTL_NODE(_hw, OID_AUTO, xbd, CTLFLAG_RD, 0, "xbd driver parameters");
87 SYSCTL_INT(_hw_xbd, OID_AUTO, xbd_enable_indirect, CTLFLAG_RDTUN,
88 &xbd_enable_indirect, 0, "Enable xbd indirect segments");
90 /*---------------------------- Command Processing ----------------------------*/
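/*
 * Queue freeze/thaw: xbd_freeze() bumps xbd_qfrozen_cnt (optionally recording
 * a reason flag) so that xbd_startio() stops dispatching new work, and
 * xbd_thaw() reverses it.  Flagged freezes are idempotent -- a second
 * freeze/thaw for a flag that is already set/clear is ignored.
 */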
92 xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
94 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
97 sc->xbd_flags |= xbd_flag;
98 sc->xbd_qfrozen_cnt++;
102 xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
104 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
107 if (sc->xbd_qfrozen_cnt == 0)
108 panic("%s: Thaw with flag 0x%x while not frozen.",
111 sc->xbd_flags &= ~xbd_flag;
112 sc->xbd_qfrozen_cnt--;
116 xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
118 if ((cm->cm_flags & XBDCF_FROZEN) != 0)
121 cm->cm_flags |= XBDCF_FROZEN|cm_flag;
122 xbd_freeze(sc, XBDF_NONE);
126 xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
128 if ((cm->cm_flags & XBDCF_FROZEN) == 0)
131 cm->cm_flags &= ~XBDCF_FROZEN;
132 xbd_thaw(sc, XBDF_NONE);
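/*
 * Publish any requests queued on the shared ring and, when the ring macro
 * indicates the backend needs it, signal the event channel.
 */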
136 xbd_flush_requests(struct xbd_softc *sc)
140 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);
143 xen_intr_signal(sc->xen_intr_handle);
147 xbd_free_command(struct xbd_command *cm)
150 KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
151 ("Freeing command that is still on queue %d.",
152 cm->cm_flags & XBDCF_Q_MASK));
154 cm->cm_flags = XBDCF_INITIALIZER;
156 cm->cm_complete = NULL;
157 xbd_enqueue_cm(cm, XBD_Q_FREE);
158 xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
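/*
 * Translate a bus_dma segment list into blkif_request_segment entries,
 * claiming one grant reference per page and recording the first and last
 * 512-byte sector touched within that page.  For example, a segment at page
 * offset 0x600 with length 0x400 yields fsect = 3 and lsect = 4.
 */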
162 xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
163 grant_ref_t * gref_head, int otherend_id, int readonly,
164 grant_ref_t * sg_ref, struct blkif_request_segment *sg)
166 struct blkif_request_segment *last_block_sg = sg + nsegs;
167 vm_paddr_t buffer_ma;
168 uint64_t fsect, lsect;
171 while (sg < last_block_sg) {
172 KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
173 ("XEN disk driver I/O must be sector aligned"));
174 KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
175 ("XEN disk driver I/Os must be a multiple of "
176 "the sector length"));
177 buffer_ma = segs->ds_addr;
178 fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
179 lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
181 KASSERT(lsect <= 7, ("XEN disk driver data cannot "
182 "cross a page boundary"));
184 /* install a grant reference. */
185 ref = gnttab_claim_grant_reference(gref_head);
188 * GNTTAB_LIST_END == 0xffffffff, but it is private
191 KASSERT(ref != ~0, ("grant_reference failed"));
193 gnttab_grant_foreign_access_ref(
196 buffer_ma >> PAGE_SHIFT,
200 *sg = (struct blkif_request_segment) {
212 xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
214 struct xbd_softc *sc;
215 struct xbd_command *cm;
222 cm->cm_bp->bio_error = EIO;
224 xbd_free_command(cm);
228 KASSERT(nsegs <= sc->xbd_max_request_segments,
229 ("Too many segments in a blkfront I/O"));
231 if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
232 blkif_request_t *ring_req;
234 /* Fill out a blkif_request_t structure. */
235 ring_req = (blkif_request_t *)
236 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
237 sc->xbd_ring.req_prod_pvt++;
238 ring_req->id = cm->cm_id;
239 ring_req->operation = cm->cm_operation;
240 ring_req->sector_number = cm->cm_sector_number;
241 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
242 ring_req->nr_segments = nsegs;
244 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
245 xenbus_get_otherend_id(sc->xbd_dev),
246 cm->cm_operation == BLKIF_OP_WRITE,
247 cm->cm_sg_refs, ring_req->seg);
249 blkif_request_indirect_t *ring_req;
251 /* Fill out a blkif_request_indirect_t structure. */
252 ring_req = (blkif_request_indirect_t *)
253 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
254 sc->xbd_ring.req_prod_pvt++;
255 ring_req->id = cm->cm_id;
256 ring_req->operation = BLKIF_OP_INDIRECT;
257 ring_req->indirect_op = cm->cm_operation;
258 ring_req->sector_number = cm->cm_sector_number;
259 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
260 ring_req->nr_segments = nsegs;
262 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
263 xenbus_get_otherend_id(sc->xbd_dev),
264 cm->cm_operation == BLKIF_OP_WRITE,
265 cm->cm_sg_refs, cm->cm_indirectionpages);
266 memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
267 sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
270 if (cm->cm_operation == BLKIF_OP_READ)
271 op = BUS_DMASYNC_PREREAD;
272 else if (cm->cm_operation == BLKIF_OP_WRITE)
273 op = BUS_DMASYNC_PREWRITE;
276 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
278 gnttab_free_grant_references(cm->cm_gref_head);
280 xbd_enqueue_cm(cm, XBD_Q_BUSY);
283 * If bus dma had to asynchronously call us back to dispatch
284 * this command, we are no longer executing in the context of
285 * xbd_startio(). Thus we cannot rely on xbd_startio()'s call to
286 * xbd_flush_requests() to publish this command to the backend
287 * along with any other commands that it could batch.
289 if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
290 xbd_flush_requests(sc);
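/*
 * Map the command's bio (or plain data buffer) for DMA and hand it off to
 * xbd_queue_cb().  bus_dma may defer the mapping (EINPROGRESS); in that case
 * the queue is frozen so later commands cannot jump ahead of this one.
 */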
296 xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
300 if (cm->cm_bp != NULL)
301 error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
302 cm->cm_bp, xbd_queue_cb, cm, 0);
304 error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
305 cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
306 if (error == EINPROGRESS) {
308 * Maintain queuing order by freezing the queue. The next
309 * command may not require as many resources as the command
310 * we just attempted to map, so we can't rely on bus dma
311 * blocking for it too.
313 xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
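/*
 * Called back by the grant table code once references become available
 * again; clears the XBDF_GNT_SHORTAGE freeze so queued work can proceed.
 */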
321 xbd_restart_queue_callback(void *arg)
323 struct xbd_softc *sc = arg;
325 mtx_lock(&sc->xbd_io_lock);
327 xbd_thaw(sc, XBDF_GNT_SHORTAGE);
331 mtx_unlock(&sc->xbd_io_lock);
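/*
 * Pair the next queued bio with a free command: allocate grant references
 * (freezing on shortage), record the starting sector, and translate the bio
 * command into the corresponding blkif operation.
 */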
334 static struct xbd_command *
335 xbd_bio_command(struct xbd_softc *sc)
337 struct xbd_command *cm;
340 if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
343 bp = xbd_dequeue_bio(sc);
347 if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
348 xbd_freeze(sc, XBDF_CM_SHORTAGE);
349 xbd_requeue_bio(sc, bp);
353 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
354 &cm->cm_gref_head) != 0) {
355 gnttab_request_free_callback(&sc->xbd_callback,
356 xbd_restart_queue_callback, sc,
357 sc->xbd_max_request_segments);
358 xbd_freeze(sc, XBDF_GNT_SHORTAGE);
359 xbd_requeue_bio(sc, bp);
360 xbd_enqueue_cm(cm, XBD_Q_FREE);
365 cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
367 switch (bp->bio_cmd) {
369 cm->cm_operation = BLKIF_OP_READ;
372 cm->cm_operation = BLKIF_OP_WRITE;
373 if ((bp->bio_flags & BIO_ORDERED) != 0) {
374 if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
375 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
378 * Single step this command.
380 cm->cm_flags |= XBDCF_Q_FREEZE;
381 if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
383 * Wait for in-flight requests to
386 xbd_freeze(sc, XBDF_WAIT_IDLE);
387 xbd_requeue_cm(cm, XBD_Q_READY);
394 if ((sc->xbd_flags & XBDF_FLUSH) != 0)
395 cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
396 else if ((sc->xbd_flags & XBDF_BARRIER) != 0)
397 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
399 panic("flush request, but no flush support available");
402 panic("unknown bio command %d", bp->bio_cmd);
409 * Dequeue buffers and place them in the shared communication ring.
410 * Return when no more requests can be accepted or all buffers have
413 * Signal XEN once the ring has been filled out.
416 xbd_startio(struct xbd_softc *sc)
418 struct xbd_command *cm;
419 int error, queued = 0;
421 mtx_assert(&sc->xbd_io_lock, MA_OWNED);
423 if (sc->xbd_state != XBD_STATE_CONNECTED)
426 while (!RING_FULL(&sc->xbd_ring)) {
428 if (sc->xbd_qfrozen_cnt != 0)
431 cm = xbd_dequeue_cm(sc, XBD_Q_READY);
434 cm = xbd_bio_command(sc);
439 if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
441 * Single step command. Future work is
442 * held off until this command completes.
444 xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
447 if ((error = xbd_queue_request(sc, cm)) != 0) {
448 printf("xbd_queue_request returned %d\n", error);
455 xbd_flush_requests(sc);
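/* Finish a bio, propagating any error status reported by the backend. */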
459 xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
465 if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
466 disk_err(bp, "disk error", -1, 0);
467 printf(" status: %x\n", cm->cm_status);
468 bp->bio_flags |= BIO_ERROR;
471 if (bp->bio_flags & BIO_ERROR)
476 xbd_free_command(cm);
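/*
 * Interrupt handler (xbd_int): drain the response ring, end grant access for
 * each completed command, sync and unload its DMA map, complete the I/O, and
 * wake idle/suspend waiters once the busy queue drains.
 */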
483 struct xbd_softc *sc = xsc;
484 struct xbd_command *cm;
485 blkif_response_t *bret;
489 mtx_lock(&sc->xbd_io_lock);
491 if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
492 mtx_unlock(&sc->xbd_io_lock);
497 rp = sc->xbd_ring.sring->rsp_prod;
498 rmb(); /* Ensure we see queued responses up to 'rp'. */
500 for (i = sc->xbd_ring.rsp_cons; i != rp;) {
501 bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
502 cm = &sc->xbd_shadow[bret->id];
504 xbd_remove_cm(cm, XBD_Q_BUSY);
505 gnttab_end_foreign_access_references(cm->cm_nseg,
509 if (cm->cm_operation == BLKIF_OP_READ)
510 op = BUS_DMASYNC_POSTREAD;
511 else if (cm->cm_operation == BLKIF_OP_WRITE ||
512 cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
513 op = BUS_DMASYNC_POSTWRITE;
516 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
517 bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
520 * Release any hold this command has on future command
526 * Directly call the i/o complete routine to save an
527 * indirection in the common case.
529 cm->cm_status = bret->status;
531 xbd_bio_complete(sc, cm);
532 else if (cm->cm_complete != NULL)
535 xbd_free_command(cm);
538 sc->xbd_ring.rsp_cons = i;
540 if (i != sc->xbd_ring.req_prod_pvt) {
542 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
546 sc->xbd_ring.sring->rsp_event = i + 1;
549 if (xbd_queue_length(sc, XBD_Q_BUSY) == 0)
550 xbd_thaw(sc, XBDF_WAIT_IDLE);
554 if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED))
555 wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);
557 mtx_unlock(&sc->xbd_io_lock);
560 /*------------------------------- Dump Support -------------------------------*/
562 * Quiesce the disk writes for a dump file before allowing the next buffer.
565 xbd_quiesce(struct xbd_softc *sc)
569 /* While there are outstanding requests. */
570 while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
571 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
573 /* Received request completions, update queue. */
576 if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
578 * Still pending requests, wait for the disk i/o
586 /* Kernel dump function for a paravirtualized disk device */
588 xbd_dump_complete(struct xbd_command *cm)
591 xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
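/*
 * Write a dump buffer to the device: split it into chunks of at most
 * xbd_max_request_size, queue each chunk as a write, kick the backend, and
 * then poll (via xbd_quiesce()) for the completions.
 */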
595 xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
598 struct disk *dp = arg;
599 struct xbd_softc *sc = dp->d_drv1;
600 struct xbd_command *cm;
608 xbd_quiesce(sc); /* All quiet on the western front. */
611 * If this lock is held, then this module is failing, and a
612 * successful kernel dump is highly unlikely anyway.
614 mtx_lock(&sc->xbd_io_lock);
616 /* Split the 64KB block as needed */
617 for (sbp=0; length > 0; sbp++) {
618 cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
620 mtx_unlock(&sc->xbd_io_lock);
621 device_printf(sc->xbd_dev, "dump: no more commands?\n");
625 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
626 &cm->cm_gref_head) != 0) {
627 xbd_free_command(cm);
628 mtx_unlock(&sc->xbd_io_lock);
629 device_printf(sc->xbd_dev, "no more grant allocs?\n");
633 chunk = length > sc->xbd_max_request_size ?
634 sc->xbd_max_request_size : length;
635 cm->cm_data = virtual;
636 cm->cm_datalen = chunk;
637 cm->cm_operation = BLKIF_OP_WRITE;
638 cm->cm_sector_number = offset / dp->d_sectorsize;
639 cm->cm_complete = xbd_dump_complete;
641 xbd_enqueue_cm(cm, XBD_Q_READY);
645 virtual = (char *) virtual + chunk;
648 /* Tell DOM0 to do the I/O */
650 mtx_unlock(&sc->xbd_io_lock);
652 /* Poll for the completion. */
653 xbd_quiesce(sc); /* All quiet on the eastern front */
655 /* If there were any errors, bail out... */
656 while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
657 if (cm->cm_status != BLKIF_RSP_OKAY) {
658 device_printf(sc->xbd_dev,
659 "Dump I/O failed at sector %jd\n",
660 cm->cm_sector_number);
663 xbd_free_command(cm);
669 /*----------------------------- Disk Entrypoints -----------------------------*/
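/*
 * GEOM disk methods.  Open/close maintain the user reference count, and
 * strategy queues the bio and kicks xbd_startio() under the I/O lock.
 */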
671 xbd_open(struct disk *dp)
673 struct xbd_softc *sc = dp->d_drv1;
676 printf("xbd%d: not found", dp->d_unit);
680 sc->xbd_flags |= XBDF_OPEN;
686 xbd_close(struct disk *dp)
688 struct xbd_softc *sc = dp->d_drv1;
692 sc->xbd_flags &= ~XBDF_OPEN;
693 if (--(sc->xbd_users) == 0) {
695 * Check whether we have been instructed to close. We will
696 * have ignored this request initially, as the device was
699 if (xenbus_get_otherend_state(sc->xbd_dev) ==
701 xbd_closing(sc->xbd_dev);
707 xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
709 struct xbd_softc *sc = dp->d_drv1;
718 * Read/write routine for a buffer. Finds the proper unit, places it on
719 * the sortq, and kicks the controller.
722 xbd_strategy(struct bio *bp)
724 struct xbd_softc *sc = bp->bio_disk->d_drv1;
728 bp->bio_error = EINVAL;
729 bp->bio_flags |= BIO_ERROR;
730 bp->bio_resid = bp->bio_bcount;
736 * Place it in the queue of disk activities for this disk
738 mtx_lock(&sc->xbd_io_lock);
740 xbd_enqueue_bio(sc, bp);
743 mtx_unlock(&sc->xbd_io_lock);
747 /*------------------------------ Ring Management -----------------------------*/
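/*
 * Allocate the shared request ring, grant each of its pages to the backend,
 * publish the ring reference(s) in the XenStore, and bind the event channel
 * interrupt.
 */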
749 xbd_alloc_ring(struct xbd_softc *sc)
751 blkif_sring_t *sring;
752 uintptr_t sring_page_addr;
756 sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
759 xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
762 SHARED_RING_INIT(sring);
763 FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);
765 for (i = 0, sring_page_addr = (uintptr_t)sring;
766 i < sc->xbd_ring_pages;
767 i++, sring_page_addr += PAGE_SIZE) {
769 error = xenbus_grant_ring(sc->xbd_dev,
770 (vtophys(sring_page_addr) >> PAGE_SHIFT),
771 &sc->xbd_ring_ref[i]);
773 xenbus_dev_fatal(sc->xbd_dev, error,
774 "granting ring_ref(%d)", i);
778 if (sc->xbd_ring_pages == 1) {
779 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
780 "ring-ref", "%u", sc->xbd_ring_ref[0]);
782 xenbus_dev_fatal(sc->xbd_dev, error,
783 "writing %s/ring-ref",
784 xenbus_get_node(sc->xbd_dev));
788 for (i = 0; i < sc->xbd_ring_pages; i++) {
789 char ring_ref_name[] = "ring_refXX";
791 snprintf(ring_ref_name, sizeof(ring_ref_name),
793 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
794 ring_ref_name, "%u", sc->xbd_ring_ref[i]);
796 xenbus_dev_fatal(sc->xbd_dev, error,
798 xenbus_get_node(sc->xbd_dev),
805 error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev,
806 xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc,
807 INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
809 xenbus_dev_fatal(sc->xbd_dev, error,
810 "xen_intr_alloc_and_bind_local_port failed");
818 xbd_free_ring(struct xbd_softc *sc)
822 if (sc->xbd_ring.sring == NULL)
825 for (i = 0; i < sc->xbd_ring_pages; i++) {
826 if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) {
827 gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]);
828 sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
831 free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
832 sc->xbd_ring.sring = NULL;
835 /*-------------------------- Initialization/Teardown -------------------------*/
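/*
 * Format the negotiated feature flags as a comma-separated string into the
 * caller-supplied buffer; returns the resulting length.
 */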
837 xbd_feature_string(struct xbd_softc *sc, char *features, size_t len)
842 sbuf_new(&sb, features, len, SBUF_FIXEDLEN);
845 if ((sc->xbd_flags & XBDF_FLUSH) != 0) {
846 sbuf_printf(&sb, "flush");
850 if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
851 if (feature_cnt != 0)
852 sbuf_printf(&sb, ", ");
853 sbuf_printf(&sb, "write_barrier");
857 if ((sc->xbd_flags & XBDF_DISCARD) != 0) {
858 if (feature_cnt != 0)
859 sbuf_printf(&sb, ", ");
860 sbuf_printf(&sb, "discard");
864 if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) {
865 if (feature_cnt != 0)
866 sbuf_printf(&sb, ", ");
867 sbuf_printf(&sb, "persistent_grants");
871 (void) sbuf_finish(&sb);
872 return (sbuf_len(&sb));
876 xbd_sysctl_features(SYSCTL_HANDLER_ARGS)
879 struct xbd_softc *sc = arg1;
883 error = sysctl_wire_old_buffer(req, 0);
887 len = xbd_feature_string(sc, features, sizeof(features));
889 /* len is -1 on error, which will make the SYSCTL_OUT a no-op. */
890 return (SYSCTL_OUT(req, features, len + 1/*NUL*/));
894 xbd_setup_sysctl(struct xbd_softc *xbd)
896 struct sysctl_ctx_list *sysctl_ctx = NULL;
897 struct sysctl_oid *sysctl_tree = NULL;
898 struct sysctl_oid_list *children;
900 sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
901 if (sysctl_ctx == NULL)
904 sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
905 if (sysctl_tree == NULL)
908 children = SYSCTL_CHILDREN(sysctl_tree);
909 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
910 "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
911 "maximum outstanding requests (negotiated)");
913 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
914 "max_request_segments", CTLFLAG_RD,
915 &xbd->xbd_max_request_segments, 0,
916 "maximum number of pages per requests (negotiated)");
918 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
919 "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
920 "maximum size in bytes of a request (negotiated)");
922 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
923 "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
924 "communication channel pages (negotiated)");
926 SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
927 "features", CTLTYPE_STRING|CTLFLAG_RD, xbd, 0,
928 xbd_sysctl_features, "A", "protocol features (negotiated)");
932 * Translate Linux major/minor to an appropriate name and unit
933 * number. For HVM guests, this allows us to use the same drive names
934 * with blkfront as the emulated drives, easing transition slightly.
937 xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
939 static struct vdev_info {
945 {3, 6, 0, "ada"}, /* ide0 */
946 {22, 6, 2, "ada"}, /* ide1 */
947 {33, 6, 4, "ada"}, /* ide2 */
948 {34, 6, 6, "ada"}, /* ide3 */
949 {56, 6, 8, "ada"}, /* ide4 */
950 {57, 6, 10, "ada"}, /* ide5 */
951 {88, 6, 12, "ada"}, /* ide6 */
952 {89, 6, 14, "ada"}, /* ide7 */
953 {90, 6, 16, "ada"}, /* ide8 */
954 {91, 6, 18, "ada"}, /* ide9 */
956 {8, 4, 0, "da"}, /* scsi disk0 */
957 {65, 4, 16, "da"}, /* scsi disk1 */
958 {66, 4, 32, "da"}, /* scsi disk2 */
959 {67, 4, 48, "da"}, /* scsi disk3 */
960 {68, 4, 64, "da"}, /* scsi disk4 */
961 {69, 4, 80, "da"}, /* scsi disk5 */
962 {70, 4, 96, "da"}, /* scsi disk6 */
963 {71, 4, 112, "da"}, /* scsi disk7 */
964 {128, 4, 128, "da"}, /* scsi disk8 */
965 {129, 4, 144, "da"}, /* scsi disk9 */
966 {130, 4, 160, "da"}, /* scsi disk10 */
967 {131, 4, 176, "da"}, /* scsi disk11 */
968 {132, 4, 192, "da"}, /* scsi disk12 */
969 {133, 4, 208, "da"}, /* scsi disk13 */
970 {134, 4, 224, "da"}, /* scsi disk14 */
971 {135, 4, 240, "da"}, /* scsi disk15 */
973 {202, 4, 0, "xbd"}, /* xbd */
977 int major = vdevice >> 8;
978 int minor = vdevice & 0xff;
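/*
 * Legacy encoding: the Linux major lives in bits 15:8 and the minor in bits
 * 7:0, e.g. vdevice 0x800 -> da0 and 0x810 -> da1 (shift 4), while
 * 0x300 -> ada0 and 0x340 -> ada1 (shift 6).
 */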
981 if (vdevice & (1 << 28)) {
982 *unit = (vdevice & ((1 << 28) - 1)) >> 8;
987 for (i = 0; info[i].major; i++) {
988 if (info[i].major == major) {
989 *unit = info[i].base + (minor >> info[i].shift);
990 *name = info[i].name;
1000 xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
1001 int vdevice, uint16_t vdisk_info, unsigned long sector_size,
1002 unsigned long phys_sector_size)
1005 int unit, error = 0;
1008 xbd_vdevice_to_unit(vdevice, &unit, &name);
1010 sc->xbd_unit = unit;
1012 if (strcmp(name, "xbd") != 0)
1013 device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);
1015 if (xbd_feature_string(sc, features, sizeof(features)) > 0) {
1016 device_printf(sc->xbd_dev, "features: %s\n",
1020 sc->xbd_disk = disk_alloc();
1021 sc->xbd_disk->d_unit = sc->xbd_unit;
1022 sc->xbd_disk->d_open = xbd_open;
1023 sc->xbd_disk->d_close = xbd_close;
1024 sc->xbd_disk->d_ioctl = xbd_ioctl;
1025 sc->xbd_disk->d_strategy = xbd_strategy;
1026 sc->xbd_disk->d_dump = xbd_dump;
1027 sc->xbd_disk->d_name = name;
1028 sc->xbd_disk->d_drv1 = sc;
1029 sc->xbd_disk->d_sectorsize = sector_size;
1030 sc->xbd_disk->d_stripesize = phys_sector_size;
1031 sc->xbd_disk->d_stripeoffset = 0;
1033 sc->xbd_disk->d_mediasize = sectors * sector_size;
1034 sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
1035 sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
1036 if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
1037 sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1038 device_printf(sc->xbd_dev,
1039 "synchronize cache commands enabled.\n");
1041 disk_create(sc->xbd_disk, DISK_VERSION);
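/*
 * Release all per-command resources (segment grant arrays, indirection pages
 * and their grants, DMA maps), the DMA tag, the command queues, and the
 * event channel binding after marking the device disconnected.
 */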
1047 xbd_free(struct xbd_softc *sc)
1051 /* Prevent new requests from being issued until we fix things up. */
1052 mtx_lock(&sc->xbd_io_lock);
1053 sc->xbd_state = XBD_STATE_DISCONNECTED;
1054 mtx_unlock(&sc->xbd_io_lock);
1056 /* Free resources associated with old device channel. */
1058 if (sc->xbd_shadow) {
1060 for (i = 0; i < sc->xbd_max_requests; i++) {
1061 struct xbd_command *cm;
1063 cm = &sc->xbd_shadow[i];
1064 if (cm->cm_sg_refs != NULL) {
1065 free(cm->cm_sg_refs, M_XENBLOCKFRONT);
1066 cm->cm_sg_refs = NULL;
1069 if (cm->cm_indirectionpages != NULL) {
1070 gnttab_end_foreign_access_references(
1071 sc->xbd_max_request_indirectpages,
1072 &cm->cm_indirectionrefs[0]);
1073 contigfree(cm->cm_indirectionpages, PAGE_SIZE *
1074 sc->xbd_max_request_indirectpages,
1076 cm->cm_indirectionpages = NULL;
1079 bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
1081 free(sc->xbd_shadow, M_XENBLOCKFRONT);
1082 sc->xbd_shadow = NULL;
1084 bus_dma_tag_destroy(sc->xbd_io_dmat);
1086 xbd_initq_cm(sc, XBD_Q_FREE);
1087 xbd_initq_cm(sc, XBD_Q_READY);
1088 xbd_initq_cm(sc, XBD_Q_COMPLETE);
1091 xen_intr_unbind(&sc->xen_intr_handle);
1095 /*--------------------------- State Change Handlers --------------------------*/
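/*
 * First half of the connection handshake: read the backend's ring-size
 * limits from the XenStore, allocate and grant the ring, publish our event
 * channel and protocol, and move to XenbusStateInitialised.
 */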
1097 xbd_initialize(struct xbd_softc *sc)
1099 const char *otherend_path;
1100 const char *node_path;
1101 uint32_t max_ring_page_order;
1104 if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
1105 /* Initialization has already been performed. */
1110 * Protocol defaults valid even if negotiation for a
1113 max_ring_page_order = 0;
1114 sc->xbd_ring_pages = 1;
1117 * Protocol negotiation.
1119 * \note xs_gather() returns on the first encountered error, so
1120 * we must use independent calls in order to guarantee
1121 * we don't miss information in a sparsely populated back-end
1124 * \note xs_scanf() does not update variables for unmatched
1127 otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
1128 node_path = xenbus_get_node(sc->xbd_dev);
1130 /* Support both backend schemes for relaying ring page limits. */
1131 (void)xs_scanf(XST_NIL, otherend_path,
1132 "max-ring-page-order", NULL, "%" PRIu32,
1133 &max_ring_page_order);
1134 sc->xbd_ring_pages = 1 << max_ring_page_order;
1135 (void)xs_scanf(XST_NIL, otherend_path,
1136 "max-ring-pages", NULL, "%" PRIu32,
1137 &sc->xbd_ring_pages);
1138 if (sc->xbd_ring_pages < 1)
1139 sc->xbd_ring_pages = 1;
1141 if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
1142 device_printf(sc->xbd_dev,
1143 "Back-end specified ring-pages of %u "
1144 "limited to front-end limit of %u.\n",
1145 sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
1146 sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
1149 if (powerof2(sc->xbd_ring_pages) == 0) {
1150 uint32_t new_page_limit;
1152 new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
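/* e.g. a backend request for 5 ring pages rounds down to 4 (fls(5) - 1 == 2). */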
1153 device_printf(sc->xbd_dev,
1154 "Back-end specified ring-pages of %u "
1155 "is not a power of 2. Limited to %u.\n",
1156 sc->xbd_ring_pages, new_page_limit);
1157 sc->xbd_ring_pages = new_page_limit;
1160 sc->xbd_max_requests =
1161 BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
1162 if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
1163 device_printf(sc->xbd_dev,
1164 "Back-end specified max_requests of %u "
1165 "limited to front-end limit of %zu.\n",
1166 sc->xbd_max_requests, XBD_MAX_REQUESTS);
1167 sc->xbd_max_requests = XBD_MAX_REQUESTS;
1170 if (xbd_alloc_ring(sc) != 0)
1173 /* Support both backend schemes for relaying ring page limits. */
1174 if (sc->xbd_ring_pages > 1) {
1175 error = xs_printf(XST_NIL, node_path,
1176 "num-ring-pages","%u",
1177 sc->xbd_ring_pages);
1179 xenbus_dev_fatal(sc->xbd_dev, error,
1180 "writing %s/num-ring-pages",
1185 error = xs_printf(XST_NIL, node_path,
1186 "ring-page-order", "%u",
1187 fls(sc->xbd_ring_pages) - 1);
1189 xenbus_dev_fatal(sc->xbd_dev, error,
1190 "writing %s/ring-page-order",
1196 error = xs_printf(XST_NIL, node_path, "event-channel",
1197 "%u", xen_intr_port(sc->xen_intr_handle));
1199 xenbus_dev_fatal(sc->xbd_dev, error,
1200 "writing %s/event-channel",
1205 error = xs_printf(XST_NIL, node_path, "protocol",
1206 "%s", XEN_IO_PROTO_ABI_NATIVE);
1208 xenbus_dev_fatal(sc->xbd_dev, error,
1209 "writing %s/protocol",
1214 xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
1218 * Invoked when the backend is finally 'ready' (and has published
1219 * the details about the physical device - #sectors, size, etc).
1222 xbd_connect(struct xbd_softc *sc)
1224 device_t dev = sc->xbd_dev;
1225 unsigned long sectors, sector_size, phys_sector_size;
1227 int err, feature_barrier, feature_flush;
1230 if (sc->xbd_state == XBD_STATE_CONNECTED ||
1231 sc->xbd_state == XBD_STATE_SUSPENDED)
1234 DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
1236 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1237 "sectors", "%lu", §ors,
1238 "info", "%u", &binfo,
1239 "sector-size", "%lu", §or_size,
1242 xenbus_dev_fatal(dev, err,
1243 "reading backend fields at %s",
1244 xenbus_get_otherend_path(dev));
1247 if ((sectors == 0) || (sector_size == 0)) {
1248 xenbus_dev_fatal(dev, 0,
1249 "invalid parameters from %s:"
1250 " sectors = %lu, sector_size = %lu",
1251 xenbus_get_otherend_path(dev),
1252 sectors, sector_size);
1255 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1256 "physical-sector-size", "%lu", &phys_sector_size,
1258 if (err || phys_sector_size <= sector_size)
1259 phys_sector_size = 0;
1260 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1261 "feature-barrier", "%d", &feature_barrier,
1263 if (err == 0 && feature_barrier != 0)
1264 sc->xbd_flags |= XBDF_BARRIER;
1266 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1267 "feature-flush-cache", "%d", &feature_flush,
1269 if (err == 0 && feature_flush != 0)
1270 sc->xbd_flags |= XBDF_FLUSH;
1272 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1273 "feature-max-indirect-segments", "%" PRIu32,
1274 &sc->xbd_max_request_segments, NULL);
1275 if ((err != 0) || (xbd_enable_indirect == 0))
1276 sc->xbd_max_request_segments = 0;
1277 if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS)
1278 sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS;
1279 if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS))
1280 sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS);
1281 sc->xbd_max_request_indirectpages =
1282 XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
1283 if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
1284 sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1285 sc->xbd_max_request_size =
1286 XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
1288 /* Allocate data structures based on negotiated values. */
1289 err = bus_dma_tag_create(
1290 bus_get_dma_tag(sc->xbd_dev), /* parent */
1291 512, PAGE_SIZE, /* algnmnt, boundary */
1292 BUS_SPACE_MAXADDR, /* lowaddr */
1293 BUS_SPACE_MAXADDR, /* highaddr */
1294 NULL, NULL, /* filter, filterarg */
1295 sc->xbd_max_request_size,
1296 sc->xbd_max_request_segments,
1297 PAGE_SIZE, /* maxsegsize */
1298 BUS_DMA_ALLOCNOW, /* flags */
1299 busdma_lock_mutex, /* lockfunc */
1300 &sc->xbd_io_lock, /* lockarg */
1303 xenbus_dev_fatal(sc->xbd_dev, err,
1304 "Cannot allocate parent DMA tag\n");
1308 /* Per-transaction data allocation. */
1309 sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
1310 M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
1311 if (sc->xbd_shadow == NULL) {
1312 bus_dma_tag_destroy(sc->xbd_io_dmat);
1313 xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
1314 "Cannot allocate request structures\n");
1318 for (i = 0; i < sc->xbd_max_requests; i++) {
1319 struct xbd_command *cm;
1320 void * indirectpages;
1322 cm = &sc->xbd_shadow[i];
1323 cm->cm_sg_refs = malloc(
1324 sizeof(grant_ref_t) * sc->xbd_max_request_segments,
1325 M_XENBLOCKFRONT, M_NOWAIT);
1326 if (cm->cm_sg_refs == NULL)
1329 cm->cm_flags = XBDCF_INITIALIZER;
1331 if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
1333 if (sc->xbd_max_request_indirectpages > 0) {
1334 indirectpages = contigmalloc(
1335 PAGE_SIZE * sc->xbd_max_request_indirectpages,
1336 M_XENBLOCKFRONT, M_ZERO | M_NOWAIT, 0, ~0,
1338 if (indirectpages == NULL)
1339 sc->xbd_max_request_indirectpages = 0;
1341 indirectpages = NULL;
1343 for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
1344 if (gnttab_grant_foreign_access(
1345 xenbus_get_otherend_id(sc->xbd_dev),
1346 (vtophys(indirectpages) >> PAGE_SHIFT) + j,
1347 1 /* grant read-only access */,
1348 &cm->cm_indirectionrefs[j]))
1351 if (j < sc->xbd_max_request_indirectpages) {
1352 contigfree(indirectpages,
1353 PAGE_SIZE * sc->xbd_max_request_indirectpages,
1357 cm->cm_indirectionpages = indirectpages;
1358 xbd_free_command(cm);
1361 if (sc->xbd_disk == NULL) {
1362 device_printf(dev, "%juMB <%s> at %s",
1363 (uintmax_t) sectors / (1048576 / sector_size),
1364 device_get_desc(dev),
1365 xenbus_get_node(dev));
1366 bus_print_child_footer(device_get_parent(dev), dev);
1368 xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
1369 sector_size, phys_sector_size);
1372 (void)xenbus_set_state(dev, XenbusStateConnected);
1374 /* Kick pending requests. */
1375 mtx_lock(&sc->xbd_io_lock);
1376 sc->xbd_state = XBD_STATE_CONNECTED;
1378 sc->xbd_flags |= XBDF_READY;
1379 mtx_unlock(&sc->xbd_io_lock);
1383 * Handle the change of state of the backend to Closing. We must delete our
1384 * device-layer structures now, to ensure that writes are flushed through to
1385 * the backend. Once this is done, we can switch to Closed in
1389 xbd_closing(device_t dev)
1391 struct xbd_softc *sc = device_get_softc(dev);
1393 xenbus_set_state(dev, XenbusStateClosing);
1395 DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));
1397 if (sc->xbd_disk != NULL) {
1398 disk_destroy(sc->xbd_disk);
1399 sc->xbd_disk = NULL;
1402 xenbus_set_state(dev, XenbusStateClosed);
1405 /*---------------------------- NewBus Entrypoints ----------------------------*/
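/*
 * Match XenBus "vbd" nodes.  The probe declines when PV disks are
 * administratively disabled in an HVM guest and, as explained below, when
 * the node describes a CDROM device.
 */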
1407 xbd_probe(device_t dev)
1409 if (strcmp(xenbus_get_type(dev), "vbd") != 0)
1412 if (xen_hvm_domain() && xen_disable_pv_disks != 0)
1415 if (xen_hvm_domain()) {
1420 * When running in an HVM domain, IDE disk emulation is
1421 * disabled early in boot so that native drivers will
1422 * not see emulated hardware. However, CDROM device
1423 * emulation cannot be disabled.
1425 * Through use of FreeBSD's vm_guest and xen_hvm_domain()
1426 * APIs, we could modify the native CDROM driver to fail its
1427 * probe when running under Xen. Unfortunately, the PV
1428 * CDROM support in XenServer (up through at least version
1429 * 6.2) isn't functional, so we instead rely on the emulated
1430 * CDROM instance, and fail to attach the PV one here in
1431 * the blkfront driver.
1433 error = xs_read(XST_NIL, xenbus_get_node(dev),
1434 "device-type", NULL, (void **) &type);
1438 if (strncmp(type, "cdrom", 5) == 0) {
1439 free(type, M_XENSTORE);
1442 free(type, M_XENSTORE);
1445 device_set_desc(dev, "Virtual Block Device");
1451 * Setup supplies the backend dir and virtual device. We place an event
1452 * channel and shared frame entries, then watch the backend to wait until it's
1456 xbd_attach(device_t dev)
1458 struct xbd_softc *sc;
1465 /* FIXME: Use dynamic device id if this is not set. */
1466 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
1467 "virtual-device", NULL, "%" PRIu32, &vdevice);
1469 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
1470 "virtual-device-ext", NULL, "%" PRIu32, &vdevice);
1472 xenbus_dev_fatal(dev, error, "reading virtual-device");
1473 device_printf(dev, "Couldn't determine virtual device.\n");
1477 xbd_vdevice_to_unit(vdevice, &unit, &name);
1478 if (!strcmp(name, "xbd"))
1479 device_set_unit(dev, unit);
1481 sc = device_get_softc(dev);
1482 mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
1484 for (i = 0; i < XBD_MAX_RING_PAGES; i++)
1485 sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
1488 sc->xbd_vdevice = vdevice;
1489 sc->xbd_state = XBD_STATE_DISCONNECTED;
1491 xbd_setup_sysctl(sc);
1493 /* Wait for backend device to publish its protocol capabilities. */
1494 xenbus_set_state(dev, XenbusStateInitialising);
1500 xbd_detach(device_t dev)
1502 struct xbd_softc *sc = device_get_softc(dev);
1504 DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));
1507 mtx_destroy(&sc->xbd_io_lock);
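/*
 * Suspend: mark the device suspended and wait (bounded by a 30 second
 * msleep) for in-flight requests to drain before allowing the domain to
 * suspend.
 */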
1513 xbd_suspend(device_t dev)
1515 struct xbd_softc *sc = device_get_softc(dev);
1519 /* Prevent new requests from being issued until we fix things up. */
1520 mtx_lock(&sc->xbd_io_lock);
1521 saved_state = sc->xbd_state;
1522 sc->xbd_state = XBD_STATE_SUSPENDED;
1524 /* Wait for outstanding I/O to drain. */
1526 while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
1527 if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
1528 PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
1533 mtx_unlock(&sc->xbd_io_lock);
1536 sc->xbd_state = saved_state;
1542 xbd_resume(device_t dev)
1544 struct xbd_softc *sc = device_get_softc(dev);
1546 if (xen_suspend_cancelled) {
1547 sc->xbd_state = XBD_STATE_CONNECTED;
1551 DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));
1559 * Callback received when the backend's state changes.
1562 xbd_backend_changed(device_t dev, XenbusState backend_state)
1564 struct xbd_softc *sc = device_get_softc(dev);
1566 DPRINTK("backend_state=%d\n", backend_state);
1568 switch (backend_state) {
1569 case XenbusStateUnknown:
1570 case XenbusStateInitialising:
1571 case XenbusStateReconfigured:
1572 case XenbusStateReconfiguring:
1573 case XenbusStateClosed:
1576 case XenbusStateInitWait:
1577 case XenbusStateInitialised:
1581 case XenbusStateConnected:
1586 case XenbusStateClosing:
1587 if (sc->xbd_users > 0) {
1588 device_printf(dev, "detaching with pending users\n");
1589 KASSERT(sc->xbd_disk != NULL,
1590 ("NULL disk with pending users\n"));
1591 disk_gone(sc->xbd_disk);
1599 /*---------------------------- NewBus Registration ---------------------------*/
1600 static device_method_t xbd_methods[] = {
1601 /* Device interface */
1602 DEVMETHOD(device_probe, xbd_probe),
1603 DEVMETHOD(device_attach, xbd_attach),
1604 DEVMETHOD(device_detach, xbd_detach),
1605 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1606 DEVMETHOD(device_suspend, xbd_suspend),
1607 DEVMETHOD(device_resume, xbd_resume),
1609 /* Xenbus interface */
1610 DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),
1615 static driver_t xbd_driver = {
1618 sizeof(struct xbd_softc),
1620 devclass_t xbd_devclass;
1622 DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);