/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <sys/bus_dma.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/_inttypes.h>
#include <machine/xen/xenvar.h>

#include <geom/geom_disk.h>

#include <dev/xen/blkfront/block.h>

#include "xenbus_if.h"

/*--------------------------- Forward Declarations ---------------------------*/
static void xbd_closing(device_t);
static void xbd_startio(struct xbd_softc *sc);

/*---------------------------------- Macros ----------------------------------*/
#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#define XBD_SECTOR_SHFT		9

/*---------------------------- Global Static Data ----------------------------*/
static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");

/*---------------------------- Command Processing ----------------------------*/
static void
xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
		return;

	sc->xbd_flags |= xbd_flag;
	sc->xbd_qfrozen_cnt++;
}

static void
xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
		return;

	if (sc->xbd_qfrozen_cnt == 0)
		panic("%s: Thaw with flag 0x%x while not frozen.",
		    __func__, xbd_flag);

	sc->xbd_flags &= ~xbd_flag;
	sc->xbd_qfrozen_cnt--;
}

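/*
 * Per-command freeze/thaw.  A command marked XBDCF_FROZEN holds one
 * reference on the queue freeze count until it completes.  xbd_cm_thaw()
 * is the idempotent counterpart and is safe to call on every completion,
 * whether or not the command ever froze the queue.
 */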
static void
xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
{
	if ((cm->cm_flags & XBDCF_FROZEN) != 0)
		return;

	cm->cm_flags |= XBDCF_FROZEN|cm_flag;
	xbd_freeze(sc, XBDF_NONE);
}

static void
xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
{
	if ((cm->cm_flags & XBDCF_FROZEN) == 0)
		return;

	cm->cm_flags &= ~XBDCF_FROZEN;
	xbd_thaw(sc, XBDF_NONE);
}

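/*
 * Push any privately queued requests onto the shared ring and, if the
 * ring macro indicates the other end needs one, send an event-channel
 * notification.  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() sets 'notify'
 * based on the consumer's last observed request pointer, so quiescent
 * backends are signaled while busy ones are not.
 */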
static inline void
xbd_flush_requests(struct xbd_softc *sc)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);

	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}

static void
xbd_free_command(struct xbd_command *cm)
{

	KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
	    ("Freeing command that is still on queue %d.",
	    cm->cm_flags & XBDCF_Q_MASK));

	cm->cm_flags = XBDCF_INITIALIZER;
	cm->cm_bp = NULL;
	cm->cm_complete = NULL;
	xbd_enqueue_cm(cm, XBD_Q_FREE);
	xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
}

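/*
 * bus_dma callback that turns a mapped command into shared ring entries.
 * The first BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments live in the
 * request itself; any remainder spills into dedicated segment blocks
 * that also consume ring slots.  Each segment is backed by a grant
 * reference so the backend can map the frontend's pages.
 */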
static void
xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xbd_softc *sc;
	struct xbd_command *cm;
	blkif_request_t *ring_req;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	grant_ref_t *sg_ref;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;
	int op;
	int block_segs;

	cm = arg;
	sc = cm->cm_sc;

	if (error) {
		printf("error %d in xbd_queue_cb\n", error);
		cm->cm_bp->bio_error = EIO;
		biodone(cm->cm_bp);
		xbd_free_command(cm);
		return;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
	sc->xbd_ring.req_prod_pvt++;
	ring_req->id = cm->cm_id;
	ring_req->operation = cm->cm_operation;
	ring_req->sector_number = cm->cm_sector_number;
	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
	ring_req->nr_segments = nsegs;
	cm->cm_nseg = nsegs;

	block_segs    = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
	sg            = ring_req->seg;
	last_block_sg = sg + block_segs;
	sg_ref        = cm->cm_sg_refs;

	while (1) {

		while (sg < last_block_sg) {
			buffer_ma = segs->ds_addr;
			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
			    "cross a page boundary"));

			/* Install a grant reference. */
			ref = gnttab_claim_grant_reference(&cm->cm_gref_head);

			/*
			 * GNTTAB_LIST_END == 0xffffffff, but it is private
			 * to gnttab.c.
			 */
			KASSERT(ref != ~0, ("grant_reference failed"));

			gnttab_grant_foreign_access_ref(
			    ref,
			    xenbus_get_otherend_id(sc->xbd_dev),
			    buffer_ma >> PAGE_SHIFT,
			    ring_req->operation == BLKIF_OP_WRITE);

			*sg_ref = ref;
			*sg = (struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect
			};
			sg++;
			sg_ref++;
			segs++;
			nsegs--;
		}

		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
		if (block_segs == 0)
			break;

		sg = BLKRING_GET_SEG_BLOCK(&sc->xbd_ring,
		    sc->xbd_ring.req_prod_pvt);
		sc->xbd_ring.req_prod_pvt++;
		last_block_sg = sg + block_segs;
	}

	if (cm->cm_operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->cm_operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);

	gnttab_free_grant_references(cm->cm_gref_head);

	xbd_enqueue_cm(cm, XBD_Q_BUSY);

	/*
	 * If bus dma had to asynchronously call us back to dispatch
	 * this command, we are no longer executing in the context of
	 * xbd_startio().  Thus we cannot rely on xbd_startio()'s call to
	 * xbd_flush_requests() to publish this command to the backend
	 * along with any other commands that it could batch.
	 */
	if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
		xbd_flush_requests(sc);
}

static int
xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
{
	int error;

	error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data,
	    cm->cm_datalen, xbd_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		/*
		 * Maintain queuing order by freezing the queue.  The next
		 * command may not require as many resources as the command
		 * we just attempted to map, so we can't rely on bus dma
		 * blocking for it too.
		 */
		xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
		return (0);
	}

	return (error);
}

static void
xbd_restart_queue_callback(void *arg)
{
	struct xbd_softc *sc = arg;

	mtx_lock(&sc->xbd_io_lock);

	xbd_thaw(sc, XBDF_GNT_SHORTAGE);

	xbd_startio(sc);

	mtx_unlock(&sc->xbd_io_lock);
}

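/*
 * Convert the bio at the head of the bio queue into a blkif command.
 * On a transient resource shortage (no free command slots, or no grant
 * references), the bio is requeued, the matching shortage flag freezes
 * the queue, and NULL is returned; dispatch resumes when the resource
 * is returned and the queue is thawed.
 */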
static struct xbd_command *
xbd_bio_command(struct xbd_softc *sc)
{
	struct xbd_command *cm;
	struct bio *bp;

	if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
		return (NULL);

	bp = xbd_dequeue_bio(sc);
	if (bp == NULL)
		return (NULL);

	if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
		xbd_freeze(sc, XBDF_CM_SHORTAGE);
		xbd_requeue_bio(sc, bp);
		return (NULL);
	}

	if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
	    &cm->cm_gref_head) != 0) {
		gnttab_request_free_callback(&sc->xbd_callback,
		    xbd_restart_queue_callback, sc,
		    sc->xbd_max_request_segments);
		xbd_freeze(sc, XBDF_GNT_SHORTAGE);
		xbd_requeue_bio(sc, bp);
		xbd_enqueue_cm(cm, XBD_Q_FREE);
		return (NULL);
	}

	cm->cm_bp = bp;
	cm->cm_data = bp->bio_data;
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;

	switch (bp->bio_cmd) {
	case BIO_READ:
		cm->cm_operation = BLKIF_OP_READ;
		break;
	case BIO_WRITE:
		cm->cm_operation = BLKIF_OP_WRITE;
		if ((bp->bio_flags & BIO_ORDERED) != 0) {
			if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
				cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
			} else {
				/*
				 * Single step this command.
				 */
				cm->cm_flags |= XBDCF_Q_FREEZE;
				if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
					/*
					 * Wait for in-flight requests to
					 * finish.
					 */
					xbd_freeze(sc, XBDF_WAIT_IDLE);
					xbd_requeue_cm(cm, XBD_Q_READY);
					return (NULL);
				}
			}
		}
		break;
	case BIO_FLUSH:
		if ((sc->xbd_flags & XBDF_FLUSH) != 0)
			cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
		else if ((sc->xbd_flags & XBDF_BARRIER) != 0)
			cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
		else
			panic("flush request, but no flush support available");
		break;
	default:
		panic("unknown bio command %d", bp->bio_cmd);
	}

	return (cm);
}

/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xbd_startio(struct xbd_softc *sc)
{
	struct xbd_command *cm;
	int error, queued = 0;

	mtx_assert(&sc->xbd_io_lock, MA_OWNED);

	if (sc->xbd_state != XBD_STATE_CONNECTED)
		return;

	while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
	    sc->xbd_max_request_blocks) {
		if (sc->xbd_qfrozen_cnt != 0)
			break;

		cm = xbd_dequeue_cm(sc, XBD_Q_READY);

		if (cm == NULL)
			cm = xbd_bio_command(sc);

		if (cm == NULL)
			break;

		if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
			/*
			 * Single step command.  Future work is
			 * held off until this command completes.
			 */
			xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
		}

		if ((error = xbd_queue_request(sc, cm)) != 0) {
			printf("xbd_queue_request returned %d\n", error);
			break;
		}
		queued++;
	}

	if (queued != 0)
		xbd_flush_requests(sc);
}

static void
xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
{
	struct bio *bp;

	bp = cm->cm_bp;

	if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
		disk_err(bp, "disk error", -1, 0);
		printf(" status: %x\n", cm->cm_status);
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_flags & BIO_ERROR)
		bp->bio_error = EIO;
	else
		bp->bio_resid = 0;

	xbd_free_command(cm);
	biodone(bp);
}

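/*
 * Reclaim the grant references held by a completed command and report
 * how many ring slots (the header block plus any segment blocks) the
 * request consumed, so the response consumer can advance past them.
 */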
static int
xbd_completion(struct xbd_command *cm)
{
	gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);
	return (BLKIF_SEGS_TO_BLOCKS(cm->cm_nseg));
}

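/*
 * Interrupt handler.  Walks the response ring, syncs and unloads each
 * command's DMA map, releases any hold the command had on dispatch, and
 * completes it.  Before leaving, it re-checks the ring so a response
 * that raced with re-enabling notifications is not lost.
 */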
static void
xbd_int(void *xsc)
{
	struct xbd_softc *sc = xsc;
	struct xbd_command *cm;
	blkif_response_t *bret;
	RING_IDX i, rp;
	int op;

	mtx_lock(&sc->xbd_io_lock);

	if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
		mtx_unlock(&sc->xbd_io_lock);
		return;
	}

 again:
	rp = sc->xbd_ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = sc->xbd_ring.rsp_cons; i != rp;) {
		bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
		cm   = &sc->xbd_shadow[bret->id];

		xbd_remove_cm(cm, XBD_Q_BUSY);
		i += xbd_completion(cm);

		if (cm->cm_operation == BLKIF_OP_READ)
			op = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_operation == BLKIF_OP_WRITE ||
		    cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
			op = BUS_DMASYNC_POSTWRITE;
		else
			op = 0;
		bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
		bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);

		/*
		 * Release any hold this command has on future command
		 * dispatch.
		 */
		xbd_cm_thaw(sc, cm);

		/*
		 * Directly call the i/o complete routine to save an
		 * indirection in the common case.
		 */
		cm->cm_status = bret->status;
		if (cm->cm_bp)
			xbd_bio_complete(sc, cm);
		else if (cm->cm_complete != NULL)
			cm->cm_complete(cm);
		else
			xbd_free_command(cm);
	}

	sc->xbd_ring.rsp_cons = i;

	if (i != sc->xbd_ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		sc->xbd_ring.sring->rsp_event = i + 1;
	}

	if (xbd_queue_length(sc, XBD_Q_BUSY) == 0)
		xbd_thaw(sc, XBDF_WAIT_IDLE);

	xbd_startio(sc);

	if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED))
		wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);

	mtx_unlock(&sc->xbd_io_lock);
}

/*------------------------------- Dump Support -------------------------------*/
/*
 * Quiesce the disk writes for a dump file before allowing the next buffer.
 */
static void
xbd_quiesce(struct xbd_softc *sc)
{
	int mtd;

	/* While there are outstanding requests... */
	while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
		if (mtd) {
			/* Received request completions, update queue. */
			xbd_int(sc);
		} else {
			/*
			 * Still pending requests, wait for the disk i/o
			 * to complete.
			 */
			HYPERVISOR_yield();
		}
	}
}

/* Kernel dump function for a paravirtualized disk device */
static void
xbd_dump_complete(struct xbd_command *cm)
{

	xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
}

static int
xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp = arg;
	struct xbd_softc *sc = dp->d_drv1;
	struct xbd_command *cm;
	size_t chunk;
	int sbp;
	int rc = 0;

	if (length <= 0)
		return (rc);

	xbd_quiesce(sc);	/* All quiet on the western front. */

	/*
	 * If this lock is held, then this module is failing, and a
	 * successful kernel dump is highly unlikely anyway.
	 */
	mtx_lock(&sc->xbd_io_lock);

	/* Split the 64KB block as needed. */
	for (sbp = 0; length > 0; sbp++) {
		cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
		if (cm == NULL) {
			mtx_unlock(&sc->xbd_io_lock);
			device_printf(sc->xbd_dev, "dump: no more commands?\n");
			return (EBUSY);
		}

		if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
		    &cm->cm_gref_head) != 0) {
			xbd_free_command(cm);
			mtx_unlock(&sc->xbd_io_lock);
			device_printf(sc->xbd_dev, "no more grant allocs?\n");
			return (EBUSY);
		}

		chunk = length > sc->xbd_max_request_size ?
		    sc->xbd_max_request_size : length;
		cm->cm_data = virtual;
		cm->cm_datalen = chunk;
		cm->cm_operation = BLKIF_OP_WRITE;
		cm->cm_sector_number = offset / dp->d_sectorsize;
		cm->cm_complete = xbd_dump_complete;

		xbd_enqueue_cm(cm, XBD_Q_READY);

		length -= chunk;
		offset += chunk;
		virtual = (char *)virtual + chunk;
	}

	/* Tell DOM0 to do the I/O. */
	xbd_startio(sc);
	mtx_unlock(&sc->xbd_io_lock);

	/* Poll for the completion. */
	xbd_quiesce(sc);	/* All quiet on the eastern front. */

	/* If there were any errors, bail out... */
	while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
		if (cm->cm_status != BLKIF_RSP_OKAY) {
			device_printf(sc->xbd_dev,
			    "Dump I/O failed at sector %jd\n",
			    cm->cm_sector_number);
			rc = EIO;
		}
		xbd_free_command(cm);
	}

	return (rc);
}

/*----------------------------- Disk Entrypoints -----------------------------*/
static int
xbd_open(struct disk *dp)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL) {
		/* Don't dereference the NULL softc in the error report. */
		printf("xbd%d: not found\n", dp->d_unit);
		return (ENXIO);
	}

	sc->xbd_flags |= XBDF_OPEN;
	sc->xbd_users++;
	return (0);
}

static int
xbd_close(struct disk *dp)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);
	sc->xbd_flags &= ~XBDF_OPEN;
	if (--(sc->xbd_users) == 0) {
		/*
		 * Check whether we have been instructed to close.  We will
		 * have ignored this request initially, as the device was
		 * still mounted.
		 */
		if (xenbus_get_otherend_state(sc->xbd_dev) ==
		    XenbusStateClosing)
			xbd_closing(sc->xbd_dev);
	}
	return (0);
}

static int
xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);

	return (ENOTTY);
}

/*
 * Read/write routine for a buffer.  Find the proper unit, place it on
 * the sortq, and kick the controller.
 */
static void
xbd_strategy(struct bio *bp)
{
	struct xbd_softc *sc = bp->bio_disk->d_drv1;

	/* bogus disk? */
	if (sc == NULL) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk.
	 */
	mtx_lock(&sc->xbd_io_lock);

	xbd_enqueue_bio(sc, bp);
	xbd_startio(sc);

	mtx_unlock(&sc->xbd_io_lock);
}

/*------------------------------ Ring Management -----------------------------*/
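/*
 * Allocate and grant the shared request ring.  A single-page ring
 * publishes the traditional "ring-ref" xenstore key; multi-page rings
 * publish one "ring-ref%u" key per page.  The event channel bound here
 * is where xbd_int() receives backend response notifications.
 */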
static int
xbd_alloc_ring(struct xbd_softc *sc)
{
	blkif_sring_t *sring;
	uintptr_t sring_page_addr;
	int error;
	int i;

	sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
	    M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
		return (ENOMEM);
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);

	for (i = 0, sring_page_addr = (uintptr_t)sring;
	     i < sc->xbd_ring_pages;
	     i++, sring_page_addr += PAGE_SIZE) {

		error = xenbus_grant_ring(sc->xbd_dev,
		    (vtomach(sring_page_addr) >> PAGE_SHIFT),
		    &sc->xbd_ring_ref[i]);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "granting ring_ref(%d)", i);
			return (error);
		}
	}
	if (sc->xbd_ring_pages == 1) {
		error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
		    "ring-ref", "%u", sc->xbd_ring_ref[0]);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/ring-ref",
			    xenbus_get_node(sc->xbd_dev));
			return (error);
		}
	} else {
		for (i = 0; i < sc->xbd_ring_pages; i++) {
			char ring_ref_name[] = "ring_refXX";

			snprintf(ring_ref_name, sizeof(ring_ref_name),
			    "ring-ref%u", i);
			error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
			    ring_ref_name, "%u", sc->xbd_ring_ref[i]);
			if (error) {
				xenbus_dev_fatal(sc->xbd_dev, error,
				    "writing %s/%s",
				    xenbus_get_node(sc->xbd_dev),
				    ring_ref_name);
				return (error);
			}
		}
	}

	error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev,
	    xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "xen_intr_alloc_and_bind_local_port failed");
		return (error);
	}

	return (0);
}

static void
xbd_free_ring(struct xbd_softc *sc)
{
	int i;

	if (sc->xbd_ring.sring == NULL)
		return;

	for (i = 0; i < sc->xbd_ring_pages; i++) {
		if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) {
			gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]);
			sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
		}
	}
	free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
	sc->xbd_ring.sring = NULL;
}

/*-------------------------- Initialization/Teardown -------------------------*/
static int
xbd_feature_string(struct xbd_softc *sc, char *features, size_t len)
{
	struct sbuf sb;
	int feature_cnt;

	sbuf_new(&sb, features, len, SBUF_FIXEDLEN);

	feature_cnt = 0;
	if ((sc->xbd_flags & XBDF_FLUSH) != 0) {
		sbuf_printf(&sb, "flush");
		feature_cnt++;
	}

	if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
		if (feature_cnt != 0)
			sbuf_printf(&sb, ", ");
		sbuf_printf(&sb, "write_barrier");
		feature_cnt++;
	}

	(void) sbuf_finish(&sb);
	return (sbuf_len(&sb));
}

static int
xbd_sysctl_features(SYSCTL_HANDLER_ARGS)
{
	char features[80];
	struct xbd_softc *sc = arg1;
	int error;
	int len;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	len = xbd_feature_string(sc, features, sizeof(features));

	/* len is -1 on error, which will make the SYSCTL_OUT a no-op. */
	return (SYSCTL_OUT(req, features, len + 1/*NUL*/));
}

static void
xbd_setup_sysctl(struct xbd_softc *xbd)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	struct sysctl_oid_list *children;

	sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
	if (sysctl_tree == NULL)
		return;

	children = SYSCTL_CHILDREN(sysctl_tree);
	SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
	    "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
	    "maximum outstanding requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
	    "max_request_segments", CTLFLAG_RD,
	    &xbd->xbd_max_request_segments, 0,
	    "maximum number of pages per requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
	    "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
	    "maximum size in bytes of a request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
	    "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
	    "communication channel pages (negotiated)");

	SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
	    "features", CTLTYPE_STRING|CTLFLAG_RD, xbd, 0,
	    xbd_sysctl_features, "A", "protocol features (negotiated)");
}

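/*
 * Note: the nodes above hang off the device's own sysctl tree, so the
 * negotiated values can be inspected from userland, e.g. (assuming the
 * first unit):
 *
 *	sysctl dev.xbd.0.max_requests dev.xbd.0.features
 */
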
/*
 * Translate Linux major/minor to an appropriate name and unit
 * number.  For HVM guests, this allows us to use the same drive names
 * with blkfront as the emulated drives, easing transition slightly.
 */
static void
xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
	static struct vdev_info {
		int major;
		int shift;
		int base;
		const char *name;
	} info[] = {
		{3,	6,	0,	"ada"},	/* ide0 */
		{22,	6,	2,	"ada"},	/* ide1 */
		{33,	6,	4,	"ada"},	/* ide2 */
		{34,	6,	6,	"ada"},	/* ide3 */
		{56,	6,	8,	"ada"},	/* ide4 */
		{57,	6,	10,	"ada"},	/* ide5 */
		{88,	6,	12,	"ada"},	/* ide6 */
		{89,	6,	14,	"ada"},	/* ide7 */
		{90,	6,	16,	"ada"},	/* ide8 */
		{91,	6,	18,	"ada"},	/* ide9 */

		{8,	4,	0,	"da"},	/* scsi disk0 */
		{65,	4,	16,	"da"},	/* scsi disk1 */
		{66,	4,	32,	"da"},	/* scsi disk2 */
		{67,	4,	48,	"da"},	/* scsi disk3 */
		{68,	4,	64,	"da"},	/* scsi disk4 */
		{69,	4,	80,	"da"},	/* scsi disk5 */
		{70,	4,	96,	"da"},	/* scsi disk6 */
		{71,	4,	112,	"da"},	/* scsi disk7 */
		{128,	4,	128,	"da"},	/* scsi disk8 */
		{129,	4,	144,	"da"},	/* scsi disk9 */
		{130,	4,	160,	"da"},	/* scsi disk10 */
		{131,	4,	176,	"da"},	/* scsi disk11 */
		{132,	4,	192,	"da"},	/* scsi disk12 */
		{133,	4,	208,	"da"},	/* scsi disk13 */
		{134,	4,	224,	"da"},	/* scsi disk14 */
		{135,	4,	240,	"da"},	/* scsi disk15 */

		{202,	4,	0,	"xbd"},	/* xbd */

		{0,	0,	0,	NULL},
	};

	int major = vdevice >> 8;
	int minor = vdevice & 0xff;
	int i;

	if (vdevice & (1 << 28)) {
		*unit = (vdevice & ((1 << 28) - 1)) >> 8;
		*name = "xbd";
		return;
	}

	for (i = 0; info[i].major; i++) {
		if (info[i].major == major) {
			*unit = info[i].base + (minor >> info[i].shift);
			*name = info[i].name;
			return;
		}
	}

	*unit = minor >> 4;
	*name = "xbd";
}

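/*
 * For example, a backend exporting vdevice 0x300 (Linux major 3,
 * minor 0, i.e. hda) attaches above as ada0, while vdevice 0x10000300
 * uses the extended encoding (bit 28 set) and attaches as xbd3.
 */
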
int
xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
    int vdevice, uint16_t vdisk_info, unsigned long sector_size)
{
	char features[80];
	int unit, error = 0;
	const char *name;

	xbd_vdevice_to_unit(vdevice, &unit, &name);

	sc->xbd_unit = unit;

	if (strcmp(name, "xbd") != 0)
		device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);

	if (xbd_feature_string(sc, features, sizeof(features)) > 0) {
		device_printf(sc->xbd_dev, "features: %s\n",
		    features);
	}

	sc->xbd_disk = disk_alloc();
	sc->xbd_disk->d_unit = sc->xbd_unit;
	sc->xbd_disk->d_open = xbd_open;
	sc->xbd_disk->d_close = xbd_close;
	sc->xbd_disk->d_ioctl = xbd_ioctl;
	sc->xbd_disk->d_strategy = xbd_strategy;
	sc->xbd_disk->d_dump = xbd_dump;
	sc->xbd_disk->d_name = name;
	sc->xbd_disk->d_drv1 = sc;
	sc->xbd_disk->d_sectorsize = sector_size;

	sc->xbd_disk->d_mediasize = sectors * sector_size;
	sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
	sc->xbd_disk->d_flags = 0;
	if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
		sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
		device_printf(sc->xbd_dev,
		    "synchronize cache commands enabled.\n");
	}
	disk_create(sc->xbd_disk, DISK_VERSION);

	return (error);
}

static void
xbd_free(struct xbd_softc *sc)
{
	int i;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xbd_io_lock);
	sc->xbd_state = XBD_STATE_DISCONNECTED;
	mtx_unlock(&sc->xbd_io_lock);

	/* Free resources associated with old device channel. */
	xbd_free_ring(sc);
	if (sc->xbd_shadow) {

		for (i = 0; i < sc->xbd_max_requests; i++) {
			struct xbd_command *cm;

			cm = &sc->xbd_shadow[i];
			if (cm->cm_sg_refs != NULL) {
				free(cm->cm_sg_refs, M_XENBLOCKFRONT);
				cm->cm_sg_refs = NULL;
			}

			bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
		}
		free(sc->xbd_shadow, M_XENBLOCKFRONT);
		sc->xbd_shadow = NULL;

		bus_dma_tag_destroy(sc->xbd_io_dmat);

		xbd_initq_cm(sc, XBD_Q_FREE);
		xbd_initq_cm(sc, XBD_Q_READY);
		xbd_initq_cm(sc, XBD_Q_COMPLETE);
	}

	xen_intr_unbind(&sc->xen_intr_handle);
}

/*--------------------------- State Change Handlers --------------------------*/
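/*
 * Negotiation with the backend happens through xenstore: the backend
 * advertises its limits under its own tree, and the frontend writes the
 * values it settled on under ours.  Key names below are the ones this
 * driver reads and writes; the paths and values are illustrative only:
 *
 *	backend/vbd/<domid>/<handle>/max-ring-page-order = "2"
 *	device/vbd/<handle>/num-ring-pages               = "4"
 *	device/vbd/<handle>/event-channel                = "17"
 *	device/vbd/<handle>/protocol                     = "x86_64-abi"
 */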
static void
xbd_initialize(struct xbd_softc *sc)
{
	const char *otherend_path;
	const char *node_path;
	uint32_t max_ring_page_order;
	int error;
	int i;

	if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
		/* Initialization has already been performed. */
		return;
	}

	/*
	 * Protocol defaults valid even if negotiation for a
	 * setting fails.
	 */
	max_ring_page_order = 0;
	sc->xbd_ring_pages = 1;
	sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
	sc->xbd_max_request_size =
	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
	sc->xbd_max_request_blocks =
	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);

	/*
	 * Protocol negotiation.
	 *
	 * \note xs_gather() returns on the first encountered error, so
	 *       we must use independent calls in order to guarantee
	 *       we don't miss information in a sparsely populated back-end
	 *       tree.
	 *
	 * \note xs_scanf() does not update variables for unmatched
	 *       fields.
	 */
	otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
	node_path = xenbus_get_node(sc->xbd_dev);

	/* Support both backend schemes for relaying ring page limits. */
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-page-order", NULL, "%" PRIu32,
	    &max_ring_page_order);
	sc->xbd_ring_pages = 1 << max_ring_page_order;
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-pages", NULL, "%" PRIu32,
	    &sc->xbd_ring_pages);
	if (sc->xbd_ring_pages < 1)
		sc->xbd_ring_pages = 1;

	sc->xbd_max_requests =
	    BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-requests", NULL, "%" PRIu32,
	    &sc->xbd_max_requests);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-segments", NULL, "%" PRIu32,
	    &sc->xbd_max_request_segments);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-size", NULL, "%" PRIu32,
	    &sc->xbd_max_request_size);

	if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
		device_printf(sc->xbd_dev,
		    "Back-end specified ring-pages of %u "
		    "limited to front-end limit of %zu.\n",
		    sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
		sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
	}

	if (powerof2(sc->xbd_ring_pages) == 0) {
		uint32_t new_page_limit;

		new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
		device_printf(sc->xbd_dev,
		    "Back-end specified ring-pages of %u "
		    "is not a power of 2. Limited to %u.\n",
		    sc->xbd_ring_pages, new_page_limit);
		sc->xbd_ring_pages = new_page_limit;
	}

	if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_requests of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_requests, XBD_MAX_REQUESTS);
		sc->xbd_max_requests = XBD_MAX_REQUESTS;
	}

	if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_segments of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_request_segments,
		    XBD_MAX_SEGMENTS_PER_REQUEST);
		sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
	}

	if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_size of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_request_size,
		    XBD_MAX_REQUEST_SIZE);
		sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
	}

	if (sc->xbd_max_request_size >
	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_size of %u "
		    "limited to front-end limit of %u.  (Too few segments.)\n",
		    sc->xbd_max_request_size,
		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
		sc->xbd_max_request_size =
		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
	}

	sc->xbd_max_request_blocks =
	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);

	/* Allocate datastructures based on negotiated values. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
	    512, PAGE_SIZE,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    sc->xbd_max_request_size,
	    sc->xbd_max_request_segments,
	    PAGE_SIZE,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &sc->xbd_io_lock,			/* lockarg */
	    &sc->xbd_io_dmat);
	if (error != 0) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "Cannot allocate parent DMA tag\n");
		return;
	}

	/* Per-transaction data allocation. */
	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
	if (sc->xbd_shadow == NULL) {
		bus_dma_tag_destroy(sc->xbd_io_dmat);
		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
		    "Cannot allocate request structures\n");
		return;
	}

	for (i = 0; i < sc->xbd_max_requests; i++) {
		struct xbd_command *cm;

		cm = &sc->xbd_shadow[i];
		cm->cm_sg_refs = malloc(
		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
		    M_XENBLOCKFRONT, M_NOWAIT);
		if (cm->cm_sg_refs == NULL)
			break;
		cm->cm_id = i;
		cm->cm_flags = XBDCF_INITIALIZER;
		cm->cm_sc = sc;
		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
			break;
		xbd_free_command(cm);
	}

	if (xbd_alloc_ring(sc) != 0)
		return;

	/* Support both backend schemes for relaying ring page limits. */
	if (sc->xbd_ring_pages > 1) {
		error = xs_printf(XST_NIL, node_path,
		    "num-ring-pages", "%u",
		    sc->xbd_ring_pages);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/num-ring-pages",
			    node_path);
			return;
		}

		error = xs_printf(XST_NIL, node_path,
		    "ring-page-order", "%u",
		    fls(sc->xbd_ring_pages) - 1);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/ring-page-order",
			    node_path);
			return;
		}
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-requests", "%u",
	    sc->xbd_max_requests);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-requests",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-segments", "%u",
	    sc->xbd_max_request_segments);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-request-segments",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-size", "%u",
	    sc->xbd_max_request_size);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-request-size",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "event-channel",
	    "%u", xen_intr_port(sc->xen_intr_handle));
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/event-channel",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "protocol",
	    "%s", XEN_IO_PROTO_ABI_NATIVE);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/protocol",
		    node_path);
		return;
	}

	xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
}

/*
 * Invoked when the backend is finally 'ready' (and has published
 * the details about the physical device - #sectors, size, etc).
 */
static void
xbd_connect(struct xbd_softc *sc)
{
	device_t dev = sc->xbd_dev;
	unsigned long sectors, sector_size;
	unsigned int binfo;
	int err, feature_barrier, feature_flush;

	if (sc->xbd_state == XBD_STATE_CONNECTED ||
	    sc->xbd_state == XBD_STATE_SUSPENDED)
		return;

	DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));

	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "sectors", "%lu", &sectors,
	    "info", "%u", &binfo,
	    "sector-size", "%lu", &sector_size,
	    NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
		    "reading backend fields at %s",
		    xenbus_get_otherend_path(dev));
		return;
	}

	/* Scan the feature flags with "%d"; they are plain ints. */
	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "feature-barrier", "%d", &feature_barrier,
	    NULL);
	if (err == 0 && feature_barrier != 0)
		sc->xbd_flags |= XBDF_BARRIER;

	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "feature-flush-cache", "%d", &feature_flush,
	    NULL);
	if (err == 0 && feature_flush != 0)
		sc->xbd_flags |= XBDF_FLUSH;

	if (sc->xbd_disk == NULL) {
		device_printf(dev, "%juMB <%s> at %s",
		    (uintmax_t)sectors / (1048576 / sector_size),
		    device_get_desc(dev),
		    xenbus_get_node(dev));
		bus_print_child_footer(device_get_parent(dev), dev);

		xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
		    sector_size);
	}

	(void)xenbus_set_state(dev, XenbusStateConnected);

	/* Kick pending requests. */
	mtx_lock(&sc->xbd_io_lock);
	sc->xbd_state = XBD_STATE_CONNECTED;
	xbd_startio(sc);
	sc->xbd_flags |= XBDF_READY;
	mtx_unlock(&sc->xbd_io_lock);
}

/*
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
xbd_closing(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	xenbus_set_state(dev, XenbusStateClosing);

	DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));

	if (sc->xbd_disk != NULL) {
		disk_destroy(sc->xbd_disk);
		sc->xbd_disk = NULL;
	}

	xenbus_set_state(dev, XenbusStateClosed);
}

/*---------------------------- NewBus Entrypoints ----------------------------*/
static int
xbd_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vbd")) {
		device_set_desc(dev, "Virtual Block Device");
		device_quiet(dev);
		return (0);
	}

	return (ENXIO);
}

/*
 * Setup supplies the backend directory and virtual device.  We publish an
 * event channel and shared frame entries, then watch the backend and wait
 * until it is ready.
 */
static int
xbd_attach(device_t dev)
{
	struct xbd_softc *sc;
	const char *name;
	uint32_t vdevice;
	int error;
	int i;
	int unit;

	/* FIXME: Use dynamic device id if this is not set. */
	error = xs_scanf(XST_NIL, xenbus_get_node(dev),
	    "virtual-device", NULL, "%" PRIu32, &vdevice);
	if (error)
		error = xs_scanf(XST_NIL, xenbus_get_node(dev),
		    "virtual-device-ext", NULL, "%" PRIu32, &vdevice);
	if (error) {
		xenbus_dev_fatal(dev, error, "reading virtual-device");
		device_printf(dev, "Couldn't determine virtual device.\n");
		return (error);
	}

	xbd_vdevice_to_unit(vdevice, &unit, &name);
	if (!strcmp(name, "xbd"))
		device_set_unit(dev, unit);

	sc = device_get_softc(dev);
	mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
	xbd_initqs(sc);
	for (i = 0; i < XBD_MAX_RING_PAGES; i++)
		sc->xbd_ring_ref[i] = GRANT_REF_INVALID;

	sc->xbd_dev = dev;
	sc->xbd_vdevice = vdevice;
	sc->xbd_state = XBD_STATE_DISCONNECTED;

	xbd_setup_sysctl(sc);

	/* Wait for backend device to publish its protocol capabilities. */
	xenbus_set_state(dev, XenbusStateInitialising);

	return (0);
}

static int
xbd_detach(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));

	xbd_free(sc);
	mtx_destroy(&sc->xbd_io_lock);

	return (0);
}

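/*
 * Drain all in-flight commands before allowing the save/migration to
 * proceed.  If the backend has not answered within 30 seconds, refuse
 * the suspend with EBUSY and restore the previous state.
 */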
static int
xbd_suspend(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);
	int retval;
	int saved_state;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xbd_io_lock);
	saved_state = sc->xbd_state;
	sc->xbd_state = XBD_STATE_SUSPENDED;

	/* Wait for outstanding I/O to drain. */
	retval = 0;
	while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
		if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
		    PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
			retval = EBUSY;
			break;
		}
	}
	mtx_unlock(&sc->xbd_io_lock);

	if (retval != 0)
		sc->xbd_state = saved_state;

	return (retval);
}

static int
xbd_resume(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));

	xbd_free(sc);
	xbd_initialize(sc);
	return (0);
}

/*
 * Callback received when the backend's state changes.
 */
static void
xbd_backend_changed(device_t dev, XenbusState backend_state)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("backend_state=%d\n", backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
		xbd_initialize(sc);
		break;

	case XenbusStateConnected:
		xbd_initialize(sc);
		xbd_connect(sc);
		break;

	case XenbusStateClosing:
		if (sc->xbd_users > 0)
			xenbus_dev_error(dev, -EBUSY,
			    "Device in use; refusing to close");
		else
			xbd_closing(dev);
		break;
	}
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xbd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xbd_probe),
	DEVMETHOD(device_attach,	xbd_attach),
	DEVMETHOD(device_detach,	xbd_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xbd_suspend),
	DEVMETHOD(device_resume,	xbd_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),

	{ 0, 0 }
};

static driver_t xbd_driver = {
	"xbd",
	xbd_methods,
	sizeof(struct xbd_softc),
};
devclass_t xbd_devclass;

DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);