/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__

#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)

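/*
 * Worked example (illustrative only, assuming 4 KiB pages and the classic
 * blkif limit of 11 segments per request): one segment is held back for
 * unaligned transfers, so XBD_SEGS_TO_SIZE(11) == 10 * 4096 == 40 KiB of
 * maximum I/O per request.  The sanity check below is a sketch and is not
 * compiled into the driver.
 */
#if 0	/* example only */
_Static_assert(XBD_SEGS_TO_SIZE(11) == 10 * 4096,
    "11 transport segments support a 40 KiB request with 4 KiB pages");
#endif
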
/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)

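/*
 * Worked example (illustrative only, assuming 4 KiB pages): a 64 KiB
 * transfer covers 16 pages of data plus the one segment reserved for
 * unaligned transfers, so XBD_SIZE_TO_SEGS(65536) == 17.  The two macros
 * are conservative inverses: XBD_SIZE_TO_SEGS(XBD_SEGS_TO_SIZE(segs))
 * yields segs again.  Sketch only; not compiled into the driver.
 */
#if 0	/* example only */
_Static_assert(XBD_SIZE_TO_SEGS(16 * 4096) == 17,
    "a 64 KiB request needs 17 segments with 4 KiB pages");
#endif
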
/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define	XBD_MAX_RING_PAGES	32

/**
 * The maximum number of outstanding requests we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBD_MAX_REQUESTS						\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)

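/*
 * XBD_MAX_REQUESTS is derived from the ring geometry rather than chosen
 * directly: __CONST_RING_SIZE() (from the Xen shared-ring interface)
 * computes how many request/response slots fit in the given number of
 * bytes of shared ring, rounded down to a power of two.  A sketch of a
 * sanity check, not compiled into the driver:
 */
#if 0	/* example only */
_Static_assert((XBD_MAX_REQUESTS & (XBD_MAX_REQUESTS - 1)) == 0,
    "slot counts from __CONST_RING_SIZE() are powers of two");
#endif
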
/**
 * The maximum number of blkif segments which can be provided per indirect
 * page in an indirect request.
 */
#define	XBD_MAX_SEGMENTS_PER_PAGE					\
	(PAGE_SIZE / sizeof(struct blkif_request_segment))

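/*
 * Illustrative arithmetic (an assumption about typical geometry, not a
 * guarantee of the ABI): with 4 KiB pages and an 8-byte
 * struct blkif_request_segment descriptor, a single indirect page
 * describes 4096 / 8 == 512 data segments, i.e. up to 2 MiB of data.
 */
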
/**
 * The maximum number of blkif segments which can be provided in an indirect
 * request.
 */
#define	XBD_MAX_INDIRECT_SEGMENTS					\
	(BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)

/**
 * Compute the number of indirect segment pages required for an I/O with the
 * specified number of indirect segments.
 */
#define	XBD_INDIRECT_SEGS_TO_PAGES(segs)				\
	(((segs) + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)

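/*
 * Worked example (illustrative only): assuming 512 segments fit in one
 * indirect page, XBD_INDIRECT_SEGS_TO_PAGES(512) is exactly 1 while
 * XBD_INDIRECT_SEGS_TO_PAGES(513) rounds up to 2.  The expression is the
 * usual ceiling-division idiom, (n + d - 1) / d.
 */
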
typedef enum {
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1<<8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1<<9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1<<10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;

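/*
 * The low byte of a command's flag word (XBDCF_Q_MASK) does double duty:
 * the queue helpers below store the index of the queue a command currently
 * sits on in those bits, so XBDCF_INITIALIZER starts a command out as "on
 * no queue" (XBD_Q_NONE, defined below).  A minimal sketch of the encoding;
 * XBD_CM_QUEUE() is a hypothetical helper, not part of the driver:
 */
#if 0	/* example only */
#define	XBD_CM_QUEUE(cm)	((cm)->cm_flags & XBDCF_Q_MASK)
#endif
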
struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	grant_ref_t		*cm_sg_refs;
	grant_ref_t		 cm_gref_head;
	blkif_sector_t		 cm_sector_number;
	xbd_cbcf_t		*cm_complete;
	void			*cm_indirectionpages;
	grant_ref_t		 cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;

typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;

typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0,	/* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1,	/* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2,	/* backend supports flush */
	XBDF_READY	  = 1 << 3,	/* Is ready */
	XBDF_CM_SHORTAGE  = 1 << 4,	/* Free cm resource shortage active. */
	XBDF_GNT_SHORTAGE = 1 << 5,	/* Grant ref resource shortage active */
	XBDF_WAIT_IDLE	  = 1 << 6	/*
					 * No new work until outstanding work
					 * completes.
					 */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	xbd_flag_t			 xbd_flags;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_size;
	uint32_t			 xbd_max_request_indirectpages;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/*
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;
	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}

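/*
 * A minimal sketch (not part of the driver) of how the command queue
 * helpers above are typically paired: a command is pulled off one queue,
 * worked on, and pushed onto the next, with xbd_io_lock held by the
 * caller.  xbd_example_start_one() and xbd_dispatch_request() are
 * hypothetical names standing in for the real dispatch path in blkfront.c.
 */
#if 0	/* example only */
static inline void
xbd_example_start_one(struct xbd_softc *sc)
{
	struct xbd_command *cm;

	mtx_assert(&sc->xbd_io_lock, MA_OWNED);

	/* Take the oldest command waiting to be issued... */
	cm = xbd_dequeue_cm(sc, XBD_Q_READY);
	if (cm == NULL)
		return;

	/* ...and mark it outstanding while the backend processes it. */
	xbd_enqueue_cm(cm, XBD_Q_BUSY);
	xbd_dispatch_request(cm);	/* hypothetical dispatch helper */
}
#endif
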
static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}

static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}

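/*
 * A minimal sketch (not part of the driver) of the intended bio queue
 * flow: the strategy entry point defers incoming struct bio requests onto
 * xbd_bioq, and the start routine later drains them with
 * xbd_dequeue_bio() once a free command and ring slot are available.
 * xbd_example_strategy() is an illustrative name only.
 */
#if 0	/* example only */
static inline void
xbd_example_strategy(struct xbd_softc *sc, struct bio *bp)
{
	mtx_lock(&sc->xbd_io_lock);
	/* Defer the bio; the start routine will pick it up later. */
	xbd_enqueue_bio(sc, bp);
	mtx_unlock(&sc->xbd_io_lock);
}
#endif
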
static inline void
xbd_initqs(struct xbd_softc *sc)
{
	xbd_q_index_t index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */