/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__

#include <xen/blkif.h>
/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)
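/*
 * Illustrative sketch (not part of the original source): with a 4KiB
 * PAGE_SIZE and the classic blkif limit of 11 segments per request,
 * XBD_SEGS_TO_SIZE(11) = (11 - 1) * 4096 = 40KiB.  The reserved segment
 * absorbs the case where a buffer starts mid-page, so its first and last
 * pages each contribute fewer than PAGE_SIZE bytes.
 */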
/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
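/*
 * Illustrative example (again assuming a 4KiB PAGE_SIZE): a 40KiB transfer
 * needs XBD_SIZE_TO_SEGS(40960) = (40960 / 4096) + 1 = 11 segments.  The
 * extra segment covers an unaligned start address, which can spread the
 * same number of bytes across one additional page.
 */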
/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_REQUESTS		256
/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUEST_SIZE						\
	MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_SEGMENTS_PER_REQUEST					\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
	     XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
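/*
 * Worked example (illustrative; assumes a 4KiB PAGE_SIZE, a 128KiB
 * MAXPHYS, and the classic 11-segment BLKIF_MAX_SEGMENTS_PER_REQUEST):
 *
 *	XBD_SEGS_TO_SIZE(11)		= 10 * 4096		= 40KiB
 *	XBD_MAX_REQUEST_SIZE		= MIN(128KiB, 40KiB)	= 40KiB
 *	XBD_SIZE_TO_SEGS(40KiB)		= 10 + 1		= 11
 *	XBD_MAX_SEGMENTS_PER_REQUEST	= MIN(11, 11)		= 11
 *
 * With these values the two MIN()s are mutually consistent: the transport
 * segment limit bounds the request size, and that request size bounds the
 * segment count right back to the transport limit.
 */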
/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES						    \
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
		       * XBD_MAX_REQUESTS)
typedef enum {
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1<<8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1<<9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1<<10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;
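/*
 * The low byte of cm_flags (XBDCF_Q_MASK) records the xbd_q_index_t of the
 * queue a command currently sits on; the true flag bits begin at 1<<8.
 * XBDCF_INITIALIZER sets that byte to XBD_Q_NONE, so a freshly initialized
 * command is, correctly, on no queue at all.  For example (illustrative
 * only), moving a command onto the ready queue amounts to:
 *
 *	cm->cm_flags = (cm->cm_flags & ~XBDCF_Q_MASK) | XBD_Q_READY;
 *
 * which is exactly the update xbd_enqueue_cm() below performs after
 * linking the command into the tail queue.
 */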
struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);
struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
};
typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;
typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;
typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;
typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0, /* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1, /* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2, /* backend supports flush */
	XBDF_READY	  = 1 << 3, /* Is ready */
	XBDF_CM_SHORTAGE  = 1 << 4, /* Free cm resource shortage active. */
	XBDF_GNT_SHORTAGE = 1 << 5, /* Grant ref resource shortage active. */
	XBDF_WAIT_IDLE	  = 1 << 6  /*
				     * No new work until outstanding work
				     * completes.
				     */
} xbd_flag_t;
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	xbd_flag_t			 xbd_flags;
	int				 xbd_qfrozen_cnt;
	int				 xbd_vdevice;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_blocks;
	uint32_t			 xbd_max_request_size;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};
int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);
static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}
static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}
static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}
static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}
static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}
static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}
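/*
 * Typical command lifecycle sketch (illustrative only; the real logic
 * lives in blkfront.c and the error handling here is elided): a command
 * is pulled from the free pool, dispatched on the busy queue, and parked
 * on the complete queue once the backend responds.
 *
 *	struct xbd_command *cm;
 *
 *	if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL)
 *		return;				// free-list shortage
 *	// ... fill in cm_data, cm_sector_number, etc. ...
 *	xbd_enqueue_cm(cm, XBD_Q_BUSY);		// in flight to the backend
 *	// ... later, when the response arrives ...
 *	xbd_remove_cm(cm, XBD_Q_BUSY);
 *	xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
 */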
static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}
static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}
static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}
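/*
 * Illustrative dispatch-loop fragment (a sketch, not the driver's actual
 * code): bios are consumed from the sort queue and pushed back to its
 * head, preserving order, whenever command or grant-ref resources run dry.
 *
 *	struct bio *bp;
 *
 *	while ((bp = xbd_dequeue_bio(sc)) != NULL) {
 *		if (resource_shortage(sc)) {	// hypothetical predicate
 *			xbd_requeue_bio(sc, bp);
 *			break;
 *		}
 *		// ... map bp into an xbd_command and send it ...
 *	}
 */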
static inline void
xbd_initqs(struct xbd_softc *sc)
{
	u_int index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}
#endif /* __XEN_BLKFRONT_BLOCK_H__ */