/*
 * XenBSD block device driver
 *
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __XEN_DRIVERS_BLOCK_H__
#define __XEN_DRIVERS_BLOCK_H__

#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBF_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)
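
/*
 * Illustration (not part of the original header; values are assumptions):
 * with the typical 4KiB PAGE_SIZE and the classic blkif limit of 11
 * segments per request, one segment is held back for alignment slop, so
 *
 *	XBF_SEGS_TO_SIZE(11) == (11 - 1) * 4096 == 40960	(40KiB)
 *
 * A compile-time sanity check along these lines could read:
 *
 *	CTASSERT(XBF_SEGS_TO_SIZE(2) == PAGE_SIZE);
 */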

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBF_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
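
/*
 * Illustration (not part of the original header): this is the inverse of
 * XBF_SEGS_TO_SIZE above, again with one extra segment reserved for an
 * unaligned start/end. With an assumed 4KiB PAGE_SIZE:
 *
 *	XBF_SIZE_TO_SEGS(40960) == (40960 / 4096) + 1 == 11
 *
 * so a 40KiB transfer round-trips back to the 11 segments it came from.
 */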

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBF_MAX_REQUESTS	256

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBF_MAX_REQUEST_SIZE						\
	MIN(MAXPHYS, XBF_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
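
/*
 * Illustration (values are assumptions, not from this header): with the
 * traditional FreeBSD MAXPHYS of 128KiB and the classic 11-segment blkif
 * limit, XBF_SEGS_TO_SIZE(11) is 40KiB, so the MIN() resolves to 40KiB;
 * the transport, not MAXPHYS, is the binding constraint in that case.
 */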

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBF_MAX_SEGMENTS_PER_REQUEST					\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
	     XBF_SIZE_TO_SEGS(XBF_MAX_REQUEST_SIZE)))
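
/*
 * Illustration (assumed values as above): the two limits are mutually
 * consistent, so MIN(11, XBF_SIZE_TO_SEGS(40960)) == MIN(11, 11) == 11;
 * the clamp only bites if MAXPHYS ever drops below the transport limit.
 */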

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBF_MAX_REQUEST_SIZE'd.
 */
#define	XBF_MAX_RING_PAGES						\
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBF_MAX_SEGMENTS_PER_REQUEST) \
		       * XBF_MAX_REQUESTS)
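
/*
 * Reading the arithmetic (a sketch of the intent, inferred from the names
 * and the "request headers plus additional segment blocks" comment above):
 * BLKIF_SEGS_TO_BLOCKS() converts a segment count into the number of ring
 * slots a maximal request consumes, and BLKIF_RING_PAGES() then rounds the
 * slot total for XBF_MAX_REQUESTS such requests up to whole shared pages.
 */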

struct xlbd_major_info
{
	int major;
	int index;
	int usage;
	struct xlbd_type_info *type;
};

struct xb_command {
	TAILQ_ENTRY(xb_command)	cm_link;
	struct xb_softc		*cm_sc;
	u_int			cm_flags;
#define	XB_CMD_FROZEN		(1<<0)
#define	XB_CMD_POLLED		(1<<1)
#define	XB_ON_XBQ_FREE		(1<<2)
#define	XB_ON_XBQ_READY		(1<<3)
#define	XB_ON_XBQ_BUSY		(1<<4)
#define	XB_ON_XBQ_COMPLETE	(1<<5)
#define	XB_ON_XBQ_MASK		((1<<2)|(1<<3)|(1<<4)|(1<<5))
	grant_ref_t		*sg_refs;
	grant_ref_t		gref_head;
	blkif_sector_t		sector_number;
	void			(* cm_complete)(struct xb_command *);
};
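
/*
 * A note on intended use (inferred from the XB_ON_XBQ_* flags above and
 * the cm_free/cm_ready/cm_busy/cm_complete queues below, not stated in
 * the original): a command presumably cycles through the per-softc queues
 * as
 *
 *	free -> ready -> busy -> complete -> free
 *
 * with the XB_ON_XBQ_* bits recording which queue currently owns it, so
 * the queue macros can catch double-enqueue and mismatched-dequeue bugs.
 */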

#define	XBQ_FREE	0
#define	XBQ_BIO		1
#define	XBQ_READY	2
#define	XBQ_BUSY	3
#define	XBQ_COMPLETE	4
#define	XBQ_COUNT	5

struct xb_qstat {
	uint32_t	q_length;
	uint32_t	q_max;
};

union xb_statrequest {
	uint32_t		ms_item;
	struct xb_qstat		ms_qstat;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xb_softc {
	struct disk		*xb_disk;	/* disk params */
	struct bio_queue_head	xb_bioq;	/* sort queue */
	int			xb_flags;
#define	XB_OPEN		(1 << 0)	/* drive is open (can't shut down) */
#define	XB_BARRIER	(1 << 1)	/* backend supports barriers */
#define	XB_READY	(1 << 2)	/* Is ready */
#define	XB_FROZEN	(1 << 3)	/* Waiting for resources */
	uint32_t		max_requests;
	uint32_t		max_request_segments;
	uint32_t		max_request_blocks;
	uint32_t		max_request_size;
	grant_ref_t		ring_ref[XBF_MAX_RING_PAGES];
	blkif_front_ring_t	ring;
	struct gnttab_free_callback	callback;
	TAILQ_HEAD(,xb_command)	cm_free;
	TAILQ_HEAD(,xb_command)	cm_ready;
	TAILQ_HEAD(,xb_command)	cm_busy;
	TAILQ_HEAD(,xb_command)	cm_complete;
	struct xb_qstat		xb_qstat[XBQ_COUNT];
	bus_dma_tag_t		xb_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int			users;

	struct mtx		xb_io_lock;

	struct xb_command	*shadow;
};

int xlvbd_add(struct xb_softc *, blkif_sector_t sectors, int device,
	      uint16_t vdisk_info, unsigned long sector_size);
void xlvbd_del(struct xb_softc *);
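
/*
 * Hypothetical call site (not shown in this header): the frontend would
 * presumably invoke these as the backend connects and detaches, e.g.
 *
 *	error = xlvbd_add(sc, sectors, vdevice, vdisk_info, 512);
 *	...
 *	xlvbd_del(sc);
 *
 * where 512 stands in for the sector size advertised by the backend.
 */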

#define	XBQ_ADD(sc, qname)					\
	do {							\
		struct xb_qstat *qs;				\
								\
		qs = &(sc)->xb_qstat[qname];			\
		qs->q_length++;					\
		if (qs->q_length > qs->q_max)			\
			qs->q_max = qs->q_length;		\
	} while (0)

#define	XBQ_REMOVE(sc, qname)	(sc)->xb_qstat[qname].q_length--
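
/*
 * Worked example (illustrative only): XBQ_ADD/XBQ_REMOVE keep a simple
 * high-water statistic. After three XBQ_ADDs and one XBQ_REMOVE on an
 * initially empty queue, q_length == 2 while q_max remains 3.
 */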

#define	XBQ_INIT(sc, qname)					\
	do {							\
		sc->xb_qstat[qname].q_length = 0;		\
		sc->xb_qstat[qname].q_max = 0;			\
	} while (0)

#define	XBQ_COMMAND_QUEUE(name, index)					\
	static __inline void						\
	xb_initq_ ## name (struct xb_softc *sc)				\
	{								\
		TAILQ_INIT(&sc->cm_ ## name);				\
		XBQ_INIT(sc, index);					\
	}								\
	static __inline void						\
	xb_enqueue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_TAIL(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline void						\
	xb_requeue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_HEAD(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline struct xb_command *				\
	xb_dequeue_ ## name (struct xb_softc *sc)			\
	{								\
		struct xb_command *cm;					\
									\
		if ((cm = TAILQ_FIRST(&sc->cm_ ## name)) != NULL) {	\
			if ((cm->cm_flags & XB_ON_XBQ_MASK) !=		\
			    XB_ON_ ## index) {				\
				printf("command %p not in queue, "	\
				    "flags = %#x, bit = %#x\n", cm,	\
				    cm->cm_flags, XB_ON_ ## index);	\
				panic("command not in queue");		\
			}						\
			TAILQ_REMOVE(&sc->cm_ ## name, cm, cm_link);	\
			cm->cm_flags &= ~XB_ON_ ## index;		\
			XBQ_REMOVE(sc, index);				\
		}							\
		return (cm);						\
	}								\
	static __inline void						\
	xb_remove_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != XB_ON_ ## index) { \
			printf("command %p not in queue, flags = %#x, "	\
			    "bit = %#x\n", cm, cm->cm_flags,		\
			    XB_ON_ ## index);				\
			panic("command not in queue");			\
		}							\
		TAILQ_REMOVE(&cm->cm_sc->cm_ ## name, cm, cm_link);	\
		cm->cm_flags &= ~XB_ON_ ## index;			\
		XBQ_REMOVE(cm->cm_sc, index);				\
	}								\
struct hack

XBQ_COMMAND_QUEUE(free, XBQ_FREE);
XBQ_COMMAND_QUEUE(ready, XBQ_READY);
XBQ_COMMAND_QUEUE(busy, XBQ_BUSY);
XBQ_COMMAND_QUEUE(complete, XBQ_COMPLETE);
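
/*
 * Illustrative only (these call sites do not appear in this header): each
 * expansion above generates a family of inlines, so after
 * XBQ_COMMAND_QUEUE(free, XBQ_FREE) a caller might write
 *
 *	struct xb_command *cm;
 *
 *	if ((cm = xb_dequeue_free(sc)) == NULL)
 *		return;			// no free command slots
 *	...fill in the request...
 *	xb_enqueue_busy(cm);		// now owned by the busy queue
 *
 * relying on the XB_ON_XBQ_* bookkeeping to panic on queue misuse.
 */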

static __inline void
xb_initq_bio(struct xb_softc *sc)
{
	bioq_init(&sc->xb_bioq);
	XBQ_INIT(sc, XBQ_BIO);
}

static __inline void
xb_enqueue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline void
xb_requeue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline struct bio *
xb_dequeue_bio(struct xb_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xb_bioq)) != NULL) {
		bioq_remove(&sc->xb_bioq, bp);
		XBQ_REMOVE(sc, XBQ_BIO);
	}
	return (bp);
}
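
/*
 * Illustrative only (the strategy/start routines live in the .c file, and
 * these exact call sites are an assumption): the bio queue decouples I/O
 * submission from ring submission, roughly
 *
 *	// in the strategy routine: stage the request, sorted by the bioq
 *	xb_enqueue_bio(sc, bp);
 *
 *	// in the start routine: drain staged bios into free commands
 *	while ((bp = xb_dequeue_bio(sc)) != NULL)
 *		...attach bp to a command from xb_dequeue_free(sc)...
 */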

#endif /* __XEN_DRIVERS_BLOCK_H__ */