/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */
27 #ifndef __XEN_PUBLIC_IO_BLKIF_H__
28 #define __XEN_PUBLIC_IO_BLKIF_H__
30 #include <xen/interface/io/ring.h>
31 #include <xen/interface/grant_table.h>
/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */
/*
 * Basic interface types.  These are typedefs rather than #defines: a
 * typedef is type-checked by the compiler, visible to debuggers, and
 * cannot be accidentally re-#defined to something else.
 */
typedef uint16_t blkif_vdev_t;   /* virtual block-device handle */
typedef uint64_t blkif_sector_t; /* sector index on the virtual disk */
/* Request operation codes (struct blkif_request.operation). */
#define BLKIF_OP_READ 0
#define BLKIF_OP_WRITE 1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER 2
/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info. A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate. The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes. If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE 3
/*
 * Maximum scatter/gather segments associated with a request header block.
 * (Bounded by the space left in a ring slot after the request header.)
 */
#define BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK 11
/*
 * Maximum scatter/gather segments associated with a segment block
 * (a ring slot that carries only segment descriptors, no header).
 */
#define BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK 14
/*
 * Maximum scatter/gather segments per request (header + segment blocks).
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 255
/*
 * A single scatter/gather element: one granted page frame and the sector
 * range within it to transfer.  NOTE: this is a shared-ring ABI structure;
 * its layout must not change.
 */
struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame */
    /* @first_sect: first sector in frame to transfer (inclusive). */
    /* @last_sect: last sector in frame to transfer (inclusive). */
    uint8_t first_sect, last_sect;
};
typedef struct blkif_request_segment blkif_request_segment_t;
/*
 * Request header, placed in a shared-ring slot by the frontend.  The first
 * BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments are carried inline in @seg;
 * NOTE(review): requests with more segments presumably continue into
 * following ring slots (see BLKIF_SEGS_TO_BLOCKS / BLKRING_GET_SG_REQUEST)
 * — confirm against backend usage.  Shared-ring ABI: layout must not change.
 */
struct blkif_request {
    uint8_t operation;           /* BLKIF_OP_???                         */
    uint8_t nr_segments;         /* number of segments                   */
    blkif_vdev_t handle;         /* only for read/write requests         */
    uint64_t id;                 /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
};
typedef struct blkif_request blkif_request_t;
/*
 * Response, placed in a shared-ring slot by the backend when the request
 * identified by @id completes.  Shared-ring ABI: layout must not change.
 */
struct blkif_response {
    uint64_t id;             /* copied from request */
    uint8_t operation;       /* copied from request */
    int16_t status;          /* BLKIF_RSP_??? */
};
typedef struct blkif_response blkif_response_t;
/*
 * STATUS RETURN CODES (struct blkif_response.status).
 */
/* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY 0
/*
 * Generate blkif ring structures and types
 * (blkif_sring_t, blkif_front_ring_t, blkif_back_ring_t — see io/ring.h).
 */
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

/*
 * View the ring slot at @_idx as an array of segment descriptors rather
 * than a request header, for requests that span multiple slots.
 */
#define BLKRING_GET_SG_REQUEST(_r, _idx) \
((struct blkif_request_segment *)RING_GET_REQUEST(_r, _idx))
/* Virtual disk attribute flags (bitmask). */
#define VDISK_CDROM 0x1
#define VDISK_REMOVABLE 0x2
#define VDISK_READONLY 0x4
/*
 * The number of ring request blocks required to handle an I/O
 * request containing _segs segments: one header block (which carries up
 * to BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments itself) plus enough
 * segment blocks, each holding up to
 * BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK segments, for the remainder.
 *
 * The macro argument is parenthesised on every use so that expression
 * arguments (e.g. "a + b", "x ? a : b") expand correctly.
 */
#define BLKIF_SEGS_TO_BLOCKS(_segs)                                     \
    ((((((_segs) - BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK))                \
       + (BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK - 1))                    \
      / BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK) + /*header_block*/1)
152 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
/*
 * Local variables:
 * mode: C
 * indent-tabs-mode: nil
 * End:
 */