2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/param.h>
32 #include <sys/mutex.h>
34 #include "hv_vmbus_priv.h"
/*
 * Amount of space available to write, given the read index (r), write
 * index (w) and the data size of the ring (z).  The whole expansion is
 * parenthesized so the macro composes safely inside larger expressions
 * (the original `cond ? a : b` form leaked ?: precedence to call sites).
 */
#define HV_BYTES_AVAIL_TO_WRITE(r, w, z)        \
        (((w) >= (r)) ?                         \
            ((z) - ((w) - (r))) : ((r) - (w)))
41 * @brief Get number of bytes available to read and to write to
42 * for the specified ring buffer
45 get_ring_buffer_avail_bytes(
46 hv_vmbus_ring_buffer_info* rbi,
50 uint32_t read_loc, write_loc;
53 * Capture the read/write indices before they changed
55 read_loc = rbi->ring_buffer->read_index;
56 write_loc = rbi->ring_buffer->write_index;
58 *write = HV_BYTES_AVAIL_TO_WRITE(
59 read_loc, write_loc, rbi->ring_data_size);
60 *read = rbi->ring_data_size - *write;
64 * @brief Get the next write location for the specified ring buffer
66 static inline uint32_t
67 get_next_write_location(hv_vmbus_ring_buffer_info* ring_info)
69 uint32_t next = ring_info->ring_buffer->write_index;
74 * @brief Set the next write location for the specified ring buffer
77 set_next_write_location(
78 hv_vmbus_ring_buffer_info* ring_info,
79 uint32_t next_write_location)
81 ring_info->ring_buffer->write_index = next_write_location;
85 * @brief Get the next read location for the specified ring buffer
87 static inline uint32_t
88 get_next_read_location(hv_vmbus_ring_buffer_info* ring_info)
90 uint32_t next = ring_info->ring_buffer->read_index;
95 * @brief Get the next read location + offset for the specified ring buffer.
96 * This allows the caller to skip.
98 static inline uint32_t
99 get_next_read_location_with_offset(
100 hv_vmbus_ring_buffer_info* ring_info,
103 uint32_t next = ring_info->ring_buffer->read_index;
105 next %= ring_info->ring_data_size;
110 * @brief Set the next read location for the specified ring buffer
113 set_next_read_location(
114 hv_vmbus_ring_buffer_info* ring_info,
115 uint32_t next_read_location)
117 ring_info->ring_buffer->read_index = next_read_location;
121 * @brief Get the start of the ring buffer
124 get_ring_buffer(hv_vmbus_ring_buffer_info* ring_info)
126 return (void *) ring_info->ring_buffer->buffer;
130 * @brief Get the size of the ring buffer.
132 static inline uint32_t
133 get_ring_buffer_size(hv_vmbus_ring_buffer_info* ring_info)
135 return ring_info->ring_data_size;
139 * Get the read and write indices as uint64_t of the specified ring buffer.
141 static inline uint64_t
142 get_ring_buffer_indices(hv_vmbus_ring_buffer_info* ring_info)
144 return (uint64_t) ring_info->ring_buffer->write_index << 32;
147 static uint32_t copy_to_ring_buffer(
148 hv_vmbus_ring_buffer_info* ring_info,
149 uint32_t start_write_offset,
153 static uint32_t copy_from_ring_buffer(
154 hv_vmbus_ring_buffer_info* ring_info,
157 uint32_t start_read_offset);
161 * @brief Get the interrupt mask for the specified ring buffer.
164 hv_vmbus_get_ring_buffer_interrupt_mask(hv_vmbus_ring_buffer_info *rbi)
166 return rbi->ring_buffer->interrupt_mask;
170 * @brief Initialize the ring buffer.
173 hv_vmbus_ring_buffer_init(
174 hv_vmbus_ring_buffer_info* ring_info,
178 memset(ring_info, 0, sizeof(hv_vmbus_ring_buffer_info));
180 ring_info->ring_buffer = (hv_vmbus_ring_buffer*) buffer;
181 ring_info->ring_buffer->read_index =
182 ring_info->ring_buffer->write_index = 0;
184 ring_info->ring_size = buffer_len;
185 ring_info->ring_data_size = buffer_len - sizeof(hv_vmbus_ring_buffer);
187 mtx_init(&ring_info->ring_lock, "vmbus ring buffer", NULL, MTX_SPIN);
193 * @brief Cleanup the ring buffer.
195 void hv_ring_buffer_cleanup(hv_vmbus_ring_buffer_info* ring_info)
197 mtx_destroy(&ring_info->ring_lock);
201 * @brief Write to the ring buffer.
204 hv_ring_buffer_write(
205 hv_vmbus_ring_buffer_info* out_ring_info,
206 hv_vmbus_sg_buffer_list sg_buffers[],
207 uint32_t sg_buffer_count)
210 uint32_t byte_avail_to_write;
211 uint32_t byte_avail_to_read;
212 uint32_t total_bytes_to_write = 0;
214 volatile uint32_t next_write_location;
215 uint64_t prev_indices = 0;
217 for (i = 0; i < sg_buffer_count; i++) {
218 total_bytes_to_write += sg_buffers[i].length;
221 total_bytes_to_write += sizeof(uint64_t);
223 mtx_lock_spin(&out_ring_info->ring_lock);
225 get_ring_buffer_avail_bytes(out_ring_info, &byte_avail_to_read,
226 &byte_avail_to_write);
229 * If there is only room for the packet, assume it is full.
230 * Otherwise, the next time around, we think the ring buffer
231 * is empty since the read index == write index
234 if (byte_avail_to_write <= total_bytes_to_write) {
236 mtx_unlock_spin(&out_ring_info->ring_lock);
241 * Write to the ring buffer
243 next_write_location = get_next_write_location(out_ring_info);
245 for (i = 0; i < sg_buffer_count; i++) {
246 next_write_location = copy_to_ring_buffer(out_ring_info,
247 next_write_location, (char *) sg_buffers[i].data,
248 sg_buffers[i].length);
252 * Set previous packet start
254 prev_indices = get_ring_buffer_indices(out_ring_info);
256 next_write_location = copy_to_ring_buffer(
257 out_ring_info, next_write_location,
258 (char *) &prev_indices, sizeof(uint64_t));
261 * Make sure we flush all writes before updating the writeIndex
266 * Now, update the write location
268 set_next_write_location(out_ring_info, next_write_location);
270 mtx_unlock_spin(&out_ring_info->ring_lock);
276 * @brief Read without advancing the read index.
280 hv_vmbus_ring_buffer_info* in_ring_info,
284 uint32_t bytesAvailToWrite;
285 uint32_t bytesAvailToRead;
286 uint32_t nextReadLocation = 0;
288 mtx_lock_spin(&in_ring_info->ring_lock);
290 get_ring_buffer_avail_bytes(in_ring_info, &bytesAvailToRead,
294 * Make sure there is something to read
296 if (bytesAvailToRead < buffer_len) {
297 mtx_unlock_spin(&in_ring_info->ring_lock);
302 * Convert to byte offset
304 nextReadLocation = get_next_read_location(in_ring_info);
306 nextReadLocation = copy_from_ring_buffer(
307 in_ring_info, (char *)buffer, buffer_len, nextReadLocation);
309 mtx_unlock_spin(&in_ring_info->ring_lock);
315 * @brief Read and advance the read index.
319 hv_vmbus_ring_buffer_info* in_ring_info,
324 uint32_t bytes_avail_to_write;
325 uint32_t bytes_avail_to_read;
326 uint32_t next_read_location = 0;
327 uint64_t prev_indices = 0;
332 mtx_lock_spin(&in_ring_info->ring_lock);
334 get_ring_buffer_avail_bytes(
335 in_ring_info, &bytes_avail_to_read,
336 &bytes_avail_to_write);
339 * Make sure there is something to read
341 if (bytes_avail_to_read < buffer_len) {
342 mtx_unlock_spin(&in_ring_info->ring_lock);
346 next_read_location = get_next_read_location_with_offset(
350 next_read_location = copy_from_ring_buffer(
356 next_read_location = copy_from_ring_buffer(
358 (char *) &prev_indices,
363 * Make sure all reads are done before we update the read index since
364 * the writer may start writing to the read area once the read index
370 * Update the read index
372 set_next_read_location(in_ring_info, next_read_location);
374 mtx_unlock_spin(&in_ring_info->ring_lock);
380 * @brief Helper routine to copy from source to ring buffer.
382 * Assume there is enough room. Handles wrap-around in dest case only!
386 hv_vmbus_ring_buffer_info* ring_info,
387 uint32_t start_write_offset,
391 char *ring_buffer = get_ring_buffer(ring_info);
392 uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);
395 if (src_len > ring_buffer_size - start_write_offset) {
396 /* wrap-around detected! */
397 fragLen = ring_buffer_size - start_write_offset;
398 memcpy(ring_buffer + start_write_offset, src, fragLen);
399 memcpy(ring_buffer, src + fragLen, src_len - fragLen);
401 memcpy(ring_buffer + start_write_offset, src, src_len);
404 start_write_offset += src_len;
405 start_write_offset %= ring_buffer_size;
407 return (start_write_offset);
411 * @brief Helper routine to copy to source from ring buffer.
413 * Assume there is enough room. Handles wrap-around in src case only!
416 copy_from_ring_buffer(
417 hv_vmbus_ring_buffer_info* ring_info,
420 uint32_t start_read_offset)
423 char *ring_buffer = get_ring_buffer(ring_info);
424 uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);
426 if (dest_len > ring_buffer_size - start_read_offset) {
427 /* wrap-around detected at the src */
428 fragLen = ring_buffer_size - start_read_offset;
429 memcpy(dest, ring_buffer + start_read_offset, fragLen);
430 memcpy(dest + fragLen, ring_buffer, dest_len - fragLen);
432 memcpy(dest, ring_buffer + start_read_offset, dest_len);
435 start_read_offset += dest_len;
436 start_read_offset %= ring_buffer_size;
438 return (start_read_offset);