/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include "hv_vmbus_priv.h"

/* Amount of space available to write, for the given read/write indices */
#define HV_BYTES_AVAIL_TO_WRITE(r, w, z)                                \
        (((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
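
/*
 * Example: with a 4096-byte data area (z = 4096), read index r = 1000 and
 * write index w = 3000, 4096 - (3000 - 1000) = 2096 bytes are writable;
 * once the writer wraps so that w = 500 while r = 1000, only
 * 1000 - 500 = 500 bytes remain.  The writer never fills the ring
 * completely (see hv_ring_buffer_write), since read index == write index
 * must continue to mean "empty".
 */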

/**
 * @brief Get the number of bytes available to read from, and to write to,
 * the specified ring buffer.
 */
static inline void
get_ring_buffer_avail_bytes(
            hv_vmbus_ring_buffer_info*  rbi,
            uint32_t*                   read,
            uint32_t*                   write)
{
        uint32_t read_loc, write_loc;

        /*
         * Capture the read/write indices before they can change
         */
        read_loc = rbi->ring_buffer->read_index;
        write_loc = rbi->ring_buffer->write_index;

        *write = HV_BYTES_AVAIL_TO_WRITE(
                read_loc, write_loc, rbi->ring_data_size);
        *read = rbi->ring_data_size - *write;
}

/**
 * @brief Get the next write location for the specified ring buffer
 */
static inline uint32_t
get_next_write_location(hv_vmbus_ring_buffer_info* ring_info)
{
        uint32_t next = ring_info->ring_buffer->write_index;
        return (next);
}

/**
 * @brief Set the next write location for the specified ring buffer
 */
static inline void
set_next_write_location(
        hv_vmbus_ring_buffer_info*      ring_info,
        uint32_t                        next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/**
 * @brief Get the next read location for the specified ring buffer
 */
static inline uint32_t
get_next_read_location(hv_vmbus_ring_buffer_info* ring_info)
{
        uint32_t next = ring_info->ring_buffer->read_index;
        return (next);
}

/**
 * @brief Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead over data it has already consumed
 * by other means (e.g., a packet header it has peeked at).
 */
static inline uint32_t
get_next_read_location_with_offset(
        hv_vmbus_ring_buffer_info*      ring_info,
        uint32_t                        offset)
{
        uint32_t next = ring_info->ring_buffer->read_index;
        next += offset;
        next %= ring_info->ring_data_size;
        return (next);
}
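
/*
 * Example: with ring_data_size = 4096, read_index = 4000 and offset = 200,
 * the returned location wraps to (4000 + 200) % 4096 = 104.
 */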

/**
 * @brief Set the next read location for the specified ring buffer
 */
static inline void
set_next_read_location(
        hv_vmbus_ring_buffer_info*      ring_info,
        uint32_t                        next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/**
 * @brief Get the start of the ring buffer data area
 */
static inline void *
get_ring_buffer(hv_vmbus_ring_buffer_info* ring_info)
{
        return ((void *) ring_info->ring_buffer->buffer);
}

/**
 * @brief Get the size of the ring buffer data area.
 */
static inline uint32_t
get_ring_buffer_size(hv_vmbus_ring_buffer_info* ring_info)
{
        return (ring_info->ring_data_size);
}

/**
 * @brief Get the ring buffer indices packed as a uint64_t: the write index
 * in the upper 32 bits, with the lower 32 bits left zero.
 */
static inline uint64_t
get_ring_buffer_indices(hv_vmbus_ring_buffer_info* ring_info)
{
        return ((uint64_t) ring_info->ring_buffer->write_index << 32);
}
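
/*
 * This packed value is appended to each packet as its 8-byte trailer by
 * hv_ring_buffer_write, recording where the write index stood when the
 * packet began; e.g., a write index of 0x128 yields 0x0000012800000000.
 */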

static uint32_t copy_to_ring_buffer(
                        hv_vmbus_ring_buffer_info*      ring_info,
                        uint32_t                        start_write_offset,
                        char*                           src,
                        uint32_t                        src_len);

static uint32_t copy_from_ring_buffer(
                        hv_vmbus_ring_buffer_info*      ring_info,
                        char*                           dest,
                        uint32_t                        dest_len,
                        uint32_t                        start_read_offset);


/**
 * @brief Get the interrupt mask for the specified ring buffer.
 */
uint32_t
hv_vmbus_get_ring_buffer_interrupt_mask(hv_vmbus_ring_buffer_info *rbi)
{
        return (rbi->ring_buffer->interrupt_mask);
}

/**
 * @brief Initialize the ring buffer.
 */
int
hv_vmbus_ring_buffer_init(
        hv_vmbus_ring_buffer_info*      ring_info,
        void*                           buffer,
        uint32_t                        buffer_len)
{
        memset(ring_info, 0, sizeof(hv_vmbus_ring_buffer_info));

        ring_info->ring_buffer = (hv_vmbus_ring_buffer*) buffer;
        ring_info->ring_buffer->read_index =
            ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buffer_len;
        ring_info->ring_data_size = buffer_len - sizeof(hv_vmbus_ring_buffer);

        mtx_init(&ring_info->ring_lock, "vmbus ring buffer", NULL, MTX_SPIN);

        return (0);
}
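
#if 0
/*
 * Minimal usage sketch (illustrative only; example_channel_rings_init and
 * its layout are assumptions, not part of this driver): a channel backs
 * its outbound and inbound rings with the two halves of one shared,
 * page-aligned buffer and initializes an info structure for each half.
 */
static int
example_channel_rings_init(hv_vmbus_ring_buffer_info *outbound,
        hv_vmbus_ring_buffer_info *inbound, void *pages, uint32_t half_len)
{
        int error;

        /* First half of the shared buffer backs the outbound ring. */
        error = hv_vmbus_ring_buffer_init(outbound, pages, half_len);
        if (error != 0)
                return (error);

        /* Second half backs the inbound ring. */
        return (hv_vmbus_ring_buffer_init(inbound,
            (char *)pages + half_len, half_len));
}
#endif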

/**
 * @brief Cleanup the ring buffer.
 */
void
hv_ring_buffer_cleanup(hv_vmbus_ring_buffer_info* ring_info)
{
        mtx_destroy(&ring_info->ring_lock);
}

/**
 * @brief Write to the ring buffer.
 */
int
hv_ring_buffer_write(
        hv_vmbus_ring_buffer_info*      out_ring_info,
        hv_vmbus_sg_buffer_list         sg_buffers[],
        uint32_t                        sg_buffer_count)
{
        uint32_t i;
        uint32_t byte_avail_to_write;
        uint32_t byte_avail_to_read;
        uint32_t total_bytes_to_write = 0;

        volatile uint32_t next_write_location;
        uint64_t prev_indices = 0;

        for (i = 0; i < sg_buffer_count; i++) {
            total_bytes_to_write += sg_buffers[i].length;
        }

        /* Account for the 8-byte prev_indices trailer appended below */
        total_bytes_to_write += sizeof(uint64_t);

        mtx_lock_spin(&out_ring_info->ring_lock);

        get_ring_buffer_avail_bytes(out_ring_info, &byte_avail_to_read,
            &byte_avail_to_write);

        /*
         * If there is only just enough room for the packet, treat the ring
         * as full: were we to fill it exactly, read index == write index,
         * which on the next pass is indistinguishable from an empty ring.
         */
        if (byte_avail_to_write <= total_bytes_to_write) {
            mtx_unlock_spin(&out_ring_info->ring_lock);
            return (EAGAIN);
        }

        /*
         * Write to the ring buffer
         */
        next_write_location = get_next_write_location(out_ring_info);

        for (i = 0; i < sg_buffer_count; i++) {
            next_write_location = copy_to_ring_buffer(out_ring_info,
                next_write_location, (char *) sg_buffers[i].data,
                sg_buffers[i].length);
        }

        /*
         * Set previous packet start
         */
        prev_indices = get_ring_buffer_indices(out_ring_info);

        next_write_location = copy_to_ring_buffer(
                out_ring_info, next_write_location,
                (char *) &prev_indices, sizeof(uint64_t));

        /*
         * Make sure we flush all writes before updating the write index
         */
        wmb();

        /*
         * Now, update the write location
         */
        set_next_write_location(out_ring_info, next_write_location);

        mtx_unlock_spin(&out_ring_info->ring_lock);

        return (0);
}
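
#if 0
/*
 * Usage sketch (illustrative only; example_send and the two-part packet
 * layout are assumptions, not part of this driver): callers gather a
 * descriptor and its payload into an sg list and retry later on EAGAIN,
 * which means the ring had no room.
 */
static int
example_send(hv_vmbus_ring_buffer_info *out, void *desc, uint32_t desc_len,
        void *payload, uint32_t payload_len)
{
        hv_vmbus_sg_buffer_list sg[2];

        sg[0].data = desc;
        sg[0].length = desc_len;
        sg[1].data = payload;
        sg[1].length = payload_len;

        /* The 8-byte trailer is added by hv_ring_buffer_write itself. */
        return (hv_ring_buffer_write(out, sg, 2));
}
#endif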

/**
 * @brief Read from the ring buffer without advancing the read index.
 */
int
hv_ring_buffer_peek(
        hv_vmbus_ring_buffer_info*      in_ring_info,
        void*                           buffer,
        uint32_t                        buffer_len)
{
        uint32_t bytes_avail_to_write;
        uint32_t bytes_avail_to_read;
        uint32_t next_read_location = 0;

        mtx_lock_spin(&in_ring_info->ring_lock);

        get_ring_buffer_avail_bytes(in_ring_info, &bytes_avail_to_read,
                &bytes_avail_to_write);

        /*
         * Make sure there is something to read
         */
        if (bytes_avail_to_read < buffer_len) {
            mtx_unlock_spin(&in_ring_info->ring_lock);
            return (EAGAIN);
        }

        /*
         * Fetch the current read location (a byte offset into the ring)
         */
        next_read_location = get_next_read_location(in_ring_info);

        next_read_location = copy_from_ring_buffer(
                in_ring_info, (char *)buffer, buffer_len, next_read_location);

        mtx_unlock_spin(&in_ring_info->ring_lock);

        return (0);
}

/**
 * @brief Read from the ring buffer and advance the read index.
 */
int
hv_ring_buffer_read(
        hv_vmbus_ring_buffer_info*      in_ring_info,
        void*                           buffer,
        uint32_t                        buffer_len,
        uint32_t                        offset)
{
        uint32_t bytes_avail_to_write;
        uint32_t bytes_avail_to_read;
        uint32_t next_read_location = 0;
        uint64_t prev_indices = 0;

        if (buffer_len == 0)
            return (EINVAL);

        mtx_lock_spin(&in_ring_info->ring_lock);

        get_ring_buffer_avail_bytes(
            in_ring_info, &bytes_avail_to_read,
            &bytes_avail_to_write);

        /*
         * Make sure there is something to read
         */
        if (bytes_avail_to_read < buffer_len) {
            mtx_unlock_spin(&in_ring_info->ring_lock);
            return (EAGAIN);
        }

        next_read_location = get_next_read_location_with_offset(
            in_ring_info,
            offset);

        next_read_location = copy_from_ring_buffer(
            in_ring_info,
            (char *) buffer,
            buffer_len,
            next_read_location);

        /* Consume the 8-byte prev_indices trailer as well */
        next_read_location = copy_from_ring_buffer(
            in_ring_info,
            (char *) &prev_indices,
            sizeof(uint64_t),
            next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.  This requires a full barrier, not just
         * a write barrier, because it orders the preceding loads against
         * the following store.
         */
        mb();

        /*
         * Update the read index
         */
        set_next_read_location(in_ring_info, next_read_location);

        mtx_unlock_spin(&in_ring_info->ring_lock);

        return (0);
}
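
#if 0
/*
 * Usage sketch (illustrative only; example_recv and the fixed-size header
 * are assumptions, not part of this driver): peek at the packet header
 * without consuming it, then read the payload, passing the header size as
 * the offset so the header is skipped yet consumed along with the data
 * and the trailer.
 */
static int
example_recv(hv_vmbus_ring_buffer_info *in, void *hdr, uint32_t hdr_len,
        void *buf, uint32_t buf_len)
{
        int error;

        error = hv_ring_buffer_peek(in, hdr, hdr_len);
        if (error != 0)
                return (error);

        /* ... inspect the header, validate buf_len ... */

        return (hv_ring_buffer_read(in, buf, buf_len, hdr_len));
}
#endif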

/**
 * @brief Helper routine to copy from a source buffer into the ring buffer.
 *
 * Assumes there is enough room; handles wrap-around of the destination
 * (ring) side only.
 */
static uint32_t
copy_to_ring_buffer(
        hv_vmbus_ring_buffer_info*      ring_info,
        uint32_t                        start_write_offset,
        char*                           src,
        uint32_t                        src_len)
{
        char *ring_buffer = get_ring_buffer(ring_info);
        uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);
        uint32_t frag_len;

        if (src_len > ring_buffer_size - start_write_offset) {
            /* wrap-around detected! */
            frag_len = ring_buffer_size - start_write_offset;
            memcpy(ring_buffer + start_write_offset, src, frag_len);
            memcpy(ring_buffer, src + frag_len, src_len - frag_len);
        } else {
            memcpy(ring_buffer + start_write_offset, src, src_len);
        }

        start_write_offset += src_len;
        start_write_offset %= ring_buffer_size;

        return (start_write_offset);
}
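
/*
 * Example: with ring_buffer_size = 4096, start_write_offset = 4000 and
 * src_len = 200, the first 96 bytes land at offsets 4000..4095, the
 * remaining 104 bytes wrap to offsets 0..103, and 104 is returned as the
 * next write offset.
 */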

/**
 * @brief Helper routine to copy from the ring buffer into a destination
 * buffer.
 *
 * Assumes there is enough data; handles wrap-around of the source (ring)
 * side only.
 */
static uint32_t
copy_from_ring_buffer(
        hv_vmbus_ring_buffer_info*      ring_info,
        char*                           dest,
        uint32_t                        dest_len,
        uint32_t                        start_read_offset)
{
        uint32_t frag_len;
        char *ring_buffer = get_ring_buffer(ring_info);
        uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);

        if (dest_len > ring_buffer_size - start_read_offset) {
            /* wrap-around detected at the src */
            frag_len = ring_buffer_size - start_read_offset;
            memcpy(dest, ring_buffer + start_read_offset, frag_len);
            memcpy(dest + frag_len, ring_buffer, dest_len - frag_len);
        } else {
            memcpy(dest, ring_buffer + start_read_offset, dest_len);
        }

        start_read_offset += dest_len;
        start_read_offset %= ring_buffer_size;

        return (start_read_offset);
}
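
/*
 * Example: with ring_buffer_size = 4096, start_read_offset = 4000 and
 * dest_len = 200, bytes at offsets 4000..4095 and then 0..103 are copied
 * out, and 104 is returned as the next read offset.
 */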