/*
2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <dev/nxge/include/xge-queue.h>
32 * xge_queue_item_data - Get item's data.
35 * Returns: item data(variable size). Note that xge_queue_t
36 * contains items comprized of a fixed xge_queue_item_t "header"
37 * and a variable size data. This function returns the variable
38 * user-defined portion of the queue item.
40 void* xge_queue_item_data(xge_queue_item_t *item)
42 return (char *)item + sizeof(xge_queue_item_t);
46 * __queue_consume - (Lockless) dequeue an item from the specified queue.
48 * @queue: Event queue.
49 * See xge_queue_consume().
 *
 * NOTE(review): "lockless" means this helper takes no lock itself; callers
 * (xge_queue_consume(), and xge_queue_produce() on its critical-event drain
 * path) are expected to hold queue->lock around it.  Items live contiguously
 * in a flat buffer bounded by start_ptr/end_ptr, with head_ptr/tail_ptr
 * delimiting the occupied span; list_head links the items in FIFO order.
51 static xge_queue_status_e
52 __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
55 xge_queue_item_t *elem;
/* Nothing queued: let drain loops terminate. */
57 if (xge_list_is_empty(&queue->list_head))
58 return XGE_QUEUE_IS_EMPTY;
/* Oldest element sits at the front of the FIFO list. */
60 elem = (xge_queue_item_t *)queue->list_head.next;
/* Caller's buffer is too small: leave the element queued. */
61 if (elem->data_size > data_max_size)
62 return XGE_QUEUE_NOT_ENOUGH_SPACE;
64 xge_list_remove(&elem->item);
/* Total footprint of the element: fixed header plus variable payload. */
65 real_size = elem->data_size + sizeof(xge_queue_item_t);
/* Reclaim flat-buffer space only when the element sits at an edge of the
 * occupied region; an interior element just leaves the linked list. */
66 if (queue->head_ptr == elem) {
67 queue->head_ptr = (char *)queue->head_ptr + real_size;
68 xge_debug_queue(XGE_TRACE,
69 "event_type: %d removing from the head: "
70 "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
71 ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
73 (u64)(ulong_t)queue->start_ptr,
74 (u64)(ulong_t)queue->head_ptr,
75 (u64)(ulong_t)queue->tail_ptr,
76 (u64)(ulong_t)queue->end_ptr,
79 } else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
80 queue->tail_ptr = (char *)queue->tail_ptr - real_size;
81 xge_debug_queue(XGE_TRACE,
82 "event_type: %d removing from the tail: "
83 "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
84 ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
86 (u64)(ulong_t)queue->start_ptr,
87 (u64)(ulong_t)queue->head_ptr,
88 (u64)(ulong_t)queue->tail_ptr,
89 (u64)(ulong_t)queue->end_ptr,
93 xge_debug_queue(XGE_TRACE,
94 "event_type: %d removing from the list: "
95 "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
96 ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
98 (u64)(ulong_t)queue->start_ptr,
99 (u64)(ulong_t)queue->head_ptr,
100 (u64)(ulong_t)queue->tail_ptr,
101 (u64)(ulong_t)queue->end_ptr,
/* Sanity: the occupied region must stay ordered and inside the buffer. */
105 xge_assert(queue->tail_ptr >= queue->head_ptr);
106 xge_assert(queue->tail_ptr >= queue->start_ptr &&
107 queue->tail_ptr <= queue->end_ptr);
108 xge_assert(queue->head_ptr >= queue->start_ptr &&
109 queue->head_ptr < queue->end_ptr);
/* Copy the header, then the payload, into the caller-provided area. */
110 xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
111 xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
114 if (xge_list_is_empty(&queue->list_head)) {
115 /* reset buffer pointers just to be clean */
116 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
122 * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
123 * into the specified queue.
124 * @queueh: Queue handle.
125 * @event_type: Event type. One of the enumerated event types
126 * that both consumer and producer "understand".
127 * For an example, please refer to xge_hal_event_e.
128 * @context: Opaque (void*) "context", for instance event producer object.
129 * @is_critical: For critical event, e.g. ECC.
130 * @data_size: Size of the @data.
131 * @data: User data of variable @data_size that is _copied_ into
132 * the new queue item (see xge_queue_item_t{}). Upon return
133 * from the call the @data memory can be re-used or released.
135 * Enqueue a new item.
137 * Returns: XGE_QUEUE_OK - success.
138 * XGE_QUEUE_IS_FULL - Queue is full.
139 * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
141 * See also: xge_queue_item_t{}, xge_queue_consume().
144 xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
145 int is_critical, const int data_size, void *data)
147 xge_queue_t *queue = (xge_queue_t *)queueh;
/* Space the item needs in the flat buffer: fixed header plus payload. */
148 int real_size = data_size + sizeof(xge_queue_item_t);
149 xge_queue_item_t *elem;
150 unsigned long flags = 0;
/* A single item must fit inside one buffer page. */
152 xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);
154 xge_os_spin_lock_irq(&queue->lock, flags);
/* First critical event (e.g. ECC): drain all pending non-critical events
 * so the critical one is guaranteed space in the buffer. */
156 if (is_critical && !queue->has_critical_event) {
157 unsigned char item_buf[sizeof(xge_queue_item_t) +
158 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
159 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
160 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
161 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
/* The lock is already held, so use the lockless helper directly;
 * consumed items are simply discarded. */
163 while (__queue_consume(queue,
164 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
165 item) != XGE_QUEUE_IS_EMPTY)
/* Placement strategy: append past the current tail first... */
170 if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
171 elem = (xge_queue_item_t *) queue->tail_ptr;
172 queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
173 xge_debug_queue(XGE_TRACE,
174 "event_type: %d adding to the tail: "
175 "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
176 ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
178 (u64)(ulong_t)queue->start_ptr,
179 (u64)(ulong_t)queue->head_ptr,
180 (u64)(ulong_t)queue->tail_ptr,
181 (u64)(ulong_t)queue->end_ptr,
/* ...then try the gap before the current head... */
184 } else if ((char *)queue->head_ptr - real_size >=
185 (char *)queue->start_ptr) {
186 elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
187 queue->head_ptr = elem;
188 xge_debug_queue(XGE_TRACE,
189 "event_type: %d adding to the head: "
190 "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
191 ":0x"XGE_OS_LLXFMT" length %d",
193 (u64)(ulong_t)queue->start_ptr,
194 (u64)(ulong_t)queue->head_ptr,
195 (u64)(ulong_t)queue->tail_ptr,
196 (u64)(ulong_t)queue->end_ptr,
/* ...otherwise the buffer is out of room: attempt to grow it. */
199 xge_queue_status_e status;
/* Hard page cap reached - the queue cannot grow any further. */
201 if (queue->pages_current >= queue->pages_max) {
202 xge_os_spin_unlock_irq(&queue->lock, flags);
203 return XGE_QUEUE_IS_FULL;
/* A critical event is already pending; refuse to grow on its behalf. */
206 if (queue->has_critical_event) {
207 xge_os_spin_unlock_irq(&queue->lock, flags);
208 return XGE_QUEUE_IS_FULL;
/* Grow by one page; __io_queue_grow() relocates the flat buffer. */
212 status = __io_queue_grow(queueh);
213 if (status != XGE_QUEUE_OK) {
214 xge_os_spin_unlock_irq(&queue->lock, flags);
/* Sanity: the occupied region stays ordered and inside the buffer. */
220 xge_assert(queue->tail_ptr >= queue->head_ptr);
221 xge_assert(queue->tail_ptr >= queue->start_ptr &&
222 queue->tail_ptr <= queue->end_ptr);
223 xge_assert(queue->head_ptr >= queue->start_ptr &&
224 queue->head_ptr < queue->end_ptr);
/* Fill in the new element and link it at the FIFO tail (i.e. just
 * before the list head sentinel). */
225 elem->data_size = data_size;
226 elem->event_type = (xge_hal_event_e) event_type;
227 elem->is_critical = is_critical;
229 queue->has_critical_event = 1;
230 elem->context = context;
231 xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
232 xge_list_insert_before(&elem->item, &queue->list_head);
233 xge_os_spin_unlock_irq(&queue->lock, flags);
/* Invoke the "queued" notification callback, if one was registered at
 * create time - after the lock is dropped. */
236 queue->queued_func(queue->queued_data, event_type);
243 * xge_queue_create - Create protected first-in-first-out queue.
244 * @pdev: PCI device handle.
245 * @irqh: PCI device IRQ handle.
246 * @pages_initial: Number of pages to be initially allocated at the
247 * time of queue creation.
248 * @pages_max: Max number of pages that can be allocated in the queue.
249 * @queued: Optional callback function to be called each time a new item is
250 * added to the queue.
251 * @queued_data: Argument to the callback function.
253 * Create protected (fifo) queue.
255 * Returns: Pointer to xge_queue_t structure,
258 * See also: xge_queue_item_t{}, xge_queue_destroy().
261 xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
262 int pages_max, xge_queued_f queued, void *queued_data)
/* Allocate the queue descriptor itself. */
266 if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
269 queue->queued_func = queued;
270 queue->queued_data = queued_data;
/* Allocate the initial flat event buffer (pages_initial pages); on
 * failure the descriptor is released again before returning. */
273 queue->pages_current = pages_initial;
274 queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
276 if (queue->start_ptr == NULL) {
277 xge_os_free(pdev, queue, sizeof(xge_queue_t));
/* Empty queue: head and tail both point at the start of the buffer. */
280 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
281 queue->end_ptr = (char *)queue->start_ptr +
282 queue->pages_current * XGE_QUEUE_BUF_SIZE;
283 xge_os_spin_lock_init_irq(&queue->lock, irqh);
284 queue->pages_initial = pages_initial;
285 queue->pages_max = pages_max;
286 xge_list_init(&queue->list_head);
292 * xge_queue_destroy - Destroy xge_queue_t object.
293 * @queueh: Queue handle.
295 * Destroy the specified xge_queue_t object.
297 * See also: xge_queue_item_t{}, xge_queue_create().
299 void xge_queue_destroy(xge_queue_h queueh)
301 xge_queue_t *queue = (xge_queue_t *)queueh;
302 xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
303 if (!xge_list_is_empty(&queue->list_head)) {
304 xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
305 XGE_OS_LLXFMT, (u64)(ulong_t)queue);
307 xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
310 xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
314 * __io_queue_grow - Dynamically increases the size of the queue.
315 * @queueh: Queue handle.
317 * This function is called in the case of no slot avaialble in the queue
318 * to accomodate the newly received event.
319 * Note that queue cannot grow beyond the max size specified for the
 *
 * NOTE(review): called from xge_queue_produce() with queue->lock held.
 * Growing relocates the flat buffer, so every pointer into the old buffer
 * (head/tail, the list sentinel links, and each element's list links) must
 * be rebased onto the new allocation before the old one is freed.
322 * Returns XGE_QUEUE_OK: On success.
323 * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
326 __io_queue_grow(xge_queue_h queueh)
328 xge_queue_t *queue = (xge_queue_t *)queueh;
329 void *newbuf, *oldbuf;
331 xge_queue_item_t *elem;
333 xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
334 (u64)(ulong_t)queue, queue->pages_current);
/* Allocate a buffer one page larger than the current one. */
336 newbuf = xge_os_malloc(queue->pdev,
337 (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
339 return XGE_QUEUE_OUT_OF_MEMORY;
/* Preserve queued items by copying the whole occupied buffer over. */
341 xge_os_memcpy(newbuf, queue->start_ptr,
342 queue->pages_current * XGE_QUEUE_BUF_SIZE);
343 oldbuf = queue->start_ptr;
345 /* adjust queue sizes */
346 queue->start_ptr = newbuf;
347 queue->end_ptr = (char *)newbuf +
348 (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
/* Rebase head/tail: same offsets, new base address. */
349 queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
351 queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
/* The queue must be non-empty here - we grew because an item did not fit. */
353 xge_assert(!xge_list_is_empty(&queue->list_head));
/* Rebase the list sentinel's links into the new buffer. */
354 queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
355 ((char *)queue->list_head.next - (char *)oldbuf));
356 queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
357 ((char *)queue->list_head.prev - (char *)oldbuf));
358 /* adjust queue list */
/* Rebase each element's links, except those pointing back at the
 * sentinel, which lives in the queue descriptor, not in the buffer. */
359 xge_list_for_each(item, &queue->list_head) {
360 elem = xge_container_of(item, xge_queue_item_t, item);
361 if (elem->item.next != &queue->list_head) {
363 (xge_list_t*)(void *)((char *)newbuf +
364 ((char *)elem->item.next - (char *)oldbuf));
366 if (elem->item.prev != &queue->list_head) {
368 (xge_list_t*) (void *)((char *)newbuf +
369 ((char *)elem->item.prev - (char *)oldbuf));
/* Old buffer is no longer referenced; release it and record growth. */
372 xge_os_free(queue->pdev, oldbuf,
373 queue->pages_current * XGE_QUEUE_BUF_SIZE);
374 queue->pages_current++;
380 * xge_queue_consume - Dequeue an item from the specified queue.
381 * @queueh: Queue handle.
382 * @data_max_size: Maximum expected size of the item.
383 * @item: Memory area into which the item is _copied_ upon return
386 * Dequeue an item from the queue. The caller is required to provide
387 * enough space for the item.
389 * Returns: XGE_QUEUE_OK - success.
390 * XGE_QUEUE_IS_EMPTY - Queue is empty.
391 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size(@data_max_size)
392 * is too small to accomodate an item from the queue.
394 * See also: xge_queue_item_t{}, xge_queue_produce().
397 xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
399 xge_queue_t *queue = (xge_queue_t *)queueh;
400 unsigned long flags = 0;
401 xge_queue_status_e status;
403 xge_os_spin_lock_irq(&queue->lock, flags);
404 status = __queue_consume(queue, data_max_size, item);
405 xge_os_spin_unlock_irq(&queue->lock, flags);
412 * xge_queue_flush - Flush, or empty, the queue.
413 * @queueh: Queue handle.
415 * Flush the queue, i.e. make it empty by consuming all events
416 * without invoking the event processing logic (callbacks, etc.)
418 void xge_queue_flush(xge_queue_h queueh)
420 unsigned char item_buf[sizeof(xge_queue_item_t) +
421 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
422 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
423 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
424 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
426 /* flush queue by consuming all enqueued items */
427 while (xge_queue_consume(queueh,
428 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
429 item) != XGE_QUEUE_IS_EMPTY) {
431 xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
432 item, item->event_type);
434 (void) __queue_get_reset_critical (queueh);
438 * __queue_get_reset_critical - Check for critical events in the queue,
441 * Check for critical event(s) in the queue, and reset the
442 * "has-critical-event" flag upon return.
443 * Returns: 1 - if the queue contains atleast one critical event.
444 * 0 - If there are no critical events in the queue.
 *
 * NOTE(review): this test-and-clear is not performed under queue->lock in
 * the visible code -- confirm callers tolerate a racing producer setting
 * the flag concurrently.
446 int __queue_get_reset_critical (xge_queue_h qh) {
447 xge_queue_t* queue = (xge_queue_t*)qh;
/* Snapshot the flag, then clear it. */
448 int c = queue->has_critical_event;
450 queue->has_critical_event = 0;