 * Copyright(c) 2002-2011 Exar Corp.
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Exar Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.

#include <dev/vxge/vxgehal/vxgehal.h>
 * __hal_ring_block_memblock_idx - Return the memblock index
 * @block: Virtual address of memory block
 * This function returns the index of memory block
__hal_ring_block_memblock_idx(
    vxge_hal_ring_block_t block)
    return (u32)*((u64 *) ((void *)((u8 *) block +
        VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET)));

 * __hal_ring_block_memblock_idx_set - Sets the memblock index
 * @block: Virtual address of memory block
 * @memblock_idx: Index of memory block
 * This function sets index to a memory block
__hal_ring_block_memblock_idx_set(
    vxge_hal_ring_block_t block,
    *((u64 *) ((void *)((u8 *) block +
        VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx;

 * __hal_ring_block_next_pointer - Returns the dma address of next block
 * Returns the dma address of next block stored in the RxD block
static inline dma_addr_t
__hal_ring_block_next_pointer(
    vxge_hal_ring_block_t *block)
    return (dma_addr_t)*((u64 *) ((void *)((u8 *) block +
        VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)));

 * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block
 * @dma_next: dma address of next block
 * Sets the next block pointer in RxD block
__hal_ring_block_next_pointer_set(
    vxge_hal_ring_block_t *block,
    *((u64 *) ((void *)((u8 *) block +
        VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next;
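
/*
 * Illustrative note (not part of the original driver): the four helpers
 * above treat an RxD block as raw memory and read or write two u64 fields
 * that live at fixed offsets inside the block, as the macro names suggest:
 * the owning memblock index at VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET, and the
 * DMA address of the next RxD block at
 * VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, which is how blocks are chained
 * for the adapter.  A minimal sketch, assuming two hypothetical blocks
 * blk_a/blk_b and the DMA address blk_b_dma of the second one:
 *
 *     __hal_ring_block_memblock_idx_set((u8 *) blk_a, 0);
 *     __hal_ring_block_next_pointer_set(blk_a, blk_b_dma);
 *     vxge_assert(__hal_ring_block_next_pointer(blk_a) == blk_b_dma);
 */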

 * __hal_ring_first_block_address_get - Returns the dma address of the
 * first RxD block
 * @ringh: Handle to the ring
 * Returns the dma address of the first RxD block
__hal_ring_first_block_address_get(
    vxge_hal_ring_h ringh)
    __hal_ring_t *ring = (__hal_ring_t *) ringh;
    vxge_hal_mempool_dma_t *dma_object;
    dma_object = __hal_mempool_memblock_dma(ring->mempool, 0);
    vxge_assert(dma_object != NULL);
    return (dma_object->addr);

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
 * __hal_ring_item_dma_offset - Return the dma offset of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma offset
 * This function returns the dma offset of a given item
__hal_ring_item_dma_offset(
    vxge_hal_mempool_h mempoolh,
    vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
    __hal_device_t *hldev;
    vxge_assert((mempoolh != NULL) && (item != NULL) &&
        (dma_handle != NULL));
    hldev = (__hal_device_t *) mempool->devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT,
        (ptr_t) mempoolh, (ptr_t) item);
    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx(item);
    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return ((u8 *) item - (u8 *) memblock);

 * __hal_ring_item_dma_addr - Return the dma address of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma address
 * @dma_handle: dma handle
 * This function returns the dma address of a given item
__hal_ring_item_dma_addr(
    vxge_hal_mempool_h mempoolh,
    pci_dma_h *dma_handle)
    vxge_hal_mempool_dma_t *memblock_dma_object;
    vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
    __hal_device_t *hldev;
    ptrdiff_t dma_item_offset;
    vxge_assert((mempoolh != NULL) && (item != NULL) &&
        (dma_handle != NULL));
    hldev = (__hal_device_t *) mempool->devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", "
        "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh,
        (ptr_t) item, (ptr_t) dma_handle);
    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx((u8 *) item);
    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock(
        (vxge_hal_mempool_t *) mempoolh, memblock_idx);
    /* get memblock DMA object by memblock index */
    memblock_dma_object = __hal_mempool_memblock_dma(
        (vxge_hal_mempool_t *) mempoolh, memblock_idx);
    /* calculate offset in the memblock of this item */
    dma_item_offset = (u8 *) item - (u8 *) memblock;
    *dma_handle = memblock_dma_object->handle;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (memblock_dma_object->addr + dma_item_offset);

 * __hal_ring_rxdblock_link - Link the RxD blocks
 * @mempoolh: Handle to the memory pool of the ring
 * @from: RxD block from which to link
 * @to: RxD block to which to link to
 * This function links the given RxD blocks
__hal_ring_rxdblock_link(
    vxge_hal_mempool_h mempoolh,
    vxge_hal_ring_block_t *to_item, *from_item;
    dma_addr_t to_dma, from_dma;
    pci_dma_h to_dma_handle, from_dma_handle;
    __hal_device_t *hldev;
    vxge_assert((mempoolh != NULL) && (ring != NULL));
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", "
        "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to);
    /* get "from" RxD block */
    from_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
        (vxge_hal_mempool_t *) mempoolh, from);
    vxge_assert(from_item);
    /* get "to" RxD block */
    to_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
        (vxge_hal_mempool_t *) mempoolh, to);
    vxge_assert(to_item);
    /* return address of the beginning of previous RxD block */
    to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);
     * set next pointer for this RxD block to point on
     * previous item's DMA start address
    __hal_ring_block_next_pointer_set(from_item, to_dma);
    /* return "from" RxD block's DMA start address */
    from_dma = __hal_ring_item_dma_addr(
        mempoolh, from_item, &from_dma_handle);
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
    /* we must sync "from" RxD block, so hardware will see it */
    vxge_os_dma_sync(ring->channel.pdev,
        from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
        __hal_ring_item_dma_offset(mempoolh, from_item) +
        VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
        VXGE_OS_DMA_DIR_TODEVICE);
    vxge_hal_info_log_ring(
        "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT,
        from, (ptr_t) from_dma, to, (ptr_t) to_dma);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * __hal_ring_mempool_item_alloc - Allocate List blocks for RxD block callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 * This function is callback passed to __hal_mempool_create to create memory
static vxge_hal_status_e
__hal_ring_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    vxge_hal_mempool_dma_t *dma_object,
    __hal_ring_t *ring = (__hal_ring_t *) userdata;
    __hal_device_t *hldev;
    vxge_assert((item != NULL) && (ring != NULL));
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_pool("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_pool(
        "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
        "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
        "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
        "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
        memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
    /* format rxds array */
    for (i = 0; i < ring->rxds_per_block; i++) {
        __hal_ring_rxd_priv_t *rxd_priv;
        vxge_hal_ring_rxd_1_t *rxdp;
        u32 memblock_item_idx;
        u32 dtr_index = item_index * ring->rxds_per_block + i;
        ring->channel.dtr_arr[dtr_index].dtr =
            ((u8 *) item) + i * ring->rxd_size;
         * Note: memblock_item_idx is index of the item within
         * the memblock. For instance, in case of three RxD-blocks
         * per memblock this value can be 0, 1 or 2.
        rxdblock_priv = __hal_mempool_item_priv(
            (vxge_hal_mempool_t *) mempoolh,
        rxdp = (vxge_hal_ring_rxd_1_t *)
            ring->channel.dtr_arr[dtr_index].dtr;
        uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i);
            (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) +
            ring->per_rxd_space));
        ((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index;
        ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv;
        ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv;
        /* pre-format per-RxD Ring's private */
        rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock;
        rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
        rxd_priv->dma_handle = dma_object->handle;
#if defined(VXGE_DEBUG_ASSERT)
        rxd_priv->dma_object = dma_object;
        rxd_priv->db_bytes = ring->rxd_size;
        if (i == (ring->rxds_per_block - 1)) {
            rxd_priv->db_bytes +=
                (((vxge_hal_mempool_t *) mempoolh)->memblock_size -
                (ring->rxds_per_block * ring->rxd_size));
    __hal_ring_block_memblock_idx_set((u8 *) item, memblock_index);
    /* link last one with first one */
    __hal_ring_rxdblock_link(mempoolh, ring, item_index, 0);
    if (item_index > 0) {
        /* link this RxD block with previous one */
        __hal_ring_rxdblock_link(mempoolh, ring, item_index - 1, item_index);
    vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);
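
/*
 * Illustrative note (not part of the original driver): the callback above
 * carves each mempool item (one RxD block) into ring->rxds_per_block
 * descriptors and gives every descriptor a ring-wide index
 *
 *     dtr_index = item_index * ring->rxds_per_block + i;
 *
 * so, for a hypothetical rxds_per_block of 4, block 0 holds descriptors
 * 0..3, block 1 holds 4..7, and so on.  The last descriptor of a block
 * also absorbs the block's unused tail bytes into db_bytes, so that the
 * doorbell byte accounting covers the whole memblock.
 */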

 * __hal_ring_mempool_item_free - Free RxD block callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 * This function is callback passed to __hal_mempool_free to destroy memory
static vxge_hal_status_e
__hal_ring_mempool_item_free(
    vxge_hal_mempool_h mempoolh,
    vxge_hal_mempool_dma_t *dma_object,
    __hal_ring_t *ring = (__hal_ring_t *) userdata;
    __hal_device_t *hldev;
    vxge_assert((item != NULL) && (ring != NULL));
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_pool("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_pool(
        "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
        "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
        "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
        "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
        memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
    vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);

 * __hal_ring_initial_replenish - Initial replenish of RxDs
 * @reopen: Flag to denote if it is open or reopen
 * This function replenishes the RxDs from reserve array to work array
static vxge_hal_status_e
__hal_ring_initial_replenish(
    vxge_hal_reopen_e reopen)
    __hal_device_t *hldev;
    vxge_hal_status_e status;
    vxge_assert(ring != NULL);
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
        (ptr_t) ring, reopen);
    while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) ==
        if (ring->rxd_init) {
            status = ring->rxd_init(ring->channel.vph,
                VXGE_HAL_RING_RXD_INDEX(rxd),
                ring->channel.userdata,
            if (status != VXGE_HAL_OK) {
                vxge_hal_ring_rxd_free(ring->channel.vph, rxd);
                vxge_hal_trace_log_ring("<== %s:%s:%d \
                    __FILE__, __func__, __LINE__, status);
        vxge_hal_ring_rxd_post(ring->channel.vph, rxd);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);

 * __hal_ring_create - Create a Ring
 * @vpath_handle: Handle returned by virtual path open
 * @attr: Ring configuration parameters structure
 * This function creates Ring and initializes it.
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_ring_attr_t *attr)
    vxge_hal_status_e status;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    vxge_hal_ring_config_t *config;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (attr != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) attr);
    if ((vpath_handle == NULL) || (attr == NULL)) {
        vxge_hal_err_log_ring("null pointer passed == > %s : %d",
        vxge_hal_trace_log_ring("<== %s:%s:%d Result:1",
            __FILE__, __func__, __LINE__);
        return (VXGE_HAL_FAIL);
        &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring;
    config->ring_length = ((config->ring_length +
        vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) /
        vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) *
        vxge_hal_ring_rxds_per_block_get(config->buffer_mode);
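    /*
     * Illustrative note (not from the original source): the statement above
     * rounds the configured ring length up to a whole number of RxD blocks,
     * i.e. ring_length = ceil(length / rxds_per_block) * rxds_per_block.
     * For example, with a hypothetical rxds_per_block of 120, a requested
     * length of 500 becomes 600.
     */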
    ring = (__hal_ring_t *) vxge_hal_channel_allocate(
        (vxge_hal_device_h) vp->vpath->hldev,
        VXGE_HAL_CHANNEL_TYPE_RING,
        vxge_hal_err_log_ring("Memory allocation failed == > %s : %d",
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__,
            VXGE_HAL_ERR_OUT_OF_MEMORY);
        return (VXGE_HAL_ERR_OUT_OF_MEMORY);
    vp->vpath->ringh = (vxge_hal_ring_h) ring;
    ring->stats = &vp->vpath->sw_stats->ring_stats;
    ring->config = config;
    ring->callback = attr->callback;
    ring->rxd_init = attr->rxd_init;
    ring->rxd_term = attr->rxd_term;
    ring->indicate_max_pkts = config->indicate_max_pkts;
    ring->buffer_mode = config->buffer_mode;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
    ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode);
    ring->rxd_priv_size =
        sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space;
    ring->per_rxd_space = attr->per_rxd_space;
    ring->rxd_priv_size =
        ((ring->rxd_priv_size + __vxge_os_cacheline_size - 1) /
        __vxge_os_cacheline_size) * __vxge_os_cacheline_size;
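    /*
     * Illustrative note (not from the original source): rxd_priv_size is
     * rounded up to a multiple of the cache line size with the usual
     * ((x + align - 1) / align) * align idiom; e.g. with a 64-byte cache
     * line, a 72-byte per-RxD private area becomes 128 bytes.
     */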
     * how many RxDs can fit into one block. Depends on configured
    ring->rxds_per_block =
        vxge_hal_ring_rxds_per_block_get(config->buffer_mode);
    /* calculate actual RxD block private size */
    ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
    ring->rxd_mem_avail =
        ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size;
    ring->db_byte_count = 0;
    ring->mempool = vxge_hal_mempool_create(
        (vxge_hal_device_h) vp->vpath->hldev,
        VXGE_OS_HOST_PAGE_SIZE,
        VXGE_OS_HOST_PAGE_SIZE,
        ring->rxdblock_priv_size,
        ring->config->ring_length / ring->rxds_per_block,
        ring->config->ring_length / ring->rxds_per_block,
        __hal_ring_mempool_item_alloc,
        __hal_ring_mempool_item_free,
    if (ring->mempool == NULL) {
        __hal_ring_delete(vpath_handle);
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
        return (VXGE_HAL_ERR_OUT_OF_MEMORY);
    status = vxge_hal_channel_initialize(&ring->channel);
    if (status != VXGE_HAL_OK) {
        __hal_ring_delete(vpath_handle);
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__, status);
     * Specifying rxd_init callback means two things:
     * 1) rxds need to be initialized by ULD at channel-open time;
     * 2) rxds need to be posted at channel-open time
     * (that's what the initial_replenish() below does)
     * Currently we don't have a case when the 1) is done without the 2).
    if (ring->rxd_init) {
        if ((status = __hal_ring_initial_replenish(
            VXGE_HAL_OPEN_NORMAL))
            __hal_ring_delete(vpath_handle);
            vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
                __FILE__, __func__, __LINE__, status);
     * initial replenish will increment the counter in its post() routine,
     * we have to reset it
    ring->stats->common_stats.usage_cnt = 0;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);

 * __hal_ring_abort - Terminate and return the RxDs of the ring
 * @ringh: Ring to be reset
 * @reopen: See vxge_hal_reopen_e {}.
 * This function terminates the RxDs of the ring
    vxge_hal_ring_h ringh,
    vxge_hal_reopen_e reopen)
    __hal_device_t *hldev;
    __hal_ring_t *ring = (__hal_ring_t *) ringh;
    vxge_assert(ringh != NULL);
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
        (ptr_t) ringh, reopen);
    if (ring->rxd_term) {
        __hal_channel_for_each_dtr(&ring->channel, rxdh, i) {
            if (!__hal_channel_is_posted_dtr(&ring->channel, i)) {
                ring->rxd_term(ring->channel.vph, rxdh,
                    VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                    VXGE_HAL_RXD_STATE_FREED,
                    ring->channel.userdata,
        __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
        __hal_channel_dtr_complete(&ring->channel);
        if (ring->rxd_term) {
            ring->rxd_term(ring->channel.vph, rxdh,
                VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                VXGE_HAL_RXD_STATE_POSTED,
                ring->channel.userdata,
        __hal_channel_dtr_free(&ring->channel,
            VXGE_HAL_RING_RXD_INDEX(rxdh));
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * __hal_ring_reset - Resets the ring
 * @ringh: Ring to be reset
 * This function resets the ring during vpath reset operation
    vxge_hal_ring_h ringh)
    __hal_ring_t *ring = (__hal_ring_t *) ringh;
    __hal_device_t *hldev;
    vxge_hal_status_e status;
    __hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph;
    vxge_assert(ringh != NULL);
    hldev = (__hal_device_t *) ring->channel.devh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT,
    __hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY);
    status = __hal_channel_reset(&ring->channel);
    if (status != VXGE_HAL_OK) {
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__, status);
    ring->rxd_mem_avail = vph->vpath->rxd_mem_size;
    ring->db_byte_count = 0;
    if (ring->rxd_init) {
        if ((status = __hal_ring_initial_replenish(
            VXGE_HAL_RESET_ONLY))
            vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
                __FILE__, __func__, __LINE__, status);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);

 * __hal_ring_delete - Removes the ring
 * @vpath_handle: Virtual path handle to which this queue belongs
 * This function frees up the memory pool and removes the ring
    vxge_hal_vpath_h vpath_handle)
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert(vpath_handle != NULL);
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
    vxge_assert(ring->channel.pdev);
    __hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);
    vxge_hal_mempool_destroy(ring->mempool);
    vxge_hal_channel_terminate(&ring->channel);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
    vxge_hal_channel_free(&ring->channel);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * __hal_ring_frame_length_set - Set the maximum frame length of recv frames.
 * @vpath: virtual Path
 * @new_frmlen: New frame length
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_ERR_VPATH_NOT_OPEN - The virtual path is not open.
__hal_ring_frame_length_set(
    __hal_virtualpath_t *vpath,
    __hal_device_t *hldev;
    vxge_assert(vpath != NULL);
    hldev = (__hal_device_t *) vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
        (ptr_t) vpath, new_frmlen);
    if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__,
            VXGE_HAL_ERR_VPATH_NOT_OPEN);
        return (VXGE_HAL_ERR_VPATH_NOT_OPEN);
    val64 = vxge_os_pio_mem_read64(
        vpath->hldev->header.pdev,
        vpath->hldev->header.regh0,
        &vpath->vp_reg->rxmac_vcfg0);
    val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
    if (vpath->vp_config->ring.max_frm_len !=
        VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {
        val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
            vpath->vp_config->ring.max_frm_len +
            VXGE_HAL_MAC_HEADER_MAX_SIZE);
        val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
            VXGE_HAL_MAC_HEADER_MAX_SIZE);
    vxge_os_pio_mem_write64(
        vpath->hldev->header.pdev,
        vpath->hldev->header.regh0,
        &vpath->vp_reg->rxmac_vcfg0);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
    return (VXGE_HAL_OK);

 * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
 * with a valid handle.
 * @rxd_priv: Buffer to return pointer to per rxd private space
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via vxge_hal_ring_rxd_post().
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
vxge_hal_ring_rxd_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h * rxdh,
    vxge_hal_status_e status;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
        "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
        (ptr_t) rxdh, (ptr_t) rxd_priv);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
    status = __hal_channel_dtr_reserve(&ring->channel, rxdh);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    if (status == VXGE_HAL_OK) {
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;
        /* instead of memset: reset this RxD */
        rxdp->control_0 = rxdp->control_1 = 0;
        *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);
#if defined(VXGE_OS_MEMORY_CHECK)
        VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);
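
/*
 * Illustrative usage sketch (not part of the driver): a ULD rxd_init
 * callback or refill path would typically pair the reserve call above with
 * a post and, eventually, a doorbell.  Buffer attachment is omitted and
 * "vph" is a hypothetical vpath handle:
 *
 *     vxge_hal_rxd_h rxdh;
 *     void *uld_priv;
 *
 *     if (vxge_hal_ring_rxd_reserve(vph, &rxdh, &uld_priv) ==
 *         VXGE_HAL_OK) {
 *         (attach the receive buffer(s) to rxdh here)
 *         vxge_hal_ring_rxd_post(vph, rxdh);
 *     }
 *     vxge_hal_ring_rxd_post_post_db(vph);
 */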

 * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 * This routine prepares an rxd and posts it
vxge_hal_ring_rxd_pre_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
#if defined(VXGE_DEBUG_ASSERT)
    vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    unsigned long flags;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
#if defined(VXGE_DEBUG_ASSERT)
    /* make sure device overwrites the (illegal) t_code on completion */
    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
    if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
        vxge_hal_rxd_h prev_rxdh;
        __hal_ring_rxd_priv_t *rxdp_priv;
        rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
        if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
            index = ring->channel.length;
            index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;
        prev_rxdh = ring->channel.dtr_arr[index].dtr;
        if (prev_rxdh != NULL &&
            (rxdp_priv->dma_offset & (~0xFFF)) !=
            rxdp_priv->dma_offset) {
            vxge_assert((char *) prev_rxdh +
                ring->rxd_size == rxdh);
    __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
    ring->db_byte_count +=
        VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * vxge_hal_ring_rxd_post_post - Process rxd after post.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 * Processes rxd after post
vxge_hal_ring_rxd_post_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
    vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
    __hal_ring_rxd_priv_t *priv;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
    rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
    rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
    priv = __hal_ring_rxd_priv(ring, rxdp);
    vxge_os_dma_sync(ring->channel.pdev,
        VXGE_OS_DMA_DIR_TODEVICE);
    if (ring->stats->common_stats.usage_cnt > 0)
        ring->stats->common_stats.usage_cnt--;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * vxge_hal_ring_rxd_post - Post descriptor on the ring.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/X3100 interface specification for a given service (LL, etc.).
vxge_hal_ring_rxd_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
    vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    unsigned long flags;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
    /* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
    rxdp->control_0 = 0;
#if defined(VXGE_DEBUG_ASSERT)
    /* make sure device overwrites the (illegal) t_code on completion */
    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
    rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
    rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
    __hal_ring_rxd_priv_t *rxdp_temp1;
    rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
    vxge_os_dma_sync(ring->channel.pdev,
        rxdp_temp1->dma_handle,
        rxdp_temp1->dma_addr,
        rxdp_temp1->dma_offset,
        VXGE_OS_DMA_DIR_TODEVICE);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
    if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
        vxge_hal_rxd_h prev_rxdh;
        __hal_ring_rxd_priv_t *rxdp_temp2;
        rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
            ring->channel.dtr_arr[VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;
        if (prev_rxdh != NULL &&
            (rxdp_temp2->dma_offset & (~0xFFF)) != rxdp_temp2->dma_offset)
            vxge_assert((char *) prev_rxdh + ring->rxd_size == rxdh);
    __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
    ring->db_byte_count +=
        VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    if (ring->stats->common_stats.usage_cnt > 0)
        ring->stats->common_stats.usage_cnt--;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 * Processes rxd after post with memory barrier.
vxge_hal_ring_rxd_post_post_wmb(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh);
    /* Do memory barrier before changing the ownership */
    vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
 * @vpath_handle: virtual Path handle.
 * Post Doorbell after posting the rxd(s).
vxge_hal_ring_rxd_post_post_db(
    vxge_hal_vpath_h vpath_handle)
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    __hal_device_t *hldev;
    vxge_assert(vpath_handle != NULL);
    hldev = (__hal_device_t *) vp->vpath->hldev;
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
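    /*
     * Illustrative note (not from the original source): db_byte_count
     * accumulates the bytes of the RxDs posted since the last doorbell,
     * while rxd_mem_avail tracks how many bytes the adapter can still
     * accept.  The doorbell below therefore posts at most
     * min(db_byte_count, rxd_mem_avail) and carries the remainder over to
     * a later call.
     */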
    if (ring->db_byte_count <= ring->rxd_mem_avail) {
        __hal_rxd_db_post(vpath_handle, ring->db_byte_count);
        ring->rxd_mem_avail -= ring->db_byte_count;
        ring->db_byte_count = 0;
        __hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail);
        ring->db_byte_count -= ring->rxd_mem_avail;
        ring->rxd_mem_avail = 0;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);

 * vxge_hal_ring_is_next_rxd_completed - Check if the next rxd is completed
 * @vpath_handle: Virtual Path handle.
 * Checks if the _next_ completed descriptor is in host memory
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
vxge_hal_ring_is_next_rxd_completed(
    vxge_hal_vpath_h vpath_handle)
    vxge_hal_rxd_h rxdh;
    vxge_hal_ring_rxd_1_t *rxdp;	/* doesn't matter 1, 3 or 5... */
    __hal_device_t *hldev;
    vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    vxge_assert(vpath_handle != NULL);
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
    __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
    rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
    /* check whether it is not the end */
    if ((!(rxdp->control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) &&
        (!(rxdp->control_1 &
        VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) {
        status = VXGE_HAL_OK;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
        __FILE__, __func__, __LINE__, status);

 * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @channelh: Channel handle.
 * @rxdh: Descriptor handle. Returned by HAL.
 * @rxd_priv: Buffer to return a pointer to the per rxd space allocated
 * @t_code: Transfer code, as per X3100 User Guide,
 * Receive Descriptor Format. Returned by HAL.
 * Retrieve the _next_ completed descriptor.
 * HAL uses ring callback (*vxge_hal_ring_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * vxge_hal_ring_callback_f).
 * Implementation-wise, the upper-layer driver is free to call
 * vxge_hal_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HAL)
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case X3100 will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to X3100 User Guide.
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 * See also: vxge_hal_ring_callback_f {},
 * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}.
vxge_hal_ring_rxd_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h *rxdh,
    vxge_hal_ring_rxd_5_t *rxdp;	/* doesn't matter 1, 3 or 5... */
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
    __hal_ring_rxd_priv_t *priv;
    __hal_device_t *hldev;
    vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    u64 own, control_0, control_1;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
        (rxd_priv != NULL) && (t_code != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
        "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv,
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
    __hal_channel_dtr_try_complete(&ring->channel, rxdh);
    rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
     * Note: 24 bytes at most means:
     * - Control_3 in case of 5-buffer mode
     * - Control_1 and Control_2
     * This is the only length that needs to be invalidated
    priv = __hal_ring_rxd_priv(ring, rxdp);
    vxge_os_dma_sync(ring->channel.pdev,
        VXGE_OS_DMA_DIR_FROMDEVICE);
    *t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0);
    control_0 = rxdp->control_0;
    control_1 = rxdp->control_1;
    own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
    /* check whether it is not the end */
    if ((!own && !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) ||
        (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) {
#ifndef VXGE_HAL_IRQ_POLLING
        if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
             * reset it. since we don't want to return
             * garbage to the ULD
            status = VXGE_HAL_COMPLETIONS_REMAIN;
        __hal_channel_dtr_complete(&ring->channel);
        *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);
        ring->rxd_mem_avail +=
            (VXGE_HAL_RING_HAL_PRIV(ring, rxdp))->db_bytes;
        ring->stats->common_stats.usage_cnt++;
        if (ring->stats->common_stats.usage_max <
            ring->stats->common_stats.usage_cnt)
            ring->stats->common_stats.usage_max =
                ring->stats->common_stats.usage_cnt;
        switch (ring->buffer_mode) {
        case VXGE_HAL_RING_RXD_BUFFER_MODE_1:
            ring->channel.poll_bytes +=
                (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET(
        case VXGE_HAL_RING_RXD_BUFFER_MODE_3:
            ring->channel.poll_bytes +=
                (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET(
        case VXGE_HAL_RING_RXD_BUFFER_MODE_5:
            ring->channel.poll_bytes +=
                (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET(
                (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET(
        status = VXGE_HAL_OK;
#ifndef VXGE_HAL_IRQ_POLLING
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
        __FILE__, __func__, __LINE__, status);
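
/*
 * Illustrative usage sketch (not part of the driver): a ULD receive
 * handler would normally drain completions in a loop, handing the transfer
 * code back to the HAL and recycling the descriptor.  "vph" is a
 * hypothetical vpath handle; buffer handling is omitted:
 *
 *     vxge_hal_rxd_h rxdh;
 *     void *rxd_priv;
 *     u8 t_code;
 *
 *     while (vxge_hal_ring_rxd_next_completed(vph, &rxdh, &rxd_priv,
 *         &t_code) == VXGE_HAL_OK) {
 *         if (t_code != 0)
 *             vxge_hal_ring_handle_tcode(vph, rxdh, t_code);
 *         (pass the received buffer up the stack, then either)
 *         vxge_hal_ring_rxd_free(vph, rxdh);
 *         (or re-buffer the descriptor and re-post it via
 *          vxge_hal_ring_rxd_post())
 *     }
 */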

 * vxge_hal_ring_handle_tcode - Handle transfer code.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the X3100 user guide)
 * Handle descriptor's transfer code. The latter comes with each completed
 * Returns: one of the vxge_hal_status_e {} enumerated types.
 * VXGE_HAL_OK - for success.
 * VXGE_HAL_ERR_CRITICAL - when encounters critical error.
vxge_hal_ring_handle_tcode(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh,
    __hal_device_t *hldev;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", "
        "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
        (ptr_t) vpath_handle, (ptr_t) rxdh, t_code);
        /* 0x0: Transfer ok. */
         * 0x1: Layer 3 checksum presentation
         * configuration mismatch.
         * 0x2: Layer 4 checksum presentation
         * configuration mismatch.
         * 0x3: Layer 3 and Layer 4 checksum
         * presentation configuration mismatch.
        /* 0x4: Reserved. */
         * 0x5: Layer 3 error unparseable packet,
         * such as unknown IPv6 header.
         * 0x6: Layer 2 error frame integrity
         * error (such as FCS or ECC).
         * 0x7: Buffer size error the RxD buffer(s)
         * were not appropriately sized and
         * data loss occurred.
        /* 0x8: Internal ECC error RxD corrupted. */
        __hal_device_handle_error(vp->vpath->hldev,
            vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR);
         * 0x9: Benign overflow the contents of
         * Segment1 exceeded the capacity of
         * Buffer1 and the remainder was placed
         * in Buffer2. Segment2 now starts in
         * Buffer3. No data loss or errors occurred.
         * 0xA: Buffer size 0 one of the RxDs
         * assigned buffers has a size of 0 bytes.
        /* 0xB: Reserved. */
         * 0xC: Frame dropped either due to
         * VPath Reset or because of a VPIN mismatch.
        /* 0xD: Reserved. */
        /* 0xE: Reserved. */
         * 0xF: Multiple errors more than one
         * transfer code condition occurred.
        vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
            __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
        return (VXGE_HAL_ERR_INVALID_TCODE);
    vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++;
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
        __FILE__, __func__, __LINE__, VXGE_HAL_OK);
    return (VXGE_HAL_OK);

 * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via vxge_hal_ring_attr.
vxge_hal_ring_rxd_private_get(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    return (VXGE_HAL_RING_ULD_PRIV(
        ((__hal_ring_t *) vp->vpath->ringh), rxdh));

 * vxge_hal_ring_rxd_free - Free descriptor.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor again can
 * - reserved (vxge_hal_ring_rxd_reserve);
 * - posted (vxge_hal_ring_rxd_post);
 * - completed (vxge_hal_ring_rxd_next_completed);
 * - and recycled again (vxge_hal_ring_rxd_free).
 * For alternative state transitions and more details please refer to
vxge_hal_ring_rxd_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    unsigned long flags;
    __hal_device_t *hldev;
    __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
    vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
    hldev = (__hal_device_t *) vp->vpath->hldev;
    vxge_hal_trace_log_ring("==> %s:%s:%d",
        __FILE__, __func__, __LINE__);
    vxge_hal_trace_log_ring(
        "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
        (ptr_t) vpath_handle, (ptr_t) rxdh);
    ring = (__hal_ring_t *) vp->vpath->ringh;
    vxge_assert(ring != NULL);
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
    __hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
#if defined(VXGE_OS_MEMORY_CHECK)
    VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
#if defined(VXGE_HAL_RX_MULTI_POST)
    vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
    vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
    vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
        __FILE__, __func__, __LINE__);