2 * Copyright(c) 2002-2011 Exar Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification are permitted provided the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Exar Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include <dev/vxge/vxgehal/vxgehal.h>
36 * __hal_ring_block_memblock_idx - Return the memblock index
37 * @block: Virtual address of memory block
39 * This function returns the index of memory block
42 __hal_ring_block_memblock_idx(
43 vxge_hal_ring_block_t block)
45 return (u32)*((u64 *) ((void *)((u8 *) block +
46 VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET)));
50 * __hal_ring_block_memblock_idx_set - Sets the memblock index
51 * @block: Virtual address of memory block
52 * @memblock_idx: Index of memory block
54 * This function sets index to a memory block
57 __hal_ring_block_memblock_idx_set(
58 vxge_hal_ring_block_t block,
61 *((u64 *) ((void *)((u8 *) block +
62 VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx;
67 * __hal_ring_block_next_pointer - Returns the dma address of next block
70 * Returns the dma address of next block stored in the RxD block
72 static inline dma_addr_t
74 __hal_ring_block_next_pointer(
75 vxge_hal_ring_block_t *block)
77 return (dma_addr_t)*((u64 *) ((void *)((u8 *) block +
78 VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)));
83 * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block
85 * @dma_next: dma address of next block
87 * Sets the next block pointer in RxD block
90 __hal_ring_block_next_pointer_set(
91 vxge_hal_ring_block_t *block,
94 *((u64 *) ((void *)((u8 *) block +
95 VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next;
99 * __hal_ring_first_block_address_get - Returns the dma address of the
101 * @ringh: Handle to the ring
103 * Returns the dma address of the first RxD block
106 __hal_ring_first_block_address_get(
107 vxge_hal_ring_h ringh)
109 __hal_ring_t *ring = (__hal_ring_t *) ringh;
110 vxge_hal_mempool_dma_t *dma_object;
112 dma_object = __hal_mempool_memblock_dma(ring->mempool, 0);
114 vxge_assert(dma_object != NULL);
116 return (dma_object->addr);
/*
 * NOTE(review): the following text is an incomplete extraction — lines
 * were dropped (return type, parameter list tail, local declarations,
 * closing braces, the matching #endif) and each line carries a stray
 * leading line number. Code is kept verbatim; only comments added.
 * Purpose (from visible code): compute an item's byte offset within its
 * owning mempool memblock, found via the index stored in the block.
 */
120 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
122 * __hal_ring_item_dma_offset - Return the dma offset of an item
123 * @mempoolh: Handle to the memory pool of the ring
124 * @item: Item for which to get the dma offset
126 * This function returns the dma offset of a given item
129 __hal_ring_item_dma_offset(
130 vxge_hal_mempool_h mempoolh,
135 vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
136 __hal_device_t *hldev;
/* NOTE(review): assert mentions dma_handle although no such parameter is
 * visible here — possibly copy/paste from a sibling; confirm upstream. */
138 vxge_assert((mempoolh != NULL) && (item != NULL) &&
139 (dma_handle != NULL));
141 hldev = (__hal_device_t *) mempool->devh;
143 vxge_hal_trace_log_ring("==> %s:%s:%d",
144 __FILE__, __func__, __LINE__);
146 vxge_hal_trace_log_ring(
147 "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT,
148 (ptr_t) mempoolh, (ptr_t) item);
150 /* get owner memblock index */
151 memblock_idx = __hal_ring_block_memblock_idx(item);
153 /* get owner memblock by memblock index */
154 memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
156 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
157 __FILE__, __func__, __LINE__);
/* Offset = item address minus start of its owning memblock. */
159 return ((u8 *) item - (u8 *) memblock);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): resolve an item's bus/DMA address — look
 * up the owning memblock via the index stored in the RxD block, return
 * memblock DMA base + item offset, and hand back the DMA handle via
 * @dma_handle (out parameter).
 */
164 * __hal_ring_item_dma_addr - Return the dma address of an item
165 * @mempoolh: Handle to the memory pool of the ring
166 * @item: Item for which to get the dma offset
167 * @dma_handle: dma handle
169 * This function returns the dma address of a given item
172 __hal_ring_item_dma_addr(
173 vxge_hal_mempool_h mempoolh,
175 pci_dma_h *dma_handle)
179 vxge_hal_mempool_dma_t *memblock_dma_object;
180 vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
181 __hal_device_t *hldev;
182 ptrdiff_t dma_item_offset;
184 vxge_assert((mempoolh != NULL) && (item != NULL) &&
185 (dma_handle != NULL));
187 hldev = (__hal_device_t *) mempool->devh;
189 vxge_hal_trace_log_ring("==> %s:%s:%d",
190 __FILE__, __func__, __LINE__);
192 vxge_hal_trace_log_ring(
193 "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", "
194 "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh,
195 (ptr_t) item, (ptr_t) dma_handle);
197 /* get owner memblock index */
198 memblock_idx = __hal_ring_block_memblock_idx((u8 *) item);
200 /* get owner memblock by memblock index */
201 memblock = __hal_mempool_memblock(
202 (vxge_hal_mempool_t *) mempoolh, memblock_idx);
204 /* get memblock DMA object by memblock index */
205 memblock_dma_object = __hal_mempool_memblock_dma(
206 (vxge_hal_mempool_t *) mempoolh, memblock_idx);
208 /* calculate offset in the memblock of this item */
210 dma_item_offset = (u8 *) item - (u8 *) memblock;
/* Out-parameter: DMA handle of the owning memblock. */
212 *dma_handle = memblock_dma_object->handle;
214 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
215 __FILE__, __func__, __LINE__);
/* DMA address = memblock DMA base + byte offset of this item. */
217 return (memblock_dma_object->addr + dma_item_offset);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): chain RxD block @from to RxD block @to by
 * writing @to's DMA start address into @from's next-block-pointer slot,
 * then (when streaming DMA sync is configured) sync that slot to device.
 */
221 * __hal_ring_rxdblock_link - Link the RxD blocks
222 * @mempoolh: Handle to the memory pool of the ring
224 * @from: RxD block from which to link
225 * @to: RxD block to which to link to
226 * This function returns the dma address of a given item
230 __hal_ring_rxdblock_link(
231 vxge_hal_mempool_h mempoolh,
236 vxge_hal_ring_block_t *to_item, *from_item;
237 dma_addr_t to_dma, from_dma;
238 pci_dma_h to_dma_handle, from_dma_handle;
239 __hal_device_t *hldev;
241 vxge_assert((mempoolh != NULL) && (ring != NULL));
243 hldev = (__hal_device_t *) ring->channel.devh;
245 vxge_hal_trace_log_ring("==> %s:%s:%d",
246 __FILE__, __func__, __LINE__);
248 vxge_hal_trace_log_ring(
249 "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", "
250 "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to);
252 /* get "from" RxD block */
253 from_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
254 (vxge_hal_mempool_t *) mempoolh, from);
255 vxge_assert(from_item);
257 /* get "to" RxD block */
258 to_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
259 (vxge_hal_mempool_t *) mempoolh, to);
260 vxge_assert(to_item);
262 /* return address of the beginning of previous RxD block */
263 to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);
266 * set next pointer for this RxD block to point on
267 * previous item's DMA start address
269 __hal_ring_block_next_pointer_set(from_item, to_dma);
271 /* return "from" RxD block's DMA start address */
272 from_dma = __hal_ring_item_dma_addr(
273 mempoolh, from_item, &from_dma_handle);
275 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
276 /* we must sync "from" RxD block, so hardware will see it */
277 vxge_os_dma_sync(ring->channel.pdev,
279 from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
280 __hal_ring_item_dma_offset(mempoolh, from_item) +
281 VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
283 VXGE_OS_DMA_DIR_TODEVICE);
/* NOTE(review): matching #endif for the sync block was lost in extraction. */
286 vxge_hal_info_log_ring(
287 "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT,
288 from, (ptr_t) from_dma, to, (ptr_t) to_dma);
290 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
291 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): mempool per-item allocation callback —
 * for each RxD in the newly allocated block, fill the channel dtr_arr
 * slot, set up ULD/HAL private areas and per-RxD DMA offset/address,
 * stamp the memblock index into the block, and link it into the chain
 * of RxD blocks (last->first, previous->this).
 */
296 * __hal_ring_mempool_item_alloc - Allocate List blocks for RxD block callback
297 * @mempoolh: Handle to memory pool
298 * @memblock: Address of this memory block
299 * @memblock_index: Index of this memory block
300 * @dma_object: dma object for this block
301 * @item: Pointer to this item
302 * @index: Index of this item in memory block
303 * @is_last: If this is last item in the block
304 * @userdata: Specific data of user
306 * This function is callback passed to __hal_mempool_create to create memory
309 static vxge_hal_status_e
310 __hal_ring_mempool_item_alloc(
311 vxge_hal_mempool_h mempoolh,
314 vxge_hal_mempool_dma_t *dma_object,
321 __hal_ring_t *ring = (__hal_ring_t *) userdata;
322 __hal_device_t *hldev;
324 vxge_assert((item != NULL) && (ring != NULL));
326 hldev = (__hal_device_t *) ring->channel.devh;
328 vxge_hal_trace_log_pool("==> %s:%s:%d",
329 __FILE__, __func__, __LINE__);
331 vxge_hal_trace_log_pool(
332 "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
333 "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
334 "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
335 "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
336 memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
339 /* format rxds array */
340 for (i = 0; i < ring->rxds_per_block; i++) {
344 __hal_ring_rxd_priv_t *rxd_priv;
345 vxge_hal_ring_rxd_1_t *rxdp;
346 u32 memblock_item_idx;
/* dtr_index is the descriptor's global index across all blocks. */
347 u32 dtr_index = item_index * ring->rxds_per_block + i;
349 ring->channel.dtr_arr[dtr_index].dtr =
350 ((u8 *) item) + i * ring->rxd_size;
353 * Note: memblock_item_idx is index of the item within
354 * the memblock. For instance, in case of three RxD-blocks
355 * per memblock this value can be 0, 1 or 2.
357 rxdblock_priv = __hal_mempool_item_priv(
358 (vxge_hal_mempool_t *) mempoolh,
363 rxdp = (vxge_hal_ring_rxd_1_t *)
364 ring->channel.dtr_arr[dtr_index].dtr;
/* ULD private area precedes the HAL private area for each RxD. */
366 uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i);
368 (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) +
369 ring->per_rxd_space));
/* host_control lets completion handling recover the dtr index. */
371 ((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index;
373 ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv;
374 ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv;
376 /* pre-format per-RxD Ring's private */
378 rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock;
379 rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
380 rxd_priv->dma_handle = dma_object->handle;
381 #if defined(VXGE_DEBUG_ASSERT)
382 rxd_priv->dma_object = dma_object;
384 rxd_priv->db_bytes = ring->rxd_size;
/* Last RxD in a block also accounts for the block's tail padding. */
386 if (i == (ring->rxds_per_block - 1)) {
387 rxd_priv->db_bytes +=
388 (((vxge_hal_mempool_t *) mempoolh)->memblock_size -
389 (ring->rxds_per_block * ring->rxd_size));
393 __hal_ring_block_memblock_idx_set((u8 *) item, memblock_index);
395 /* link last one with first one */
396 __hal_ring_rxdblock_link(mempoolh, ring, item_index, 0);
399 if (item_index > 0) {
400 /* link this RxD block with previous one */
401 __hal_ring_rxdblock_link(mempoolh, ring, item_index - 1, item_index);
404 vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
405 __FILE__, __func__, __LINE__);
407 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): mempool per-item free callback — performs
 * no per-item teardown beyond trace logging; always returns VXGE_HAL_OK.
 */
411 * __hal_ring_mempool_item_free - Free RxD blockt callback
412 * @mempoolh: Handle to memory pool
413 * @memblock: Address of this memory block
414 * @memblock_index: Index of this memory block
415 * @dma_object: dma object for this block
416 * @item: Pointer to this item
417 * @index: Index of this item in memory block
418 * @is_last: If this is last item in the block
419 * @userdata: Specific data of user
421 * This function is callback passed to __hal_mempool_free to destroy memory
424 static vxge_hal_status_e
425 __hal_ring_mempool_item_free(
426 vxge_hal_mempool_h mempoolh,
429 vxge_hal_mempool_dma_t *dma_object,
435 __hal_ring_t *ring = (__hal_ring_t *) userdata;
436 __hal_device_t *hldev;
438 vxge_assert((item != NULL) && (ring != NULL));
440 hldev = (__hal_device_t *) ring->channel.devh;
442 vxge_hal_trace_log_pool("==> %s:%s:%d",
443 __FILE__, __func__, __LINE__);
445 vxge_hal_trace_log_pool(
446 "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
447 "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
448 "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
449 "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
450 memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
453 vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
454 __FILE__, __func__, __LINE__);
456 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): at open/reset time, reserve every
 * available RxD, let the ULD's rxd_init callback initialize it (freeing
 * the RxD and bailing out with the callback's status on failure), and
 * post it to the ring; returns VXGE_HAL_OK when the reserve loop drains.
 */
460 * __hal_ring_initial_replenish - Initial replenish of RxDs
462 * @reopen: Flag to denote if it is open or repopen
464 * This function replenishes the RxDs from reserve array to work array
466 static vxge_hal_status_e
467 __hal_ring_initial_replenish(
469 vxge_hal_reopen_e reopen)
473 __hal_device_t *hldev;
474 vxge_hal_status_e status;
476 vxge_assert(ring != NULL);
478 hldev = (__hal_device_t *) ring->channel.devh;
480 vxge_hal_trace_log_ring("==> %s:%s:%d",
481 __FILE__, __func__, __LINE__);
483 vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
484 (ptr_t) ring, reopen);
/* Loop until no more descriptors can be reserved. */
486 while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) ==
489 if (ring->rxd_init) {
490 status = ring->rxd_init(ring->channel.vph,
493 VXGE_HAL_RING_RXD_INDEX(rxd),
494 ring->channel.userdata,
496 if (status != VXGE_HAL_OK) {
/* ULD refused this RxD: give it back and propagate the error. */
497 vxge_hal_ring_rxd_free(ring->channel.vph, rxd);
498 vxge_hal_trace_log_ring("<== %s:%s:%d \
500 __FILE__, __func__, __LINE__, status);
505 vxge_hal_ring_rxd_post(ring->channel.vph, rxd);
508 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
509 __FILE__, __func__, __LINE__);
510 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): create and initialize a ring for a vpath —
 * round ring_length up to a whole number of RxD blocks, allocate the
 * channel, copy config/callbacks, init the post lock, compute RxD and
 * private-area sizing (cacheline aligned), create the backing mempool
 * (which formats and links all RxD blocks via the item_alloc callback),
 * initialize the channel, and optionally do the initial replenish.
 * Cleanup on failure funnels through __hal_ring_delete.
 */
514 * __hal_ring_create - Create a Ring
515 * @vpath_handle: Handle returned by virtual path open
516 * @attr: Ring configuration parameters structure
518 * This function creates Ring and initializes it.
523 vxge_hal_vpath_h vpath_handle,
524 vxge_hal_ring_attr_t *attr)
526 vxge_hal_status_e status;
528 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
529 vxge_hal_ring_config_t *config;
530 __hal_device_t *hldev;
532 vxge_assert((vpath_handle != NULL) && (attr != NULL));
534 hldev = (__hal_device_t *) vp->vpath->hldev;
536 vxge_hal_trace_log_ring("==> %s:%s:%d",
537 __FILE__, __func__, __LINE__);
539 vxge_hal_trace_log_ring(
540 "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
541 (ptr_t) vpath_handle, (ptr_t) attr);
/* NOTE(review): redundant with the assert above, but kept — the assert
 * may compile out in release builds. */
543 if ((vpath_handle == NULL) || (attr == NULL)) {
544 vxge_hal_err_log_ring("null pointer passed == > %s : %d",
546 vxge_hal_trace_log_ring("<== %s:%s:%d Result:1",
547 __FILE__, __func__, __LINE__);
548 return (VXGE_HAL_FAIL);
552 &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring;
/* Round ring_length up to a multiple of RxDs-per-block. */
554 config->ring_length = ((config->ring_length +
555 vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) /
556 vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) *
557 vxge_hal_ring_rxds_per_block_get(config->buffer_mode);
559 ring = (__hal_ring_t *) vxge_hal_channel_allocate(
560 (vxge_hal_device_h) vp->vpath->hldev,
562 VXGE_HAL_CHANNEL_TYPE_RING,
568 vxge_hal_err_log_ring("Memory allocation failed == > %s : %d",
570 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
571 __FILE__, __func__, __LINE__,
572 VXGE_HAL_ERR_OUT_OF_MEMORY);
573 return (VXGE_HAL_ERR_OUT_OF_MEMORY);
576 vp->vpath->ringh = (vxge_hal_ring_h) ring;
578 ring->stats = &vp->vpath->sw_stats->ring_stats;
580 ring->config = config;
581 ring->callback = attr->callback;
582 ring->rxd_init = attr->rxd_init;
583 ring->rxd_term = attr->rxd_term;
585 ring->indicate_max_pkts = config->indicate_max_pkts;
586 ring->buffer_mode = config->buffer_mode;
588 #if defined(VXGE_HAL_RX_MULTI_POST)
589 vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
590 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
591 vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
594 ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode);
/* Per-RxD private = HAL private struct + ULD-requested extra space. */
595 ring->rxd_priv_size =
596 sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space;
597 ring->per_rxd_space = attr->per_rxd_space;
/* Round the private size up to a whole number of cachelines. */
599 ring->rxd_priv_size =
600 ((ring->rxd_priv_size + __vxge_os_cacheline_size - 1) /
601 __vxge_os_cacheline_size) * __vxge_os_cacheline_size;
604 * how many RxDs can fit into one block. Depends on configured
607 ring->rxds_per_block =
608 vxge_hal_ring_rxds_per_block_get(config->buffer_mode);
610 /* calculate actual RxD block private size */
611 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
613 ring->rxd_mem_avail =
614 ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size;
616 ring->db_byte_count = 0;
/* One mempool item per RxD block; item_alloc formats and links blocks. */
618 ring->mempool = vxge_hal_mempool_create(
619 (vxge_hal_device_h) vp->vpath->hldev,
620 VXGE_OS_HOST_PAGE_SIZE,
621 VXGE_OS_HOST_PAGE_SIZE,
622 ring->rxdblock_priv_size,
623 ring->config->ring_length / ring->rxds_per_block,
624 ring->config->ring_length / ring->rxds_per_block,
625 __hal_ring_mempool_item_alloc,
626 __hal_ring_mempool_item_free,
629 if (ring->mempool == NULL) {
630 __hal_ring_delete(vpath_handle);
631 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
632 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
633 return (VXGE_HAL_ERR_OUT_OF_MEMORY);
636 status = vxge_hal_channel_initialize(&ring->channel);
637 if (status != VXGE_HAL_OK) {
638 __hal_ring_delete(vpath_handle);
639 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
640 __FILE__, __func__, __LINE__, status);
647 * Specifying rxd_init callback means two things:
648 * 1) rxds need to be initialized by ULD at channel-open time;
649 * 2) rxds need to be posted at channel-open time
650 * (that's what the initial_replenish() below does)
651 * Currently we don't have a case when the 1) is done without the 2).
653 if (ring->rxd_init) {
654 if ((status = __hal_ring_initial_replenish(
656 VXGE_HAL_OPEN_NORMAL))
658 __hal_ring_delete(vpath_handle);
659 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
660 __FILE__, __func__, __LINE__, status);
666 * initial replenish will increment the counter in its post() routine,
667 * we have to reset it
669 ring->stats->common_stats.usage_cnt = 0;
671 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
672 __FILE__, __func__, __LINE__);
673 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): tear down all RxDs of a ring — invoke the
 * ULD's rxd_term callback with FREED state for non-posted descriptors,
 * then drain posted descriptors (try_complete/complete), terminating
 * each with POSTED state before freeing its dtr slot.
 */
677 * __hal_ring_abort - Returns the RxD
678 * @ringh: Ring to be reset
679 * @reopen: See vxge_hal_reopen_e {}.
681 * This function terminates the RxDs of ring
685 vxge_hal_ring_h ringh,
686 vxge_hal_reopen_e reopen)
691 __hal_device_t *hldev;
692 __hal_ring_t *ring = (__hal_ring_t *) ringh;
694 vxge_assert(ringh != NULL);
696 hldev = (__hal_device_t *) ring->channel.devh;
698 vxge_hal_trace_log_ring("==> %s:%s:%d",
699 __FILE__, __func__, __LINE__);
701 vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
702 (ptr_t) ringh, reopen);
/* First pass: terminate descriptors never handed to the device. */
704 if (ring->rxd_term) {
705 __hal_channel_for_each_dtr(&ring->channel, rxdh, i) {
706 if (!__hal_channel_is_posted_dtr(&ring->channel, i)) {
707 ring->rxd_term(ring->channel.vph, rxdh,
708 VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
709 VXGE_HAL_RXD_STATE_FREED,
710 ring->channel.userdata,
/* Second pass (loop header lost in extraction): drain posted RxDs. */
717 __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
721 __hal_channel_dtr_complete(&ring->channel);
722 if (ring->rxd_term) {
723 ring->rxd_term(ring->channel.vph, rxdh,
724 VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
725 VXGE_HAL_RXD_STATE_POSTED,
726 ring->channel.userdata,
729 __hal_channel_dtr_free(&ring->channel,
730 VXGE_HAL_RING_RXD_INDEX(rxdh));
733 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
734 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): reset the ring during a vpath reset —
 * abort outstanding RxDs, reset the channel, restore the RxD memory
 * budget and doorbell byte count, then re-replenish if the ULD supplied
 * an rxd_init callback.
 */
738 * __hal_ring_reset - Resets the ring
739 * @ringh: Ring to be reset
741 * This function resets the ring during vpath reset operation
745 vxge_hal_ring_h ringh)
747 __hal_ring_t *ring = (__hal_ring_t *) ringh;
748 __hal_device_t *hldev;
749 vxge_hal_status_e status;
750 __hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph;
752 vxge_assert(ringh != NULL);
754 hldev = (__hal_device_t *) ring->channel.devh;
756 vxge_hal_trace_log_ring("==> %s:%s:%d",
757 __FILE__, __func__, __LINE__);
759 vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT,
762 __hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY);
764 status = __hal_channel_reset(&ring->channel);
766 if (status != VXGE_HAL_OK) {
768 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
769 __FILE__, __func__, __LINE__, status);
/* Restore the per-vpath RxD memory budget and doorbell accounting. */
773 ring->rxd_mem_avail = vph->vpath->rxd_mem_size;
774 ring->db_byte_count = 0;
777 if (ring->rxd_init) {
778 if ((status = __hal_ring_initial_replenish(
780 VXGE_HAL_RESET_ONLY))
782 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
783 __FILE__, __func__, __LINE__, status);
788 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
789 __FILE__, __func__, __LINE__);
791 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): destroy a vpath's ring — abort all RxDs,
 * destroy the backing mempool, terminate the channel, destroy the post
 * lock, and free the channel structure.
 */
795 * __hal_ring_delete - Removes the ring
796 * @vpath_handle: Virtual path handle to which this queue belongs
798 * This function freeup the memory pool and removes the ring
802 vxge_hal_vpath_h vpath_handle)
804 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
805 __hal_device_t *hldev;
808 vxge_assert(vpath_handle != NULL);
810 hldev = (__hal_device_t *) vp->vpath->hldev;
812 vxge_hal_trace_log_ring("==> %s:%s:%d",
813 __FILE__, __func__, __LINE__);
815 vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
816 (ptr_t) vpath_handle);
818 ring = (__hal_ring_t *) vp->vpath->ringh;
820 vxge_assert(ring != NULL);
822 vxge_assert(ring->channel.pdev);
824 __hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);
/* NOTE(review): likely guarded by a mempool NULL check in the original;
 * guard lines were lost in extraction. */
828 vxge_hal_mempool_destroy(ring->mempool);
831 vxge_hal_channel_terminate(&ring->channel);
833 #if defined(VXGE_HAL_RX_MULTI_POST)
834 vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
835 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
836 vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
839 vxge_hal_channel_free(&ring->channel);
841 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
842 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): program the vpath's maximum receive frame
 * length — read-modify-write of the RTS_MAX_FRM_LEN field (14 bits) in
 * the rxmac_vcfg0 register; a configured fixed max_frm_len takes
 * precedence over @new_frmlen (MTU-derived path).
 */
847 * __hal_ring_frame_length_set - Set the maximum frame length of recv frames.
848 * @vpath: virtual Path
849 * @new_frmlen: New frame length
852 * Returns: VXGE_HAL_OK - success.
853 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
857 __hal_ring_frame_length_set(
858 __hal_virtualpath_t *vpath,
862 __hal_device_t *hldev;
864 vxge_assert(vpath != NULL);
866 hldev = (__hal_device_t *) vpath->hldev;
868 vxge_hal_trace_log_ring("==> %s:%s:%d",
869 __FILE__, __func__, __LINE__);
871 vxge_hal_trace_log_ring(
872 "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
873 (ptr_t) vpath, new_frmlen);
875 if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {
877 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
878 __FILE__, __func__, __LINE__,
879 VXGE_HAL_ERR_VPATH_NOT_OPEN);
880 return (VXGE_HAL_ERR_VPATH_NOT_OPEN);
884 val64 = vxge_os_pio_mem_read64(
885 vpath->hldev->header.pdev,
886 vpath->hldev->header.regh0,
887 &vpath->vp_reg->rxmac_vcfg0);
/* Clear the 14-bit max-frame-length field before setting it. */
889 val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
891 if (vpath->vp_config->ring.max_frm_len !=
892 VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {
/* Fixed config value wins; MAC header size is added on top. */
894 val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
895 vpath->vp_config->ring.max_frm_len +
896 VXGE_HAL_MAC_HEADER_MAX_SIZE);
900 val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
901 VXGE_HAL_MAC_HEADER_MAX_SIZE);
904 vxge_os_pio_mem_write64(
905 vpath->hldev->header.pdev,
906 vpath->hldev->header.regh0,
908 &vpath->vp_reg->rxmac_vcfg0);
910 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
911 __FILE__, __func__, __LINE__);
913 return (VXGE_HAL_OK);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): public API — reserve one RxD from the
 * channel under the post lock; on success zero its control words
 * (cheaper than memset) and return the ULD private pointer via
 * @rxd_priv.
 */
917 * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
918 * @vpath_handle: virtual Path handle.
919 * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
920 * with a valid handle.
921 * @rxd_priv: Buffer to return pointer to per rxd private space
923 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
924 * driver (ULD)) and posting on the corresponding channel (@channelh)
925 * via vxge_hal_ring_rxd_post().
927 * Returns: VXGE_HAL_OK - success.
928 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
932 vxge_hal_ring_rxd_reserve(
933 vxge_hal_vpath_h vpath_handle,
934 vxge_hal_rxd_h * rxdh,
937 vxge_hal_status_e status;
938 #if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
941 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
942 __hal_device_t *hldev;
945 vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
948 hldev = (__hal_device_t *) vp->vpath->hldev;
950 vxge_hal_trace_log_ring("==> %s:%s:%d",
951 __FILE__, __func__, __LINE__);
953 vxge_hal_trace_log_ring(
954 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
955 "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
956 (ptr_t) rxdh, (ptr_t) rxd_priv);
958 ring = (__hal_ring_t *) vp->vpath->ringh;
960 vxge_assert(ring != NULL);
/* Reserve must be serialized with concurrent posters. */
962 #if defined(VXGE_HAL_RX_MULTI_POST)
963 vxge_os_spin_lock(&ring->channel.post_lock);
964 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
965 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
968 status = __hal_channel_dtr_reserve(&ring->channel, rxdh);
970 #if defined(VXGE_HAL_RX_MULTI_POST)
971 vxge_os_spin_unlock(&ring->channel.post_lock);
972 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
973 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
976 if (status == VXGE_HAL_OK) {
977 vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;
979 /* instead of memset: reset this RxD */
980 rxdp->control_0 = rxdp->control_1 = 0;
982 *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);
984 #if defined(VXGE_OS_MEMORY_CHECK)
985 VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
989 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
990 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): public API — mark an RxD as posted in the
 * channel bookkeeping (under the post lock) and add its db_bytes to the
 * ring's doorbell byte count; debug builds additionally poison t_code
 * and, with VXGE_HAL_RING_ENFORCE_ORDER, assert RxDs are posted in
 * address order within a 4 KB block.
 */
995 * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
996 * @vpath_handle: virtual Path handle.
997 * @rxdh: Descriptor handle.
999 * This routine prepares a rxd and posts
1002 vxge_hal_ring_rxd_pre_post(
1003 vxge_hal_vpath_h vpath_handle,
1004 vxge_hal_rxd_h rxdh)
1007 #if defined(VXGE_DEBUG_ASSERT)
1008 vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
1012 #if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1013 unsigned long flags;
1016 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1017 __hal_device_t *hldev;
1020 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1022 hldev = (__hal_device_t *) vp->vpath->hldev;
1024 vxge_hal_trace_log_ring("==> %s:%s:%d",
1025 __FILE__, __func__, __LINE__);
1027 vxge_hal_trace_log_ring(
1028 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1029 (ptr_t) vpath_handle, (ptr_t) rxdh);
1031 ring = (__hal_ring_t *) vp->vpath->ringh;
1033 vxge_assert(ring != NULL);
1035 #if defined(VXGE_DEBUG_ASSERT)
1036 /* make sure device overwrites the (illegal) t_code on completion */
1038 VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
1041 #if defined(VXGE_HAL_RX_MULTI_POST)
1042 vxge_os_spin_lock(&ring->channel.post_lock);
1043 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1044 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Debug-only ordering check: consecutive RxDs in the same block must be
 * posted in address order. */
1047 #if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
1049 if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
1050 vxge_hal_rxd_h prev_rxdh;
1051 __hal_ring_rxd_priv_t *rxdp_priv;
1054 rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
1056 if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
1057 index = ring->channel.length;
1059 index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;
1061 prev_rxdh = ring->channel.dtr_arr[index].dtr;
1063 if (prev_rxdh != NULL &&
1064 (rxdp_priv->dma_offset & (~0xFFF)) !=
1065 rxdp_priv->dma_offset) {
1066 vxge_assert((char *) prev_rxdh +
1067 ring->rxd_size == rxdh);
1073 __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
1075 ring->db_byte_count +=
1076 VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;
1078 #if defined(VXGE_HAL_RX_MULTI_POST)
1079 vxge_os_spin_unlock(&ring->channel.post_lock);
1080 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1081 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1084 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1085 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): public API — hand the RxD to the adapter
 * by setting the OWN bits in control_0/control_1, optionally DMA-sync
 * the descriptor to the device, and decrement the ring usage counter.
 */
1089 * vxge_hal_ring_rxd_post_post - Process rxd after post.
1090 * @vpath_handle: virtual Path handle.
1091 * @rxdh: Descriptor handle.
1093 * Processes rxd after post
1096 vxge_hal_ring_rxd_post_post(
1097 vxge_hal_vpath_h vpath_handle,
1098 vxge_hal_rxd_h rxdh)
1100 vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
1102 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1103 __hal_ring_rxd_priv_t *priv;
1106 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1107 __hal_device_t *hldev;
1110 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1112 hldev = (__hal_device_t *) vp->vpath->hldev;
1114 vxge_hal_trace_log_ring("==> %s:%s:%d",
1115 __FILE__, __func__, __LINE__);
1117 vxge_hal_trace_log_ring(
1118 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1119 (ptr_t) vpath_handle, (ptr_t) rxdh);
1121 ring = (__hal_ring_t *) vp->vpath->ringh;
1123 vxge_assert(ring != NULL);
/* Transfer ownership of the descriptor to the adapter. */
1126 rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
1128 rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
1130 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1131 priv = __hal_ring_rxd_priv(ring, rxdp);
1132 vxge_os_dma_sync(ring->channel.pdev,
1137 VXGE_OS_DMA_DIR_TODEVICE);
1139 if (ring->stats->common_stats.usage_cnt > 0)
1140 ring->stats->common_stats.usage_cnt--;
1142 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1143 __FILE__, __func__, __LINE__);
/*
 * NOTE(review): incomplete extraction (dropped lines, stray leading line
 * numbers); code kept verbatim, comments only.
 * Purpose (from visible code): public API combining pre_post and
 * post_post — reset control_0 (Titan HW bugzilla #3039 workaround), set
 * the OWN bits, optionally DMA-sync the descriptor, then record the post
 * in channel bookkeeping under the post lock and decrement usage_cnt.
 */
1147 * vxge_hal_ring_rxd_post - Post descriptor on the ring.
1148 * @vpath_handle: virtual Path handle.
1149 * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
1151 * Post descriptor on the ring.
1152 * Prior to posting the descriptor should be filled in accordance with
1153 * Host/X3100 interface specification for a given service (LL, etc.).
1157 vxge_hal_ring_rxd_post(
1158 vxge_hal_vpath_h vpath_handle,
1159 vxge_hal_rxd_h rxdh)
1161 vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
1163 #if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1164 unsigned long flags;
1167 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1168 __hal_device_t *hldev;
1171 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1173 hldev = (__hal_device_t *) vp->vpath->hldev;
1175 vxge_hal_trace_log_ring("==> %s:%s:%d",
1176 __FILE__, __func__, __LINE__);
1178 vxge_hal_trace_log_ring(
1179 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1180 (ptr_t) vpath_handle, (ptr_t) rxdh);
1182 ring = (__hal_ring_t *) vp->vpath->ringh;
1184 vxge_assert(ring != NULL);
1186 /* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
1187 rxdp->control_0 = 0;
1189 #if defined(VXGE_DEBUG_ASSERT)
1190 /* make sure device overwrites the (illegal) t_code on completion */
1192 VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
/* Hand descriptor ownership to the adapter. */
1195 rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
1196 rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
1198 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1200 __hal_ring_rxd_priv_t *rxdp_temp1;
1201 rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
1202 vxge_os_dma_sync(ring->channel.pdev,
1203 rxdp_temp1->dma_handle,
1204 rxdp_temp1->dma_addr,
1205 rxdp_temp1->dma_offset,
1207 VXGE_OS_DMA_DIR_TODEVICE);
1211 #if defined(VXGE_HAL_RX_MULTI_POST)
1212 vxge_os_spin_lock(&ring->channel.post_lock);
1213 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1214 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Debug-only ordering check, as in vxge_hal_ring_rxd_pre_post. */
1217 #if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
1219 if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
1221 vxge_hal_rxd_h prev_rxdh;
1222 __hal_ring_rxd_priv_t *rxdp_temp2;
1224 rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
1226 ring->channel.dtr_arr[VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;
1228 if (prev_rxdh != NULL &&
1229 (rxdp_temp2->dma_offset & (~0xFFF)) != rxdp_temp2->dma_offset)
1230 vxge_assert((char *) prev_rxdh + ring->rxd_size == rxdh);
1235 __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
1237 ring->db_byte_count +=
1238 VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;
1240 #if defined(VXGE_HAL_RX_MULTI_POST)
1241 vxge_os_spin_unlock(&ring->channel.post_lock);
1242 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1243 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1246 if (ring->stats->common_stats.usage_cnt > 0)
1247 ring->stats->common_stats.usage_cnt--;
1249 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1250 __FILE__, __func__, __LINE__);
1254 * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
1255 * @vpath_handle: virtual Path handle.
1256 * @rxdh: Descriptor handle.
1258 * Processes rxd after post with memory barrier.
/*
 * vxge_hal_ring_rxd_post_post_wmb - Post-processing of an RxD with a
 * write memory barrier.
 *
 * Issues a memory barrier (so all prior descriptor writes are visible)
 * before delegating to vxge_hal_ring_rxd_post_post(), which flips the
 * ownership to the adapter.
 *
 * NOTE(review): truncated listing — the barrier call itself (after the
 * "Do memory barrier" comment) and the function braces are not visible.
 */
1261 vxge_hal_ring_rxd_post_post_wmb(
1262 vxge_hal_vpath_h vpath_handle,
1263 vxge_hal_rxd_h rxdh)
1265 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1266 __hal_device_t *hldev;
1268 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1270 hldev = (__hal_device_t *) vp->vpath->hldev;
/* Entry/argument trace logging. */
1272 vxge_hal_trace_log_ring("==> %s:%s:%d",
1273 __FILE__, __func__, __LINE__);
1275 vxge_hal_trace_log_ring(
1276 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1277 (ptr_t) vpath_handle, (ptr_t) rxdh);
1279 /* Do memory barrier before changing the ownership */
/* Delegate the actual ownership flip to the non-wmb variant. */
1282 vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);
1284 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1285 __FILE__, __func__, __LINE__);
1289 * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
1290 * @vpath_handle: virtual Path handle.
1292 * Post Doorbell after posting the rxd(s).
/*
 * vxge_hal_ring_rxd_post_post_db - Ring the RxD doorbell for all
 * descriptors posted since the last doorbell.
 *
 * Under the post lock, writes a doorbell for min(db_byte_count,
 * rxd_mem_avail) bytes: if the adapter has room for everything posted,
 * the full count is flushed and reset; otherwise only what fits is
 * flushed and the remainder stays pending in db_byte_count.
 *
 * NOTE(review): truncated listing — the 'ring'/'flags' declarations,
 * the "} else {" between the two doorbell branches, braces and the
 * closing trace/return are not visible here.
 */
1295 vxge_hal_ring_rxd_post_post_db(
1296 vxge_hal_vpath_h vpath_handle)
1298 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1299 __hal_device_t *hldev;
1302 vxge_assert(vpath_handle != NULL);
1304 hldev = (__hal_device_t *) vp->vpath->hldev;
1306 ring = (__hal_ring_t *) vp->vpath->ringh;
/* Entry/argument trace logging. */
1308 vxge_hal_trace_log_ring("==> %s:%s:%d",
1309 __FILE__, __func__, __LINE__);
1311 vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
1312 (ptr_t) vpath_handle);
/* Same compile-time lock selection as the post path; doorbell counters
 * are shared with vxge_hal_ring_rxd_post(). */
1314 #if defined(VXGE_HAL_RX_MULTI_POST)
1315 vxge_os_spin_lock(&ring->channel.post_lock);
1316 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1317 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Everything pending fits in the adapter's available RxD memory. */
1320 if (ring->db_byte_count <= ring->rxd_mem_avail) {
1321 __hal_rxd_db_post(vpath_handle, ring->db_byte_count);
1322 ring->rxd_mem_avail -= ring->db_byte_count;
1323 ring->db_byte_count = 0;
/* (else branch) Partial doorbell: flush only what fits, keep the rest
 * pending for a later doorbell. */
1325 __hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail);
1326 ring->db_byte_count -= ring->rxd_mem_avail;
1327 ring->rxd_mem_avail = 0;
1330 #if defined(VXGE_HAL_RX_MULTI_POST)
1331 vxge_os_spin_unlock(&ring->channel.post_lock);
1332 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1333 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1336 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1337 __FILE__, __func__, __LINE__);
1341 * vxge_hal_ring_is_next_rxd_completed - Check if the next rxd is completed
1342 * @vpath_handle: Virtual Path handle.
1344 * Checks if the _next_ completed descriptor is in host memory
1346 * Returns: VXGE_HAL_OK - success.
1347 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1348 * are currently available for processing.
/*
 * vxge_hal_ring_is_next_rxd_completed - Peek whether the next RxD has
 * been completed by the adapter.
 *
 * Without consuming the descriptor, checks (under the post lock) the
 * OWN bits of the next in-flight RxD. Returns VXGE_HAL_OK when the
 * host owns it (completed), otherwise
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS.
 *
 * NOTE(review): truncated listing — the 'ring' declaration, the
 * rxdh != NULL guard around the OWN test, braces and the final
 * "return (status)" are not visible here.
 */
1351 vxge_hal_ring_is_next_rxd_completed(
1352 vxge_hal_vpath_h vpath_handle)
1355 vxge_hal_rxd_h rxdh;
1356 vxge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
1357 __hal_device_t *hldev;
/* Pessimistic default: nothing completed unless proven otherwise. */
1358 vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1359 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1361 vxge_assert(vpath_handle != NULL);
1363 hldev = (__hal_device_t *) vp->vpath->hldev;
/* Entry/argument trace logging. */
1365 vxge_hal_trace_log_ring("==> %s:%s:%d",
1366 __FILE__, __func__, __LINE__);
1368 vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
1369 (ptr_t) vpath_handle);
1371 ring = (__hal_ring_t *) vp->vpath->ringh;
1373 vxge_assert(ring != NULL);
/* The peek must not race with posts that flip the OWN bits. */
1375 #if defined(VXGE_HAL_RX_MULTI_POST)
1376 vxge_os_spin_lock(&ring->channel.post_lock);
1377 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1378 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Fetch (without consuming) the next descriptor awaiting completion. */
1381 __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
1383 rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
1387 /* check whether it is not the end */
/* Host owns the RxD when neither the list-OWN nor the tail-OWN bit is
 * still set by the adapter. */
1388 if ((!(rxdp->control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) &&
1389 (!(rxdp->control_1 &
1390 VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) {
1392 status = VXGE_HAL_OK;
1396 #if defined(VXGE_HAL_RX_MULTI_POST)
1397 vxge_os_spin_unlock(&ring->channel.post_lock);
1398 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1399 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1402 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
1403 __FILE__, __func__, __LINE__, status);
1408 * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor.
1409 * @channelh: Channel handle.
1410 * @rxdh: Descriptor handle. Returned by HAL.
1411 * @rxd_priv: Buffer to return a pointer to the per rxd space allocated
1412 * @t_code: Transfer code, as per X3100 User Guide,
1413 * Receive Descriptor Format. Returned by HAL.
1415 * Retrieve the _next_ completed descriptor.
1416 * HAL uses ring callback (*vxge_hal_ring_callback_f) to notifiy
1417 * upper-layer driver (ULD) of new completed descriptors. After that
1418 * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest
1419 * completions (the very first completion is passed by HAL via
1420 * vxge_hal_ring_callback_f).
1422 * Implementation-wise, the upper-layer driver is free to call
1423 * vxge_hal_ring_rxd_next_completed either immediately from inside the
1424 * ring callback, or in a deferred fashion and separate (from HAL)
1427 * Non-zero @t_code means failure to fill-in receive buffer(s)
1428 * of the descriptor.
1429 * For instance, parity error detected during the data transfer.
1430 * In this case X3100 will complete the descriptor and indicate
1431 * for the host that the received data is not to be used.
1432 * For details please refer to X3100 User Guide.
1434 * Returns: VXGE_HAL_OK - success.
1435 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1436 * are currently available for processing.
1438 * See also: vxge_hal_ring_callback_f {},
1439 * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}.
/*
 * vxge_hal_ring_rxd_next_completed - Retrieve the next completed RxD.
 *
 * Under the post lock: peeks the next in-flight descriptor, and if the
 * host owns it (or its t_code says FRM_DROP), consumes it — returning
 * the descriptor handle, its ULD-private area and its t_code, crediting
 * its bytes back to rxd_mem_avail, bumping usage statistics and the
 * per-buffer-mode poll byte counters. Returns VXGE_HAL_OK on success,
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS when nothing is ready,
 * and (non-polling builds) VXGE_HAL_COMPLETIONS_REMAIN once
 * indicate_max_pkts completions have been handed out in one pass.
 *
 * NOTE(review): heavily truncated listing — the 't_code'/'ring'/'flags'
 * declarations, the rxdh NULL check, several statement left-hand sides,
 * the switch-case argument expressions, braces/#endif directives and
 * the final return are not visible here.
 */
1442 vxge_hal_ring_rxd_next_completed(
1443 vxge_hal_vpath_h vpath_handle,
1444 vxge_hal_rxd_h *rxdh,
/* The 5-buffer layout is the superset; control_0/control_1 offsets are
 * common to 1-, 3- and 5-buffer descriptor formats. */
1449 vxge_hal_ring_rxd_5_t *rxdp; /* doesn't matter 1, 3 or 5... */
1450 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1451 __hal_ring_rxd_priv_t *priv;
1453 __hal_device_t *hldev;
/* Pessimistic default until a completed descriptor is found. */
1454 vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1455 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1456 u64 own, control_0, control_1;
1458 vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
1459 (rxd_priv != NULL) && (t_code != NULL));
1461 hldev = (__hal_device_t *) vp->vpath->hldev;
/* Entry/argument trace logging. */
1463 vxge_hal_trace_log_ring("==> %s:%s:%d",
1464 __FILE__, __func__, __LINE__);
1466 vxge_hal_trace_log_ring(
1467 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
1468 "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
1469 (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv,
1472 ring = (__hal_ring_t *) vp->vpath->ringh;
1474 vxge_assert(ring != NULL);
/* Completion retrieval races with posting; take the same lock. */
1479 #if defined(VXGE_HAL_RX_MULTI_POST)
1480 vxge_os_spin_lock(&ring->channel.post_lock);
1481 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1482 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Peek (don't consume yet) the next descriptor awaiting completion. */
1485 __hal_channel_dtr_try_complete(&ring->channel, rxdh);
1487 rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh;
/* Streaming-DMA builds: invalidate the CPU's cached view of the
 * control words the device wrote back before reading them. */
1490 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1492 * Note: 24 bytes at most means:
1493 * - Control_3 in case of 5-buffer mode
1494 * - Control_1 and Control_2
1496 * This is the only length needs to be invalidated
1499 priv = __hal_ring_rxd_priv(ring, rxdp);
1500 vxge_os_dma_sync(ring->channel.pdev,
1505 VXGE_OS_DMA_DIR_FROMDEVICE);
/* Extract the transfer code for the caller. */
1507 *t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0);
/* Snapshot the control words once; the OWN test uses the snapshot. */
1509 control_0 = rxdp->control_0;
1510 control_1 = rxdp->control_1;
1511 own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
1513 /* check whether it is not the end */
/* Consume when the host owns both control words, or when the frame was
 * dropped (FRM_DROP descriptors are completed regardless of OWN). */
1514 if ((!own && !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) ||
1515 (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) {
/* Budget the number of completions returned per invocation pass so a
 * busy ring cannot starve the caller (non-polling builds only). */
1517 #ifndef VXGE_HAL_IRQ_POLLING
1518 if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
1520 * reset it. since we don't want to return
1521 * garbage to the ULD
1524 status = VXGE_HAL_COMPLETIONS_REMAIN;
/* Actually consume the descriptor from the channel. */
1527 __hal_channel_dtr_complete(&ring->channel);
1529 *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);
/* Credit the descriptor's bytes back to the adapter-side budget used
 * by the doorbell path. */
1531 ring->rxd_mem_avail +=
1532 (VXGE_HAL_RING_HAL_PRIV(ring, rxdp))->db_bytes;
/* Track in-use descriptor count and its high-water mark. */
1534 ring->stats->common_stats.usage_cnt++;
1535 if (ring->stats->common_stats.usage_max <
1536 ring->stats->common_stats.usage_cnt)
1537 ring->stats->common_stats.usage_max =
1538 ring->stats->common_stats.usage_cnt;
/* Per-buffer-mode accounting of received bytes for the poll budget.
 * NOTE(review): the SIZE_GET argument expressions (rxdp->control_N)
 * are missing from this truncated listing. */
1540 switch (ring->buffer_mode) {
1541 case VXGE_HAL_RING_RXD_BUFFER_MODE_1:
1542 ring->channel.poll_bytes +=
1543 (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET(
1546 case VXGE_HAL_RING_RXD_BUFFER_MODE_3:
1547 ring->channel.poll_bytes +=
1548 (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET(
1550 (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET(
1552 (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET(
1555 case VXGE_HAL_RING_RXD_BUFFER_MODE_5:
1556 ring->channel.poll_bytes +=
1557 (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET(
1559 (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET(
1561 (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET(
1563 (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET(
1565 (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET(
1570 status = VXGE_HAL_OK;
1571 #ifndef VXGE_HAL_IRQ_POLLING
1577 #if defined(VXGE_HAL_RX_MULTI_POST)
1578 vxge_os_spin_unlock(&ring->channel.post_lock);
1579 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1580 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1583 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
1584 __FILE__, __func__, __LINE__, status);
1590 * vxge_hal_ring_handle_tcode - Handle transfer code.
1591 * @vpath_handle: Virtual Path handle.
1592 * @rxdh: Descriptor handle.
1593 * @t_code: One of the enumerated (and documented in the X3100 user guide)
1596 * Handle descriptor's transfer code. The latter comes with each completed
1599 * Returns: one of the vxge_hal_status_e {} enumerated types.
1600 * VXGE_HAL_OK - for success.
1601 * VXGE_HAL_ERR_CRITICAL - when encounters critical error.
/*
 * vxge_hal_ring_handle_tcode - Classify and account a completion
 * transfer code.
 *
 * Switches on the RxD t_code delivered with a completion: most codes
 * are merely counted in the per-vpath rxd_t_code_err_cnt[] statistics
 * and VXGE_HAL_OK is returned; an internal ECC error (0x8) raises a
 * device error event; an unrecognized code returns
 * VXGE_HAL_ERR_INVALID_TCODE.
 *
 * NOTE(review): truncated listing — the 't_code' parameter, the
 * "switch (t_code)" line, every "case …:"/"break;" label (only the
 * case-describing comments survive) and various braces are not
 * visible here.
 */
1604 vxge_hal_ring_handle_tcode(
1605 vxge_hal_vpath_h vpath_handle,
1606 vxge_hal_rxd_h rxdh,
1609 __hal_device_t *hldev;
1610 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1612 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1614 hldev = (__hal_device_t *) vp->vpath->hldev;
/* Entry/argument trace logging. */
1616 vxge_hal_trace_log_ring("==> %s:%s:%d",
1617 __FILE__, __func__, __LINE__);
1619 vxge_hal_trace_log_ring(
1620 "vpath_handle = 0x"VXGE_OS_STXFMT", "
1621 "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
1622 (ptr_t) vpath_handle, (ptr_t) rxdh, t_code);
/* Per-t_code cases follow; benign codes fall through to the shared
 * statistics bump at the bottom. */
1626 /* 0x0: Transfer ok. */
1630 * 0x1: Layer 3 checksum presentation
1631 * configuration mismatch.
1636 * 0x2: Layer 4 checksum presentation
1637 * configuration mismatch.
1642 * 0x3: Layer 3 and Layer 4 checksum
1643 * presentation configuration mismatch.
1647 /* 0x4: Reserved. */
1651 * 0x5: Layer 3 error unparseable packet,
1652 * such as unknown IPv6 header.
1657 * 0x6: Layer 2 error frame integrity
1658 * error, such as FCS or ECC).
1663 * 0x7: Buffer size error the RxD buffer(s)
1664 * were not appropriately sized and
1665 * data loss occurred.
1669 /* 0x8: Internal ECC error RxD corrupted. */
/* ECC corruption is critical: escalate to the device error handler. */
1670 __hal_device_handle_error(vp->vpath->hldev,
1671 vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR);
1675 * 0x9: Benign overflow the contents of
1676 * Segment1 exceeded the capacity of
1677 * Buffer1 and the remainder was placed
1678 * in Buffer2. Segment2 now starts in
1679 * Buffer3. No data loss or errors occurred.
1684 * 0xA: Buffer size 0 one of the RxDs
1685 * assigned buffers has a size of 0 bytes.
1689 /* 0xB: Reserved. */
1693 * 0xC: Frame dropped either due to
1694 * VPath Reset or because of a VPIN mismatch.
1698 /* 0xD: Reserved. */
1701 /* 0xE: Reserved. */
1705 * 0xF: Multiple errors more than one
1706 * transfer code condition occurred.
/* default: unknown t_code — reject rather than mis-count. */
1710 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
1711 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
1712 return (VXGE_HAL_ERR_INVALID_TCODE);
/* Shared exit for recognized codes: bump the per-code error counter. */
1715 vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++;
1717 vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
1718 __FILE__, __func__, __LINE__, VXGE_HAL_OK);
1719 return (VXGE_HAL_OK);
1724 * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data.
1725 * @vpath_handle: Virtual Path handle.
1726 * @rxdh: Descriptor handle.
1728 * Returns: private ULD info associated with the descriptor.
1729 * ULD requests per-descriptor space via vxge_hal_ring_attr.
/*
 * vxge_hal_ring_rxd_private_get - Return the ULD's per-descriptor
 * private area for the given RxD.
 *
 * Thin accessor around VXGE_HAL_RING_ULD_PRIV; the space was reserved
 * by the ULD via vxge_hal_ring_attr at ring-open time.
 *
 * NOTE(review): truncated listing — the function braces are missing.
 */
1733 vxge_hal_ring_rxd_private_get(
1734 vxge_hal_vpath_h vpath_handle,
1735 vxge_hal_rxd_h rxdh)
1737 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1739 return (VXGE_HAL_RING_ULD_PRIV(
1740 ((__hal_ring_t *) vp->vpath->ringh), rxdh));
1745 * vxge_hal_ring_rxd_free - Free descriptor.
1746 * @vpath_handle: Virtual Path handle.
1747 * @rxdh: Descriptor handle.
1749 * Free the reserved descriptor. This operation is "symmetrical" to
1750 * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's
1753 * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor again can
1756 * - reserved (vxge_hal_ring_rxd_reserve);
1758 * - posted (vxge_hal_ring_rxd_post);
1760 * - completed (vxge_hal_ring_rxd_next_completed);
1762 * - and recycled again (vxge_hal_ring_rxd_free).
1764 * For alternative state transitions and more details please refer to
/*
 * vxge_hal_ring_rxd_free - Return a descriptor to the free pool.
 *
 * Symmetric to vxge_hal_ring_rxd_reserve(): under the post lock,
 * releases the descriptor's index back to the channel so it can be
 * reserved again, and (memory-check builds) clears its allocation
 * marker.
 *
 * NOTE(review): truncated listing — the 'ring' declaration, braces,
 * several #endif directives and the closing brace are not visible.
 */
1769 vxge_hal_ring_rxd_free(
1770 vxge_hal_vpath_h vpath_handle,
1771 vxge_hal_rxd_h rxdh)
/* 'flags' is only needed for the IRQ-disabling lock variant. */
1773 #if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1774 unsigned long flags;
1778 __hal_device_t *hldev;
1779 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1781 vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1783 hldev = (__hal_device_t *) vp->vpath->hldev;
/* Entry/argument trace logging. */
1785 vxge_hal_trace_log_ring("==> %s:%s:%d",
1786 __FILE__, __func__, __LINE__);
1788 vxge_hal_trace_log_ring(
1789 "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1790 (ptr_t) vpath_handle, (ptr_t) rxdh);
1792 ring = (__hal_ring_t *) vp->vpath->ringh;
1794 vxge_assert(ring != NULL);
/* Freeing mutates the channel free list — same lock as the posters. */
1796 #if defined(VXGE_HAL_RX_MULTI_POST)
1797 vxge_os_spin_lock(&ring->channel.post_lock);
1798 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1799 vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
/* Give the descriptor's index back to the channel free pool. */
1802 __hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
/* Memory-check builds track reserve/free pairing per descriptor. */
1803 #if defined(VXGE_OS_MEMORY_CHECK)
1804 VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
1807 #if defined(VXGE_HAL_RX_MULTI_POST)
1808 vxge_os_spin_unlock(&ring->channel.post_lock);
1809 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1810 vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1813 vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
1814 __FILE__, __func__, __LINE__);