/*-
 * Copyright(c) 2002-2011 Exar Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 *    3. Neither the name of the Exar Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include <dev/vxge/vxgehal/vxgehal.h>

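/*
 * Note on ring memory layout: every RxD block records the index of its
 * owning memblock at VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET and the DMA
 * address of the next block at VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET.
 * The helpers below read and write those fields; the blocks themselves
 * are linked into a circular list, with the last block pointing back to
 * the first (see __hal_ring_rxdblock_link()).
 */
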
/*
 * __hal_ring_block_memblock_idx - Return the memblock index
 * @block: Virtual address of memory block
 *
 * This function returns the index of the owner memory block
 */
static inline u32
__hal_ring_block_memblock_idx(
    vxge_hal_ring_block_t block)
{
        return (u32)*((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET)));
}

/*
 * __hal_ring_block_memblock_idx_set - Sets the memblock index
 * @block: Virtual address of memory block
 * @memblock_idx: Index of memory block
 *
 * This function sets the owner memory block index in a memory block
 */
static inline void
__hal_ring_block_memblock_idx_set(
    vxge_hal_ring_block_t block,
    u32 memblock_idx)
{
        *((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx;
}

/*
 * __hal_ring_block_next_pointer - Returns the dma address of next block
 * @block: RxD block
 *
 * Returns the DMA address of the next block, as stored in the RxD block
 */
static inline dma_addr_t
/* LINTED */
__hal_ring_block_next_pointer(
    vxge_hal_ring_block_t *block)
{
        return (dma_addr_t)*((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)));
}

/*
 * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block
 * @block: RxD block
 * @dma_next: dma address of next block
 *
 * Sets the next block pointer in RxD block
 */
static inline void
__hal_ring_block_next_pointer_set(
    vxge_hal_ring_block_t *block,
    dma_addr_t dma_next)
{
        *((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next;
}

/*
 * __hal_ring_first_block_address_get - Returns the dma address of the
 *              first block
 * @ringh: Handle to the ring
 *
 * Returns the dma address of the first RxD block
 */
u64
__hal_ring_first_block_address_get(
    vxge_hal_ring_h ringh)
{
        __hal_ring_t *ring = (__hal_ring_t *) ringh;
        vxge_hal_mempool_dma_t *dma_object;

        dma_object = __hal_mempool_memblock_dma(ring->mempool, 0);

        vxge_assert(dma_object != NULL);

        return (dma_object->addr);
}


#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
/*
 * __hal_ring_item_dma_offset - Return the dma offset of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma offset
 *
 * This function returns the dma offset of a given item
 */
static ptrdiff_t
__hal_ring_item_dma_offset(
    vxge_hal_mempool_h mempoolh,
    void *item)
{
        u32 memblock_idx;
        void *memblock;
        vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
        __hal_device_t *hldev;

        vxge_assert((mempoolh != NULL) && (item != NULL));

        hldev = (__hal_device_t *) mempool->devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT,
            (ptr_t) mempoolh, (ptr_t) item);

        /* get owner memblock index */
        memblock_idx = __hal_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return ((u8 *) item - (u8 *) memblock);
}
#endif

/*
 * __hal_ring_item_dma_addr - Return the dma address of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma address
 * @dma_handle: dma handle
 *
 * This function returns the dma address of a given item
 */
static dma_addr_t
__hal_ring_item_dma_addr(
    vxge_hal_mempool_h mempoolh,
    void *item,
    pci_dma_h *dma_handle)
{
        u32 memblock_idx;
        void *memblock;
        vxge_hal_mempool_dma_t *memblock_dma_object;
        vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
        __hal_device_t *hldev;
        ptrdiff_t dma_item_offset;

        vxge_assert((mempoolh != NULL) && (item != NULL) &&
            (dma_handle != NULL));

        hldev = (__hal_device_t *) mempool->devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", "
            "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh,
            (ptr_t) item, (ptr_t) dma_handle);

        /* get owner memblock index */
        memblock_idx = __hal_ring_block_memblock_idx((u8 *) item);

        /* get owner memblock by memblock index */
        memblock = __hal_mempool_memblock(
            (vxge_hal_mempool_t *) mempoolh, memblock_idx);

        /* get memblock DMA object by memblock index */
        memblock_dma_object = __hal_mempool_memblock_dma(
            (vxge_hal_mempool_t *) mempoolh, memblock_idx);

        /* calculate offset in the memblock of this item */
        /* LINTED */
        dma_item_offset = (u8 *) item - (u8 *) memblock;

        *dma_handle = memblock_dma_object->handle;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (memblock_dma_object->addr + dma_item_offset);
}

/*
 * __hal_ring_rxdblock_link - Link the RxD blocks
 * @mempoolh: Handle to the memory pool of the ring
 * @ring: ring
 * @from: RxD block from which to link
 * @to: RxD block to which to link to
 *
 * This function links the "from" RxD block to the "to" RxD block
 */
static void
__hal_ring_rxdblock_link(
    vxge_hal_mempool_h mempoolh,
    __hal_ring_t *ring,
    u32 from,
    u32 to)
{
        vxge_hal_ring_block_t *to_item, *from_item;
        dma_addr_t to_dma, from_dma;
        pci_dma_h to_dma_handle, from_dma_handle;
        __hal_device_t *hldev;

        vxge_assert((mempoolh != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", "
            "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to);

        /* get "from" RxD block */
        from_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
            (vxge_hal_mempool_t *) mempoolh, from);
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
            (vxge_hal_mempool_t *) mempoolh, to);
        vxge_assert(to_item);

        /* return address of the beginning of previous RxD block */
        to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

        /*
         * set next pointer for this RxD block to point on
         * previous item's DMA start address
         */
        __hal_ring_block_next_pointer_set(from_item, to_dma);

        /* return "from" RxD block's DMA start address */
        from_dma = __hal_ring_item_dma_addr(
            mempoolh, from_item, &from_dma_handle);

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        /* we must sync "from" RxD block, so hardware will see it */
        vxge_os_dma_sync(ring->channel.pdev,
            from_dma_handle,
            from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
            __hal_ring_item_dma_offset(mempoolh, from_item) +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
            sizeof(u64),
            VXGE_OS_DMA_DIR_TODEVICE);
#endif

        vxge_hal_info_log_ring(
            "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT,
            from, (ptr_t) from_dma, to, (ptr_t) to_dma);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

}

/*
 * __hal_ring_mempool_item_alloc - Allocate List blocks for RxD block callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 *
 * This function is the callback passed to __hal_mempool_create to create
 * memory pool for RxD block
 */
static vxge_hal_status_e
__hal_ring_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
        u32 i;
        __hal_ring_t *ring = (__hal_ring_t *) userdata;
        __hal_device_t *hldev;

        vxge_assert((item != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_pool("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_pool(
            "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
            "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
            "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
            "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
            memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
            (ptr_t) userdata);

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {

                void *uld_priv;
                void *rxdblock_priv;
                __hal_ring_rxd_priv_t *rxd_priv;
                vxge_hal_ring_rxd_1_t *rxdp;
                u32 memblock_item_idx;
                u32 dtr_index = item_index * ring->rxds_per_block + i;

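                /*
                 * Each RxD of the block is exposed through the channel's
                 * flat dtr array; dtr_index is the ring-wide index of
                 * RxD i within RxD block item_index.
                 */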
                ring->channel.dtr_arr[dtr_index].dtr =
                    ((u8 *) item) + i * ring->rxd_size;

                /*
                 * Note: memblock_item_idx is index of the item within
                 * the memblock. For instance, in case of three RxD-blocks
                 * per memblock this value can be 0, 1 or 2.
                 */
                rxdblock_priv = __hal_mempool_item_priv(
                    (vxge_hal_mempool_t *) mempoolh,
                    memblock_index,
                    item,
                    &memblock_item_idx);

                rxdp = (vxge_hal_ring_rxd_1_t *)
                    ring->channel.dtr_arr[dtr_index].dtr;

                uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i);
                rxd_priv =
                    (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) +
                    ring->per_rxd_space));

                ((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index;

                ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv;
                ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv;

                /* pre-format per-RxD Ring's private */
                /* LINTED */
                rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock;
                rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
                rxd_priv->dma_handle = dma_object->handle;
#if defined(VXGE_DEBUG_ASSERT)
                rxd_priv->dma_object = dma_object;
#endif
                rxd_priv->db_bytes = ring->rxd_size;

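                /*
                 * A memblock may be larger than rxds_per_block * rxd_size;
                 * charge the trailing pad bytes of the block to its last
                 * RxD so that doorbell byte counts add up to whole
                 * memblocks.
                 */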
                if (i == (ring->rxds_per_block - 1)) {
                        rxd_priv->db_bytes +=
                            (((vxge_hal_mempool_t *) mempoolh)->memblock_size -
                            (ring->rxds_per_block * ring->rxd_size));
                }
        }

        __hal_ring_block_memblock_idx_set((u8 *) item, memblock_index);
        if (is_last) {
                /* link last one with first one */
                __hal_ring_rxdblock_link(mempoolh, ring, item_index, 0);
        }

        if (item_index > 0) {
                /* link this RxD block with previous one */
                __hal_ring_rxdblock_link(mempoolh, ring, item_index - 1, item_index);
        }

        vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_mempool_item_free - Free RxD block callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 *
 * This function is the callback passed to __hal_mempool_free to destroy
 * memory pool for RxD block
 */
static vxge_hal_status_e
__hal_ring_mempool_item_free(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
        __hal_ring_t *ring = (__hal_ring_t *) userdata;
        __hal_device_t *hldev;

        vxge_assert((item != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_pool("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_pool(
            "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
            "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
            "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
            "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
            memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
            (ptr_t) userdata);

        vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_initial_replenish - Initial replenish of RxDs
 * @ring: ring
 * @reopen: Flag to denote if it is open or reopen
 *
 * This function replenishes the RxDs from the reserve array to the work array
 */
static vxge_hal_status_e
__hal_ring_initial_replenish(
    __hal_ring_t *ring,
    vxge_hal_reopen_e reopen)
{
        vxge_hal_rxd_h rxd;
        void *uld_priv;
        __hal_device_t *hldev;
        vxge_hal_status_e status;

        vxge_assert(ring != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
            (ptr_t) ring, reopen);

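        /*
         * Reserve, initialize (via the ULD-supplied rxd_init callback,
         * when one is set) and post RxDs until no free descriptors remain.
         */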
        while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) ==
            VXGE_HAL_OK) {

                if (ring->rxd_init) {
                        status = ring->rxd_init(ring->channel.vph,
                            rxd,
                            uld_priv,
                            VXGE_HAL_RING_RXD_INDEX(rxd),
                            ring->channel.userdata,
                            reopen);
                        if (status != VXGE_HAL_OK) {
                                vxge_hal_ring_rxd_free(ring->channel.vph, rxd);
                                vxge_hal_trace_log_ring("<== %s:%s:%d \
                                    Result: %d",
                                    __FILE__, __func__, __LINE__, status);
                                return (status);
                        }
                }

                vxge_hal_ring_rxd_post(ring->channel.vph, rxd);
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
            __FILE__, __func__, __LINE__);
        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_create - Create a Ring
 * @vpath_handle: Handle returned by virtual path open
 * @attr: Ring configuration parameters structure
 *
 * This function creates the Ring and initializes it.
 *
 */
vxge_hal_status_e
__hal_ring_create(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_ring_attr_t *attr)
{
        vxge_hal_status_e status;
        __hal_ring_t *ring;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        vxge_hal_ring_config_t *config;
        __hal_device_t *hldev;

        vxge_assert((vpath_handle != NULL) && (attr != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) attr);

        if ((vpath_handle == NULL) || (attr == NULL)) {
                vxge_hal_err_log_ring("null pointer passed == > %s : %d",
                    __func__, __LINE__);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result:1",
                    __FILE__, __func__, __LINE__);
                return (VXGE_HAL_FAIL);
        }

        config =
            &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring;

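        /*
         * Round the configured ring length up to a whole number of RxD
         * blocks; descriptors are only allocated in block units.
         */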
        config->ring_length = ((config->ring_length +
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) /
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) *
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

        ring = (__hal_ring_t *) vxge_hal_channel_allocate(
            (vxge_hal_device_h) vp->vpath->hldev,
            vpath_handle,
            VXGE_HAL_CHANNEL_TYPE_RING,
            config->ring_length,
            attr->per_rxd_space,
            attr->userdata);

        if (ring == NULL) {
                vxge_hal_err_log_ring("Memory allocation failed == > %s : %d",
                    __func__, __LINE__);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__,
                    VXGE_HAL_ERR_OUT_OF_MEMORY);
                return (VXGE_HAL_ERR_OUT_OF_MEMORY);
        }

        vp->vpath->ringh = (vxge_hal_ring_h) ring;

        ring->stats = &vp->vpath->sw_stats->ring_stats;

        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;

        ring->indicate_max_pkts = config->indicate_max_pkts;
        ring->buffer_mode = config->buffer_mode;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

        ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
            sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

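        /* Align the per-RxD private area to the CPU cache line size */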
        ring->rxd_priv_size =
            ((ring->rxd_priv_size + __vxge_os_cacheline_size - 1) /
            __vxge_os_cacheline_size) * __vxge_os_cacheline_size;

        /*
         * how many RxDs can fit into one block. Depends on configured
         * buffer_mode.
         */
        ring->rxds_per_block =
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

        /* calculate actual RxD block private size */
        ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

        ring->rxd_mem_avail =
            ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size;

        ring->db_byte_count = 0;

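        /*
         * One page-sized memblock holds one RxD block, so the pool is
         * sized at ring_length / rxds_per_block memblocks, each carrying
         * rxdblock_priv_size bytes of per-block private space.
         */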
        ring->mempool = vxge_hal_mempool_create(
            (vxge_hal_device_h) vp->vpath->hldev,
            VXGE_OS_HOST_PAGE_SIZE,
            VXGE_OS_HOST_PAGE_SIZE,
            ring->rxdblock_priv_size,
            ring->config->ring_length / ring->rxds_per_block,
            ring->config->ring_length / ring->rxds_per_block,
            __hal_ring_mempool_item_alloc,
            __hal_ring_mempool_item_free,
            ring);

        if (ring->mempool == NULL) {
                __hal_ring_delete(vpath_handle);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
                return (VXGE_HAL_ERR_OUT_OF_MEMORY);
        }

        status = vxge_hal_channel_initialize(&ring->channel);
        if (status != VXGE_HAL_OK) {
                __hal_ring_delete(vpath_handle);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, status);
                return (status);
        }


        /*
         * Note:
         * Specifying rxd_init callback means two things:
         * 1) rxds need to be initialized by ULD at channel-open time;
         * 2) rxds need to be posted at channel-open time
         *      (that's what the initial_replenish() below does)
         * Currently we don't have a case when the 1) is done without the 2).
         */
        if (ring->rxd_init) {
                if ((status = __hal_ring_initial_replenish(
                    ring,
                    VXGE_HAL_OPEN_NORMAL))
                    != VXGE_HAL_OK) {
                        __hal_ring_delete(vpath_handle);
                        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                            __FILE__, __func__, __LINE__, status);
                        return (status);
                }
        }

        /*
         * initial replenish will increment the counter in its post() routine,
         * we have to reset it
         */
        ring->stats->common_stats.usage_cnt = 0;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_abort - Abort the ring
 * @ringh: Ring to be reset
 * @reopen: See vxge_hal_reopen_e {}.
 *
 * This function terminates the RxDs of the ring
 */
void
__hal_ring_abort(
    vxge_hal_ring_h ringh,
    vxge_hal_reopen_e reopen)
{
        u32 i = 0;
        vxge_hal_rxd_h rxdh;

        __hal_device_t *hldev;
        __hal_ring_t *ring = (__hal_ring_t *) ringh;

        vxge_assert(ringh != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
            (ptr_t) ringh, reopen);

        if (ring->rxd_term) {
                __hal_channel_for_each_dtr(&ring->channel, rxdh, i) {
                        if (!__hal_channel_is_posted_dtr(&ring->channel, i)) {
                                ring->rxd_term(ring->channel.vph, rxdh,
                                    VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                                    VXGE_HAL_RXD_STATE_FREED,
                                    ring->channel.userdata,
                                    reopen);
                        }
                }
        }

        for (;;) {
                __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
                if (rxdh == NULL)
                        break;

                __hal_channel_dtr_complete(&ring->channel);
                if (ring->rxd_term) {
                        ring->rxd_term(ring->channel.vph, rxdh,
                            VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                            VXGE_HAL_RXD_STATE_POSTED,
                            ring->channel.userdata,
                            reopen);
                }
                __hal_channel_dtr_free(&ring->channel,
                    VXGE_HAL_RING_RXD_INDEX(rxdh));
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * __hal_ring_reset - Resets the ring
 * @ringh: Ring to be reset
 *
 * This function resets the ring during the vpath reset operation
 */
vxge_hal_status_e
__hal_ring_reset(
    vxge_hal_ring_h ringh)
{
        __hal_ring_t *ring = (__hal_ring_t *) ringh;
        __hal_device_t *hldev;
        vxge_hal_status_e status;
        __hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph;

        vxge_assert(ringh != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT,
            (ptr_t) ringh);

        __hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY);

        status = __hal_channel_reset(&ring->channel);

        if (status != VXGE_HAL_OK) {

                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, status);
                return (status);

        }
        ring->rxd_mem_avail = vph->vpath->rxd_mem_size;
        ring->db_byte_count = 0;


        if (ring->rxd_init) {
                if ((status = __hal_ring_initial_replenish(
                    ring,
                    VXGE_HAL_RESET_ONLY))
                    != VXGE_HAL_OK) {
                        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                            __FILE__, __func__, __LINE__, status);
                        return (status);
                }
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_delete - Removes the ring
 * @vpath_handle: Virtual path handle to which this queue belongs
 *
 * This function frees up the memory pool and removes the ring
 */
void
__hal_ring_delete(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        vxge_assert(ring->channel.pdev);

        __hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);


        if (ring->mempool) {
                vxge_hal_mempool_destroy(ring->mempool);
        }

        vxge_hal_channel_terminate(&ring->channel);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif

        vxge_hal_channel_free(&ring->channel);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

}

/*
 * __hal_ring_frame_length_set - Set the maximum frame length of recv frames.
 * @vpath: virtual Path
 * @new_frmlen: New frame length
 *
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_ERR_VPATH_NOT_OPEN - The virtual path is not open.
 *
 */
vxge_hal_status_e
__hal_ring_frame_length_set(
    __hal_virtualpath_t *vpath,
    u32 new_frmlen)
{
        u64 val64;
        __hal_device_t *hldev;

        vxge_assert(vpath != NULL);

        hldev = (__hal_device_t *) vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
            (ptr_t) vpath, new_frmlen);

        if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {

                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__,
                    VXGE_HAL_ERR_VPATH_NOT_OPEN);
                return (VXGE_HAL_ERR_VPATH_NOT_OPEN);

        }

        val64 = vxge_os_pio_mem_read64(
            vpath->hldev->header.pdev,
            vpath->hldev->header.regh0,
            &vpath->vp_reg->rxmac_vcfg0);

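        /* Clear the current maximum frame length field before updating it */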
        val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);

        if (vpath->vp_config->ring.max_frm_len !=
            VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {

                val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vpath->vp_config->ring.max_frm_len +
                    VXGE_HAL_MAC_HEADER_MAX_SIZE);

        } else {

                val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
                    VXGE_HAL_MAC_HEADER_MAX_SIZE);
        }

        vxge_os_pio_mem_write64(
            vpath->hldev->header.pdev,
            vpath->hldev->header.regh0,
            val64,
            &vpath->vp_reg->rxmac_vcfg0);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
 *              with a valid handle.
 * @rxd_priv: Buffer to return pointer to per rxd private space
 *
 * Reserve Rx descriptor for the subsequent filling-in (by the upper layer
 * driver (ULD)) and posting on the corresponding virtual path
 * via vxge_hal_ring_rxd_post().
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
vxge_hal_status_e
vxge_hal_ring_rxd_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h *rxdh,
    void **rxd_priv)
{
        vxge_hal_status_e status;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
            (rxd_priv != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
            "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
            (ptr_t) rxdh, (ptr_t) rxd_priv);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        status = __hal_channel_dtr_reserve(&ring->channel, rxdh);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        if (status == VXGE_HAL_OK) {
                vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;

                /* instead of memset: reset this RxD */
                rxdp->control_0 = rxdp->control_1 = 0;

                *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

#if defined(VXGE_OS_MEMORY_CHECK)
                VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
#endif
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
        return (status);
}

/*
 * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void
vxge_hal_ring_rxd_pre_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
#if defined(VXGE_DEBUG_ASSERT)
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#endif
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_DEBUG_ASSERT)
        /* make sure device overwrites the (illegal) t_code on completion */
        rxdp->control_0 |=
            VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

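        /*
         * Debug-only ordering check: unless this RxD starts a new
         * 4KB-aligned block, it must immediately follow the previously
         * posted RxD in host memory.
         */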
#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
        if (TRUE) {
                if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
                        vxge_hal_rxd_h prev_rxdh;
                        __hal_ring_rxd_priv_t *rxdp_priv;
                        u32 index;

                        rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);

                        if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
                                index = ring->channel.length;
                        else
                                index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;

                        prev_rxdh = ring->channel.dtr_arr[index].dtr;

                        if (prev_rxdh != NULL &&
                            (rxdp_priv->dma_offset & (~0xFFF)) !=
                            rxdp_priv->dma_offset) {
                                vxge_assert((char *) prev_rxdh +
                                    ring->rxd_size == rxdh);
                        }
                }
        }
#endif

        __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

        ring->db_byte_count +=
            VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post - Process rxd after post.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void
vxge_hal_ring_rxd_post_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        __hal_ring_rxd_priv_t *priv;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        /* do POST */
        rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

        rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        priv = __hal_ring_rxd_priv(ring, rxdp);
        vxge_os_dma_sync(ring->channel.pdev,
            priv->dma_handle,
            priv->dma_addr,
            priv->dma_offset,
            ring->rxd_size,
            VXGE_OS_DMA_DIR_TODEVICE);
#endif
        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post - Post descriptor on the ring.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/X3100 interface specification for a given service (LL, etc.).
 *
 */
void
vxge_hal_ring_rxd_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        /* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
        rxdp->control_0 = 0;

#if defined(VXGE_DEBUG_ASSERT)
        /* make sure device overwrites the (illegal) t_code on completion */
        rxdp->control_0 |=
            VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

        rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
        rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        {
                __hal_ring_rxd_priv_t *rxdp_temp1;
                rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
                vxge_os_dma_sync(ring->channel.pdev,
                    rxdp_temp1->dma_handle,
                    rxdp_temp1->dma_addr,
                    rxdp_temp1->dma_offset,
                    ring->rxd_size,
                    VXGE_OS_DMA_DIR_TODEVICE);
        }
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
        if (TRUE) {
                if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {

                        vxge_hal_rxd_h prev_rxdh;
                        __hal_ring_rxd_priv_t *rxdp_temp2;

                        rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
                        prev_rxdh =
                            ring->channel.dtr_arr[VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;

                        if (prev_rxdh != NULL &&
                            (rxdp_temp2->dma_offset & (~0xFFF)) != rxdp_temp2->dma_offset)
                                vxge_assert((char *) prev_rxdh + ring->rxd_size == rxdh);
                }
        }
#endif

        __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

        ring->db_byte_count +=
            VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void
vxge_hal_ring_rxd_post_post_wmb(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        /* Do memory barrier before changing the ownership */
        vxge_os_wmb();

        vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
 * @vpath_handle: virtual Path handle.
 *
 * Post Doorbell after posting the rxd(s).
 */
void
vxge_hal_ring_rxd_post_post_db(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

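        /*
         * Post no more doorbell bytes than the adapter's RxD memory can
         * currently accept; any remainder stays in db_byte_count and is
         * posted once completions replenish rxd_mem_avail.
         */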
        if (ring->db_byte_count <= ring->rxd_mem_avail) {
                __hal_rxd_db_post(vpath_handle, ring->db_byte_count);
                ring->rxd_mem_avail -= ring->db_byte_count;
                ring->db_byte_count = 0;
        } else {
                __hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail);
                ring->db_byte_count -= ring->rxd_mem_avail;
                ring->rxd_mem_avail = 0;
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_is_next_rxd_completed - Check if the next rxd is completed
 * @vpath_handle: Virtual Path handle.
 *
 * Checks if the _next_ completed descriptor is in host memory
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
vxge_hal_status_e
vxge_hal_ring_is_next_rxd_completed(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_ring_t *ring;
        vxge_hal_rxd_h rxdh;
        vxge_hal_ring_rxd_1_t *rxdp;    /* doesn't matter 1, 3 or 5... */
        __hal_device_t *hldev;
        vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        __hal_channel_dtr_try_complete(&ring->channel, &rxdh);

        rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

        if (rxdp != NULL) {

                /* check whether it is not the end */
                if ((!(rxdp->control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) &&
                    (!(rxdp->control_1 &
                    VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) {

                        status = VXGE_HAL_OK;
                }
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, status);
        return (status);
}

/*
 * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @vpath_handle: Virtual path handle.
 * @rxdh: Descriptor handle. Returned by HAL.
 * @rxd_priv: Buffer to return a pointer to the per rxd space allocated
 * @t_code: Transfer code, as per X3100 User Guide,
 *              Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses ring callback (*vxge_hal_ring_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * vxge_hal_ring_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * vxge_hal_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case X3100 will complete the descriptor and indicate
 * to the host that the received data is not to be used.
 * For details please refer to X3100 User Guide.
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hal_ring_callback_f {},
 * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}.
 */
vxge_hal_status_e
vxge_hal_ring_rxd_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h *rxdh,
    void **rxd_priv,
    u8 *t_code)
{
        __hal_ring_t *ring;
        vxge_hal_ring_rxd_5_t *rxdp;    /* doesn't matter 1, 3 or 5... */
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        __hal_ring_rxd_priv_t *priv;
#endif
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_device_t *hldev;
        vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        u64 own, control_0, control_1;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
            (rxd_priv != NULL) && (t_code != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
            "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv,
            (ptr_t) t_code);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        *rxdh = 0;
        *rxd_priv = NULL;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        __hal_channel_dtr_try_complete(&ring->channel, rxdh);

        rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh;
        if (rxdp != NULL) {

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
                /*
                 * Note: 24 bytes at most means:
                 *      - Control_3 in case of 5-buffer mode
                 *      - Control_1 and Control_2
                 *
                 * This is the only length that needs to be invalidated,
                 * for all channel types.
                 */
1497                 priv = __hal_ring_rxd_priv(ring, rxdp);
1498                 vxge_os_dma_sync(ring->channel.pdev,
1499                     priv->dma_handle,
1500                     priv->dma_addr,
1501                     priv->dma_offset,
1502                     24,
1503                     VXGE_OS_DMA_DIR_FROMDEVICE);
1504 #endif
                *t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0);

                control_0 = rxdp->control_0;
                control_1 = rxdp->control_1;
                own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

                /*
                 * Check whether the descriptor has completed: the adapter
                 * no longer owns it, or the frame was dropped.
                 */
                if ((!own && !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) ||
                    (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) {

#ifndef VXGE_HAL_IRQ_POLLING
                        if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
                                /*
                                 * reset it, since we don't want to return
                                 * garbage to the ULD
                                 */
                                *rxdh = 0;
                                status = VXGE_HAL_COMPLETIONS_REMAIN;
                        } else {
#endif
                                __hal_channel_dtr_complete(&ring->channel);

                                *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

                                /* credit back this descriptor's doorbell bytes */
                                ring->rxd_mem_avail +=
                                    (VXGE_HAL_RING_HAL_PRIV(ring, rxdp))->db_bytes;

                                ring->stats->common_stats.usage_cnt++;
                                if (ring->stats->common_stats.usage_max <
                                    ring->stats->common_stats.usage_cnt)
                                        ring->stats->common_stats.usage_max =
                                            ring->stats->common_stats.usage_cnt;

                                /* accumulate completed bytes, per buffer mode */
                                switch (ring->buffer_mode) {
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_1:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET(
                                            rxdp->control_1);
                                        break;
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_3:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET(
                                            rxdp->control_1);
                                        break;
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_5:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET(
                                            rxdp->control_2) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET(
                                            rxdp->control_2);
                                        break;
                                }

                                status = VXGE_HAL_OK;
#ifndef VXGE_HAL_IRQ_POLLING
                        }
#endif
                }
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, status);
        return (status);
}


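/*
 * Usage sketch (illustrative only, not part of the HAL): a minimal ULD
 * receive-completion loop built from the entry points in this file,
 * assuming a valid vpath_handle. uld_deliver_frame() and uld_refill_ring()
 * are hypothetical driver-side helpers, not HAL functions.
 *
 *      vxge_hal_rxd_h rxdh;
 *      void *rxd_priv;
 *      u8 t_code;
 *
 *      while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh,
 *          &rxd_priv, &t_code) == VXGE_HAL_OK) {
 *              if (t_code != 0x0)      // 0x0: transfer ok
 *                      (void) vxge_hal_ring_handle_tcode(vpath_handle,
 *                          rxdh, t_code);
 *              uld_deliver_frame(rxd_priv);
 *              vxge_hal_ring_rxd_free(vpath_handle, rxdh);
 *      }
 *      uld_refill_ring(vpath_handle);
 */
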
/*
 * vxge_hal_ring_handle_tcode - Handle transfer code.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the X3100 user guide)
 *       "transfer codes".
 *
 * Handle the descriptor's transfer code. The latter comes with each
 * completed descriptor.
 *
 * Returns: one of the vxge_hal_status_e {} enumerated types.
 * VXGE_HAL_OK                  - for success.
 * VXGE_HAL_ERR_INVALID_TCODE   - when the transfer code is not recognized.
 */
vxge_hal_status_e
vxge_hal_ring_handle_tcode(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh,
    u8 t_code)
{
        __hal_device_t *hldev;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", "
            "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%x",
            (ptr_t) vpath_handle, (ptr_t) rxdh, t_code);

        switch (t_code) {
        case 0x0:
                /* 0x0: Transfer ok. */
                break;
        case 0x1:
                /*
                 * 0x1: Layer 3 checksum presentation
                 *      configuration mismatch.
                 */
                break;
        case 0x2:
                /*
                 * 0x2: Layer 4 checksum presentation
                 *      configuration mismatch.
                 */
                break;
        case 0x3:
                /*
                 * 0x3: Layer 3 and Layer 4 checksum
                 *      presentation configuration mismatch.
                 */
                break;
        case 0x4:
                /* 0x4: Reserved. */
                break;
        case 0x5:
                /*
                 * 0x5: Layer 3 error: unparseable packet,
                 *      such as unknown IPv6 header.
                 */
                break;
        case 0x6:
                /*
                 * 0x6: Layer 2 error: frame integrity
                 *      error (such as FCS or ECC).
                 */
                break;
        case 0x7:
                /*
                 * 0x7: Buffer size error: the RxD buffer(s)
                 *      were not appropriately sized and
                 *      data loss occurred.
                 */
                break;
        case 0x8:
                /* 0x8: Internal ECC error; RxD corrupted. */
                __hal_device_handle_error(vp->vpath->hldev,
                    vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR);
                break;
        case 0x9:
                /*
                 * 0x9: Benign overflow: the contents of
                 *      Segment1 exceeded the capacity of
                 *      Buffer1 and the remainder was placed
                 *      in Buffer2. Segment2 now starts in
                 *      Buffer3. No data loss or errors occurred.
                 */
                break;
        case 0xA:
                /*
                 * 0xA: Buffer size 0: one of the RxD's
                 *      assigned buffers has a size of 0 bytes.
                 */
                break;
        case 0xB:
                /* 0xB: Reserved. */
                break;
        case 0xC:
                /*
                 * 0xC: Frame dropped, either due to a
                 *      VPath reset or because of a VPIN mismatch.
                 */
                break;
        case 0xD:
                /* 0xD: Reserved. */
                break;
        case 0xE:
                /* 0xE: Reserved. */
                break;
        case 0xF:
                /*
                 * 0xF: Multiple errors: more than one
                 *      transfer code condition occurred.
                 */
                break;
        default:
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
                return (VXGE_HAL_ERR_INVALID_TCODE);
        }

        /* per-transfer-code completion counter, indexed by t_code */
        vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, VXGE_HAL_OK);
        return (VXGE_HAL_OK);
}


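/*
 * Diagnostic sketch (illustrative only): the counters updated above can be
 * summed to estimate how many completions carried a non-zero transfer code.
 * This assumes rxd_t_code_err_cnt has one slot per 4-bit transfer code and
 * that the caller holds a valid __hal_vpath_handle_t, as in this file.
 *
 *      u32 i, bad_completions = 0;
 *
 *      for (i = 1; i <= 0xF; i++)
 *              bad_completions +=
 *                  vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[i];
 */
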
/*
 * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via vxge_hal_ring_attr.
 */
void *
vxge_hal_ring_rxd_private_get(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;

        return (VXGE_HAL_RING_ULD_PRIV(
            ((__hal_ring_t *) vp->vpath->ringh), rxdh));
}

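/*
 * Usage sketch (illustrative only): recovering a driver-private structure
 * from a completed descriptor. uld_rxd_priv_t is a hypothetical ULD-side
 * type whose space would have been requested via vxge_hal_ring_attr.
 *
 *      typedef struct uld_rxd_priv_t {
 *              bus_dmamap_t    dma_map;
 *              struct mbuf     *mbuf;
 *      } uld_rxd_priv_t;
 *
 *      uld_rxd_priv_t *priv = (uld_rxd_priv_t *)
 *          vxge_hal_ring_rxd_private_get(vpath_handle, rxdh);
 */
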
/*
 * vxge_hal_ring_rxd_free - Free descriptor.
 * @vpath_handle: Virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor can again
 * be:
 *
 * - reserved (vxge_hal_ring_rxd_reserve);
 *
 * - posted (vxge_hal_ring_rxd_post);
 *
 * - completed (vxge_hal_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hal_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc. An illustrative sketch of this cycle follows the
 * function body below.
 */
void
vxge_hal_ring_rxd_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_ring_t *ring;
        __hal_device_t *hldev;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        __hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
#if defined(VXGE_OS_MEMORY_CHECK)
        VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}
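
/*
 * Lifecycle sketch (illustrative only): the reserve -> post -> complete ->
 * free cycle described above. The vxge_hal_ring_rxd_reserve() and
 * vxge_hal_ring_rxd_post() argument lists shown here are assumptions
 * modeled on the signatures in this file; consult the actual prototypes.
 * uld_attach_buffer() is a hypothetical driver helper that binds a
 * DMA-able receive buffer to the descriptor.
 *
 *      vxge_hal_rxd_h rxdh;
 *      void *rxd_priv;
 *
 *      if (vxge_hal_ring_rxd_reserve(vpath_handle, &rxdh, &rxd_priv) ==
 *          VXGE_HAL_OK) {
 *              uld_attach_buffer(rxdh, rxd_priv);
 *              vxge_hal_ring_rxd_post(vpath_handle, rxdh);
 *      }
 */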