/*-
 * Copyright(c) 2002-2011 Exar Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 *    3. Neither the name of the Exar Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include <dev/vxge/vxgehal/vxgehal.h>

/*
 * __hal_ring_block_memblock_idx - Return the memblock index
 * @block: Virtual address of memory block
 *
 * This function returns the index of the memory block that owns @block
 */
static inline u32
__hal_ring_block_memblock_idx(
    vxge_hal_ring_block_t block)
{
        return (u32)*((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET)));
}

/*
 * __hal_ring_block_memblock_idx_set - Sets the memblock index
 * @block: Virtual address of memory block
 * @memblock_idx: Index of memory block
 *
 * This function sets the memblock index in a memory block
 */
static inline void
__hal_ring_block_memblock_idx_set(
    vxge_hal_ring_block_t block,
    u32 memblock_idx)
{
        *((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx;
}

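/*
 * Layout sketch (illustrative, not compiled): each RxD block is one host
 * page whose tail carries the two control words used by the accessors in
 * this file.  The exact offsets are the HAL constants from vxgehal-ring.h.
 *
 *      u8 *block;      // VXGE_OS_HOST_PAGE_SIZE bytes
 *      // block[0 .. rxds_per_block * rxd_size - 1]: the RxDs themselves
 *      // *(u64 *)(block + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET):
 *      //      DMA address of the next RxD block in the chain
 *      // *(u64 *)(block + VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET):
 *      //      index of the mempool memblock that owns this block
 */
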
#if 0
/*
 * __hal_ring_block_next_pointer - Returns the dma address of next block
 * @block: RxD block
 *
 * Returns the dma address of the next block stored in the RxD block
 */
static inline dma_addr_t
/* LINTED */
__hal_ring_block_next_pointer(
    vxge_hal_ring_block_t *block)
{
        return (dma_addr_t)*((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)));
}
#endif

/*
 * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block
 * @block: RxD block
 * @dma_next: dma address of next block
 *
 * Sets the next block pointer in RxD block
 */
static inline void
__hal_ring_block_next_pointer_set(
    vxge_hal_ring_block_t *block,
    dma_addr_t dma_next)
{
        *((u64 *) ((void *)((u8 *) block +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next;
}

/*
 * __hal_ring_first_block_address_get - Returns the dma address of the
 *              first block
 * @ringh: Handle to the ring
 *
 * Returns the dma address of the first RxD block
 */
u64
__hal_ring_first_block_address_get(
    vxge_hal_ring_h ringh)
{
        __hal_ring_t *ring = (__hal_ring_t *) ringh;
        vxge_hal_mempool_dma_t *dma_object;

        dma_object = __hal_mempool_memblock_dma(ring->mempool, 0);

        vxge_assert(dma_object != NULL);

        return (dma_object->addr);
}

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
/*
 * __hal_ring_item_dma_offset - Return the dma offset of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma offset
 *
 * This function returns the dma offset of a given item
 */
static ptrdiff_t
__hal_ring_item_dma_offset(
    vxge_hal_mempool_h mempoolh,
    void *item)
{
        u32 memblock_idx;
        void *memblock;
        vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
        __hal_device_t *hldev;

        /* this function takes no dma_handle; only validate its own args */
        vxge_assert((mempoolh != NULL) && (item != NULL));

        hldev = (__hal_device_t *) mempool->devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT,
            (ptr_t) mempoolh, (ptr_t) item);

        /* get owner memblock index */
        memblock_idx = __hal_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return ((u8 *) item - (u8 *) memblock);
}
#endif

/*
 * __hal_ring_item_dma_addr - Return the dma address of an item
 * @mempoolh: Handle to the memory pool of the ring
 * @item: Item for which to get the dma address
 * @dma_handle: dma handle
 *
 * This function returns the dma address of a given item
 */
static dma_addr_t
__hal_ring_item_dma_addr(
    vxge_hal_mempool_h mempoolh,
    void *item,
    pci_dma_h *dma_handle)
{
        u32 memblock_idx;
        void *memblock;
        vxge_hal_mempool_dma_t *memblock_dma_object;
        vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
        __hal_device_t *hldev;
        ptrdiff_t dma_item_offset;

        vxge_assert((mempoolh != NULL) && (item != NULL) &&
            (dma_handle != NULL));

        hldev = (__hal_device_t *) mempool->devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", "
            "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh,
            (ptr_t) item, (ptr_t) dma_handle);

        /* get owner memblock index */
        memblock_idx = __hal_ring_block_memblock_idx((u8 *) item);

        /* get owner memblock by memblock index */
        memblock = __hal_mempool_memblock(
            (vxge_hal_mempool_t *) mempoolh, memblock_idx);

        /* get memblock DMA object by memblock index */
        memblock_dma_object = __hal_mempool_memblock_dma(
            (vxge_hal_mempool_t *) mempoolh, memblock_idx);

        /* calculate offset in the memblock of this item */
        /* LINTED */
        dma_item_offset = (u8 *) item - (u8 *) memblock;

        *dma_handle = memblock_dma_object->handle;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (memblock_dma_object->addr + dma_item_offset);
}

/*
 * __hal_ring_rxdblock_link - Link the RxD blocks
 * @mempoolh: Handle to the memory pool of the ring
 * @ring: ring
 * @from: RxD block from which to link
 * @to: RxD block to which to link to
 *
 * This function writes the DMA address of the @to block into the
 * next-block pointer of the @from block
 */
static void
__hal_ring_rxdblock_link(
    vxge_hal_mempool_h mempoolh,
    __hal_ring_t *ring,
    u32 from,
    u32 to)
{
        vxge_hal_ring_block_t *to_item, *from_item;
        dma_addr_t to_dma, from_dma;
        pci_dma_h to_dma_handle, from_dma_handle;
        __hal_device_t *hldev;

        vxge_assert((mempoolh != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", "
            "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to);

        /* get "from" RxD block */
        from_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
            (vxge_hal_mempool_t *) mempoolh, from);
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
            (vxge_hal_mempool_t *) mempoolh, to);
        vxge_assert(to_item);

        /* get the DMA start address of the "to" RxD block */
        to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

        /*
         * set the next pointer of the "from" RxD block to point to the
         * "to" block's DMA start address
         */
        __hal_ring_block_next_pointer_set(from_item, to_dma);

        /* get the "from" RxD block's DMA start address */
        from_dma = __hal_ring_item_dma_addr(
            mempoolh, from_item, &from_dma_handle);

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        /* we must sync the "from" RxD block, so hardware will see it */
        vxge_os_dma_sync(ring->channel.pdev,
            from_dma_handle,
            from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
            __hal_ring_item_dma_offset(mempoolh, from_item) +
            VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
            sizeof(u64),
            VXGE_OS_DMA_DIR_TODEVICE);
#endif

        vxge_hal_info_log_ring(
            "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT,
            from, (ptr_t) from_dma, to, (ptr_t) to_dma);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

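/*
 * Resulting chain (illustrative): __hal_ring_mempool_item_alloc() below
 * calls this once per block, linking each block to its successor and the
 * last one back to the first, so the next pointers form a circle the
 * hardware can follow:
 *
 *      block 0 -> block 1 -> ... -> block N-1 -> block 0
 */
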
/*
 * __hal_ring_mempool_item_alloc - Mempool item-alloc callback for RxD blocks
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 *
 * This function is the callback passed to __hal_mempool_create to create
 * the memory pool for RxD blocks
 */
static vxge_hal_status_e
__hal_ring_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
        u32 i;
        __hal_ring_t *ring = (__hal_ring_t *) userdata;
        __hal_device_t *hldev;

        vxge_assert((item != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_pool("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_pool(
            "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
            "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
            "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
            "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
            memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index,
            is_last, (ptr_t) userdata);

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {

                void *uld_priv;
                void *rxdblock_priv;
                __hal_ring_rxd_priv_t *rxd_priv;
                vxge_hal_ring_rxd_1_t *rxdp;
                u32 memblock_item_idx;
                u32 dtr_index = item_index * ring->rxds_per_block + i;

                ring->channel.dtr_arr[dtr_index].dtr =
                    ((u8 *) item) + i * ring->rxd_size;

                /*
                 * Note: memblock_item_idx is index of the item within
                 * the memblock. For instance, in case of three RxD-blocks
                 * per memblock this value can be 0, 1 or 2.
                 */
                rxdblock_priv = __hal_mempool_item_priv(
                    (vxge_hal_mempool_t *) mempoolh,
                    memblock_index,
                    item,
                    &memblock_item_idx);

                rxdp = (vxge_hal_ring_rxd_1_t *)
                    ring->channel.dtr_arr[dtr_index].dtr;

                uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i);
                rxd_priv =
                    (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) +
                    ring->per_rxd_space));

                ((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index;

                ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv;
                ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv;

                /* pre-format per-RxD Ring's private */
                /* LINTED */
                rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock;
                rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
                rxd_priv->dma_handle = dma_object->handle;
#if defined(VXGE_DEBUG_ASSERT)
                rxd_priv->dma_object = dma_object;
#endif
                rxd_priv->db_bytes = ring->rxd_size;

                if (i == (ring->rxds_per_block - 1)) {
                        rxd_priv->db_bytes +=
                            (((vxge_hal_mempool_t *) mempoolh)->memblock_size -
                            (ring->rxds_per_block * ring->rxd_size));
                }
        }

        __hal_ring_block_memblock_idx_set((u8 *) item, memblock_index);
        if (is_last) {
                /* link the last block with the first one */
                __hal_ring_rxdblock_link(mempoolh, ring, item_index, 0);
        }

        if (item_index > 0) {
                /* link this RxD block with the previous one */
                __hal_ring_rxdblock_link(mempoolh, ring, item_index - 1,
                    item_index);
        }

        vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

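/*
 * Indexing sketch (illustrative): descriptors are numbered globally across
 * blocks, so with, say, rxds_per_block == 4, RxD i of block item_index
 * lands at:
 *
 *      dtr_index = item_index * 4 + i;         // block 2, RxD 1 -> 9
 *
 * host_control stores this dtr_index, which is presumably what
 * VXGE_HAL_RING_RXD_INDEX() reads back on completion to locate the
 * descriptor in dtr_arr[].
 */
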
/*
 * __hal_ring_mempool_item_free - Mempool item-free callback for RxD blocks
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user
 *
 * This function is the callback passed to __hal_mempool_free to destroy
 * the memory pool for RxD blocks
 */
static vxge_hal_status_e
__hal_ring_mempool_item_free(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
        __hal_ring_t *ring = (__hal_ring_t *) userdata;
        __hal_device_t *hldev;

        vxge_assert((item != NULL) && (ring != NULL));

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_pool("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_pool(
            "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
            "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
            "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
            "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
            memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index,
            is_last, (ptr_t) userdata);

        /* nothing per-item to free; the mempool owns the memory */

        vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_initial_replenish - Initial replenish of RxDs
 * @ring: ring
 * @reopen: Flag to denote if it is open or reopen
 *
 * This function replenishes the RxDs from reserve array to work array
 */
static vxge_hal_status_e
__hal_ring_initial_replenish(
    __hal_ring_t *ring,
    vxge_hal_reopen_e reopen)
{
        vxge_hal_rxd_h rxd;
        void *uld_priv;
        __hal_device_t *hldev;
        vxge_hal_status_e status;

        vxge_assert(ring != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
            (ptr_t) ring, reopen);

        while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) ==
            VXGE_HAL_OK) {

                if (ring->rxd_init) {
                        status = ring->rxd_init(ring->channel.vph,
                            rxd,
                            uld_priv,
                            VXGE_HAL_RING_RXD_INDEX(rxd),
                            ring->channel.userdata,
                            reopen);
                        if (status != VXGE_HAL_OK) {
                                vxge_hal_ring_rxd_free(ring->channel.vph, rxd);
                                vxge_hal_trace_log_ring(
                                    "<== %s:%s:%d  Result: %d",
                                    __FILE__, __func__, __LINE__, status);
                                return (status);
                        }
                }

                vxge_hal_ring_rxd_post(ring->channel.vph, rxd);
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
            __FILE__, __func__, __LINE__);
        return (VXGE_HAL_OK);
}

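/*
 * ULD callback sketch (illustrative; the exact rxd_init typedef lives in
 * the HAL headers, the shape below is only inferred from the call in
 * __hal_ring_initial_replenish above): the callback typically attaches a
 * receive buffer to the RxD and returns VXGE_HAL_OK, or an error to make
 * the HAL free the descriptor instead of posting it.
 *
 *      static vxge_hal_status_e
 *      my_rxd_init(vxge_hal_vpath_h vph, vxge_hal_rxd_h rxdh,
 *          void *uld_priv, u32 index, void *userdata,
 *          vxge_hal_reopen_e reopen)
 *      {
 *              // allocate an mbuf/buffer, load its DMA address and
 *              // program it into the RxD via the buffer-set helpers
 *              // from vxgehal-ring.h
 *              return (VXGE_HAL_OK);
 *      }
 */
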
/*
 * __hal_ring_create - Create a Ring
 * @vpath_handle: Handle returned by virtual path open
 * @attr: Ring configuration parameters structure
 *
 * This function creates Ring and initializes it.
 *
 */
vxge_hal_status_e
__hal_ring_create(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_ring_attr_t *attr)
{
        vxge_hal_status_e status;
        __hal_ring_t *ring;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        vxge_hal_ring_config_t *config;
        __hal_device_t *hldev;

        vxge_assert((vpath_handle != NULL) && (attr != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) attr);

        if ((vpath_handle == NULL) || (attr == NULL)) {
                vxge_hal_err_log_ring("null pointer passed == > %s : %d",
                    __func__, __LINE__);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 1",
                    __FILE__, __func__, __LINE__);
                return (VXGE_HAL_FAIL);
        }

        config =
            &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring;

        /* round the ring length up to a whole number of RxD blocks */
        config->ring_length = ((config->ring_length +
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) /
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) *
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

        ring = (__hal_ring_t *) vxge_hal_channel_allocate(
            (vxge_hal_device_h) vp->vpath->hldev,
            vpath_handle,
            VXGE_HAL_CHANNEL_TYPE_RING,
            config->ring_length,
            attr->per_rxd_space,
            attr->userdata);

        if (ring == NULL) {
                vxge_hal_err_log_ring("Memory allocation failed == > %s : %d",
                    __func__, __LINE__);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__,
                    VXGE_HAL_ERR_OUT_OF_MEMORY);
                return (VXGE_HAL_ERR_OUT_OF_MEMORY);
        }

        vp->vpath->ringh = (vxge_hal_ring_h) ring;

        ring->stats = &vp->vpath->sw_stats->ring_stats;

        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;

        ring->indicate_max_pkts = config->indicate_max_pkts;
        ring->buffer_mode = config->buffer_mode;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

        ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
            sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

        /* round the private size up to a whole number of cache lines */
        ring->rxd_priv_size =
            ((ring->rxd_priv_size + __vxge_os_cacheline_size - 1) /
            __vxge_os_cacheline_size) * __vxge_os_cacheline_size;

        /*
         * how many RxDs can fit into one block. Depends on configured
         * buffer_mode.
         */
        ring->rxds_per_block =
            vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

        /* calculate actual RxD block private size */
        ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

        ring->rxd_mem_avail =
            ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size;

        ring->db_byte_count = 0;

        ring->mempool = vxge_hal_mempool_create(
            (vxge_hal_device_h) vp->vpath->hldev,
            VXGE_OS_HOST_PAGE_SIZE,
            VXGE_OS_HOST_PAGE_SIZE,
            ring->rxdblock_priv_size,
            ring->config->ring_length / ring->rxds_per_block,
            ring->config->ring_length / ring->rxds_per_block,
            __hal_ring_mempool_item_alloc,
            __hal_ring_mempool_item_free,
            ring);

        if (ring->mempool == NULL) {
                __hal_ring_delete(vpath_handle);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
                return (VXGE_HAL_ERR_OUT_OF_MEMORY);
        }

        status = vxge_hal_channel_initialize(&ring->channel);
        if (status != VXGE_HAL_OK) {
                __hal_ring_delete(vpath_handle);
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, status);
                return (status);
        }

        /*
         * Note:
         * Specifying rxd_init callback means two things:
         * 1) rxds need to be initialized by ULD at channel-open time;
         * 2) rxds need to be posted at channel-open time
         *      (that's what the initial_replenish() below does)
         * Currently we don't have a case when the 1) is done without the 2).
         */
        if (ring->rxd_init) {
                if ((status = __hal_ring_initial_replenish(
                    ring,
                    VXGE_HAL_OPEN_NORMAL))
                    != VXGE_HAL_OK) {
                        __hal_ring_delete(vpath_handle);
                        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                            __FILE__, __func__, __LINE__, status);
                        return (status);
                }
        }

        /*
         * initial replenish will increment the counter in its post() routine,
         * we have to reset it
         */
        ring->stats->common_stats.usage_cnt = 0;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
        return (VXGE_HAL_OK);
}

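/*
 * Usage sketch (illustrative; the field names are the attr members used
 * above, the surrounding vpath-open path is assumed): a ULD fills a
 * vxge_hal_ring_attr_t and the vpath open path then invokes
 * __hal_ring_create():
 *
 *      vxge_hal_ring_attr_t attr;
 *
 *      attr.callback = my_rx_completion;       // per-packet completion
 *      attr.rxd_init = my_rxd_init;            // buffer attach, see above
 *      attr.rxd_term = my_rxd_term;            // buffer release on abort
 *      attr.per_rxd_space = sizeof(my_rxd_ctx_t);
 *      attr.userdata = my_softc;
 */
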
/*
 * __hal_ring_abort - Abort the ring
 * @ringh: Ring to be reset
 * @reopen: See  vxge_hal_reopen_e {}.
 *
 * This function terminates the RxDs of the ring
 */
void
__hal_ring_abort(
    vxge_hal_ring_h ringh,
    vxge_hal_reopen_e reopen)
{
        u32 i = 0;
        vxge_hal_rxd_h rxdh;

        __hal_device_t *hldev;
        __hal_ring_t *ring = (__hal_ring_t *) ringh;

        vxge_assert(ringh != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
            (ptr_t) ringh, reopen);

        if (ring->rxd_term) {
                __hal_channel_for_each_dtr(&ring->channel, rxdh, i) {
                        if (!__hal_channel_is_posted_dtr(&ring->channel, i)) {
                                ring->rxd_term(ring->channel.vph, rxdh,
                                    VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                                    VXGE_HAL_RXD_STATE_FREED,
                                    ring->channel.userdata,
                                    reopen);
                        }
                }
        }

        for (;;) {
                __hal_channel_dtr_try_complete(&ring->channel, &rxdh);
                if (rxdh == NULL)
                        break;

                __hal_channel_dtr_complete(&ring->channel);
                if (ring->rxd_term) {
                        ring->rxd_term(ring->channel.vph, rxdh,
                            VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
                            VXGE_HAL_RXD_STATE_POSTED,
                            ring->channel.userdata,
                            reopen);
                }
                __hal_channel_dtr_free(&ring->channel,
                    VXGE_HAL_RING_RXD_INDEX(rxdh));
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * __hal_ring_reset - Resets the ring
 * @ringh: Ring to be reset
 *
 * This function resets the ring during vpath reset operation
 */
vxge_hal_status_e
__hal_ring_reset(
    vxge_hal_ring_h ringh)
{
        __hal_ring_t *ring = (__hal_ring_t *) ringh;
        __hal_device_t *hldev;
        vxge_hal_status_e status;
        __hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph;

        vxge_assert(ringh != NULL);

        hldev = (__hal_device_t *) ring->channel.devh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT,
            (ptr_t) ringh);

        __hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY);

        status = __hal_channel_reset(&ring->channel);

        if (status != VXGE_HAL_OK) {
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__, status);
                return (status);
        }

        ring->rxd_mem_avail = vph->vpath->rxd_mem_size;
        ring->db_byte_count = 0;

        if (ring->rxd_init) {
                if ((status = __hal_ring_initial_replenish(
                    ring,
                    VXGE_HAL_RESET_ONLY))
                    != VXGE_HAL_OK) {
                        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                            __FILE__, __func__, __LINE__, status);
                        return (status);
                }
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

/*
 * __hal_ring_delete - Removes the ring
 * @vpath_handle: Virtual path handle to which this queue belongs
 *
 * This function frees up the memory pool and removes the ring
 */
void
__hal_ring_delete(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        vxge_assert(ring->channel.pdev);

        __hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);

        if (ring->mempool) {
                vxge_hal_mempool_destroy(ring->mempool);
        }

        vxge_hal_channel_terminate(&ring->channel);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif

        vxge_hal_channel_free(&ring->channel);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * __hal_ring_frame_length_set - Set the maximum frame length of recv frames.
 * @vpath: virtual Path
 * @new_frmlen: New frame length
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_ERR_VPATH_NOT_OPEN - The virtual path is not open.
 *
 */
vxge_hal_status_e
__hal_ring_frame_length_set(
    __hal_virtualpath_t *vpath,
    u32 new_frmlen)
{
        u64 val64;
        __hal_device_t *hldev;

        vxge_assert(vpath != NULL);

        hldev = (__hal_device_t *) vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
            (ptr_t) vpath, new_frmlen);

        if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {
                vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
                    __FILE__, __func__, __LINE__,
                    VXGE_HAL_ERR_VPATH_NOT_OPEN);
                return (VXGE_HAL_ERR_VPATH_NOT_OPEN);
        }

        val64 = vxge_os_pio_mem_read64(
            vpath->hldev->header.pdev,
            vpath->hldev->header.regh0,
            &vpath->vp_reg->rxmac_vcfg0);

        val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);

        if (vpath->vp_config->ring.max_frm_len !=
            VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {
                val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vpath->vp_config->ring.max_frm_len +
                    VXGE_HAL_MAC_HEADER_MAX_SIZE);
        } else {
                val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
                    VXGE_HAL_MAC_HEADER_MAX_SIZE);
        }

        vxge_os_pio_mem_write64(
            vpath->hldev->header.pdev,
            vpath->hldev->header.regh0,
            val64,
            &vpath->vp_reg->rxmac_vcfg0);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);

        return (VXGE_HAL_OK);
}

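/*
 * Register math sketch (illustrative): the 14-bit RTS_MAX_FRM_LEN field of
 * rxmac_vcfg0 is first cleared, then programmed with the on-wire frame
 * length, i.e. the configured payload limit plus the MAC header allowance:
 *
 *      new_frmlen = 1500;              // MTU-derived payload limit
 *      frm_len = new_frmlen + VXGE_HAL_MAC_HEADER_MAX_SIZE;
 *      val64 = (val64 & ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff)) |
 *          VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(frm_len);
 */
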
/*
 * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
 *              with a valid handle.
 * @rxd_priv: Buffer to return pointer to per rxd private space
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via vxge_hal_ring_rxd_post().
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
vxge_hal_status_e
vxge_hal_ring_rxd_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h *rxdh,
    void **rxd_priv)
{
        vxge_hal_status_e status;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
            (rxd_priv != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
            "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
            (ptr_t) rxdh, (ptr_t) rxd_priv);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        status = __hal_channel_dtr_reserve(&ring->channel, rxdh);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        if (status == VXGE_HAL_OK) {
                vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;

                /* instead of memset: reset this RxD */
                rxdp->control_0 = rxdp->control_1 = 0;

                *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

#if defined(VXGE_OS_MEMORY_CHECK)
                VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
#endif
        }

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, status);
        return (status);
}

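/*
 * Usage sketch (illustrative; the buffer-programming step is an
 * assumption, the reserve/post/doorbell calls are the functions in this
 * file): the ULD reserves a descriptor, fills it, and posts it back.
 *
 *      vxge_hal_rxd_h rxdh;
 *      void *priv;
 *
 *      if (vxge_hal_ring_rxd_reserve(vph, &rxdh, &priv) == VXGE_HAL_OK) {
 *              // program the receive buffer(s) into the RxD here, e.g.
 *              // via the 1/3/5-buffer set helpers in vxgehal-ring.h
 *              vxge_hal_ring_rxd_post(vph, rxdh);
 *              vxge_hal_ring_rxd_post_post_db(vph);    // ring the doorbell
 *      }
 */
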
/*
 * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void
vxge_hal_ring_rxd_pre_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
#if defined(VXGE_DEBUG_ASSERT)
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#endif
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_DEBUG_ASSERT)
        /* make sure device overwrites the (illegal) t_code on completion */
        rxdp->control_0 |=
            VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
        if (TRUE) {
                if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
                        vxge_hal_rxd_h prev_rxdh;
                        __hal_ring_rxd_priv_t *rxdp_priv;
                        u32 index;

                        rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);

                        if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
                                index = ring->channel.length;
                        else
                                index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;

                        prev_rxdh = ring->channel.dtr_arr[index].dtr;

                        if (prev_rxdh != NULL &&
                            (rxdp_priv->dma_offset & (~0xFFF)) !=
                            rxdp_priv->dma_offset) {
                                vxge_assert((char *) prev_rxdh +
                                    ring->rxd_size == rxdh);
                        }
                }
        }
#endif

        __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

        ring->db_byte_count +=
            VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post - Process rxd after post.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void
vxge_hal_ring_rxd_post_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        __hal_ring_rxd_priv_t *priv;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        /* do POST */
        rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

        rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        priv = __hal_ring_rxd_priv(ring, rxdp);
        vxge_os_dma_sync(ring->channel.pdev,
            priv->dma_handle,
            priv->dma_addr,
            priv->dma_offset,
            ring->rxd_size,
            VXGE_OS_DMA_DIR_TODEVICE);
#endif

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post - Post descriptor on the ring.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/X3100 interface specification for a given service (LL, etc.).
 *
 */
void
vxge_hal_ring_rxd_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        /* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
        rxdp->control_0 = 0;

#if defined(VXGE_DEBUG_ASSERT)
        /* make sure device overwrites the (illegal) t_code on completion */
        rxdp->control_0 |=
            VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

        rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
        rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        {
                __hal_ring_rxd_priv_t *rxdp_temp1;

                rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
                vxge_os_dma_sync(ring->channel.pdev,
                    rxdp_temp1->dma_handle,
                    rxdp_temp1->dma_addr,
                    rxdp_temp1->dma_offset,
                    ring->rxd_size,
                    VXGE_OS_DMA_DIR_TODEVICE);
        }
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
        if (TRUE) {
                if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {

                        vxge_hal_rxd_h prev_rxdh;
                        __hal_ring_rxd_priv_t *rxdp_temp2;

                        rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
                        prev_rxdh = ring->channel.dtr_arr[
                            VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;

                        if (prev_rxdh != NULL &&
                            (rxdp_temp2->dma_offset & (~0xFFF)) !=
                            rxdp_temp2->dma_offset)
                                vxge_assert((char *) prev_rxdh +
                                    ring->rxd_size == rxdh);
                }
        }
#endif

        __hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

        ring->db_byte_count +=
            VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void
vxge_hal_ring_rxd_post_post_wmb(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh);

        /* Do memory barrier before changing the ownership */
        vxge_os_wmb();

        vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

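/*
 * Ordering sketch (illustrative): the barrier guarantees that every
 * descriptor field the ULD filled in is globally visible before the OWN
 * bits flip, so the adapter can never fetch a half-written RxD.
 *
 *      // fill the RxD's buffer pointers, sizes, etc.
 *      vxge_os_wmb();                          // flush the writes first
 *      // only now hand ownership to the device:
 *      // control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
 */
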
/*
 * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
 * @vpath_handle: virtual Path handle.
 *
 * Post Doorbell after posting the rxd(s).
 */
void
vxge_hal_ring_rxd_post_post_db(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        __hal_device_t *hldev;
        __hal_ring_t *ring;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        /* never advertise more bytes than the vpath has RxD memory for */
        if (ring->db_byte_count <= ring->rxd_mem_avail) {
                __hal_rxd_db_post(vpath_handle, ring->db_byte_count);
                ring->rxd_mem_avail -= ring->db_byte_count;
                ring->db_byte_count = 0;
        } else {
                __hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail);
                ring->db_byte_count -= ring->rxd_mem_avail;
                ring->rxd_mem_avail = 0;
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
            __FILE__, __func__, __LINE__);
}

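/*
 * Accounting sketch (illustrative): db_byte_count accumulates the
 * db_bytes of every RxD posted since the last doorbell, while
 * rxd_mem_avail tracks how much on-chip RxD memory the vpath still has.
 * The doorbell advertises min(db_byte_count, rxd_mem_avail); completions
 * in vxge_hal_ring_rxd_next_completed() give the bytes back:
 *
 *      post:       db_byte_count += db_bytes;
 *      doorbell:   n = min(db_byte_count, rxd_mem_avail);
 *                  rxd_mem_avail -= n; db_byte_count -= n;
 *      completion: rxd_mem_avail += db_bytes;
 */
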
/*
 * vxge_hal_ring_is_next_rxd_completed - Check if the next rxd is completed
 * @vpath_handle: Virtual Path handle.
 *
 * Checks if the _next_ completed descriptor is in host memory
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
vxge_hal_status_e
vxge_hal_ring_is_next_rxd_completed(
    vxge_hal_vpath_h vpath_handle)
{
        __hal_ring_t *ring;
        vxge_hal_rxd_h rxdh;
        vxge_hal_ring_rxd_1_t *rxdp;    /* doesn't matter 1, 3 or 5... */
        __hal_device_t *hldev;
        vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        vxge_assert(vpath_handle != NULL);

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        __hal_channel_dtr_try_complete(&ring->channel, &rxdh);

        rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

        if (rxdp != NULL) {
                /* check whether it is not the end */
                if ((!(rxdp->control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) &&
                    (!(rxdp->control_1 &
                    VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) {
                        status = VXGE_HAL_OK;
                }
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, status);
        return (status);
}

/*
 * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @vpath_handle: Virtual path handle.
 * @rxdh: Descriptor handle. Returned by HAL.
 * @rxd_priv: Buffer to return a pointer to the per rxd space allocated
 * @t_code: Transfer code, as per X3100 User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses ring callback (*vxge_hal_ring_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * vxge_hal_ring_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * vxge_hal_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case X3100 will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to X3100 User Guide.
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hal_ring_callback_f {},
 * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}.
 */
vxge_hal_status_e
vxge_hal_ring_rxd_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h *rxdh,
    void **rxd_priv,
    u8 *t_code)
{
        __hal_ring_t *ring;
        vxge_hal_ring_rxd_5_t *rxdp;    /* doesn't matter 1, 3 or 5... */
#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
        __hal_ring_rxd_priv_t *priv;
#endif
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif
        __hal_device_t *hldev;
        vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
        u64 own, control_0, control_1;

        vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
            (rxd_priv != NULL) && (t_code != NULL));

        hldev = (__hal_device_t *) vp->vpath->hldev;

        vxge_hal_trace_log_ring("==> %s:%s:%d",
            __FILE__, __func__, __LINE__);

        vxge_hal_trace_log_ring(
            "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
            "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
            (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv,
            (ptr_t) t_code);

        ring = (__hal_ring_t *) vp->vpath->ringh;

        vxge_assert(ring != NULL);

        *rxdh = 0;
        *rxd_priv = NULL;

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

        __hal_channel_dtr_try_complete(&ring->channel, rxdh);

        rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh;
        if (rxdp != NULL) {

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
                /*
                 * Note: 24 bytes at most means:
                 *      - Control_3 in case of 5-buffer mode
                 *      - Control_1 and Control_2
                 *
                 * This is the only length that needs to be invalidated
                 * for these types of channels.
                 */
                priv = __hal_ring_rxd_priv(ring, rxdp);
                vxge_os_dma_sync(ring->channel.pdev,
                    priv->dma_handle,
                    priv->dma_addr,
                    priv->dma_offset,
                    24,
                    VXGE_OS_DMA_DIR_FROMDEVICE);
#endif

                *t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0);

                control_0 = rxdp->control_0;
                control_1 = rxdp->control_1;
                own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

                /* check whether it is not the end */
                if ((!own &&
                    !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) ||
                    (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) {

#ifndef VXGE_HAL_IRQ_POLLING
                        if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
                                /*
                                 * reset it. since we don't want to return
                                 * garbage to the ULD
                                 */
                                *rxdh = 0;
                                status = VXGE_HAL_COMPLETIONS_REMAIN;
                        } else {
#endif
                                __hal_channel_dtr_complete(&ring->channel);

                                *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

                                /* give the doorbell bytes back */
                                ring->rxd_mem_avail +=
                                    (VXGE_HAL_RING_HAL_PRIV(ring,
                                    rxdp))->db_bytes;

                                ring->stats->common_stats.usage_cnt++;
                                if (ring->stats->common_stats.usage_max <
                                    ring->stats->common_stats.usage_cnt)
                                        ring->stats->common_stats.usage_max =
                                            ring->stats->common_stats.usage_cnt;

                                switch (ring->buffer_mode) {
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_1:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET(
                                            rxdp->control_1);
                                        break;
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_3:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET(
                                            rxdp->control_1);
                                        break;
                                case VXGE_HAL_RING_RXD_BUFFER_MODE_5:
                                        ring->channel.poll_bytes +=
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET(
                                            rxdp->control_1) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET(
                                            rxdp->control_2) +
                                            (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET(
                                            rxdp->control_2);
                                        break;
                                }

                                status = VXGE_HAL_OK;
#ifndef VXGE_HAL_IRQ_POLLING
                        }
#endif
                }
        }

#if defined(VXGE_HAL_RX_MULTI_POST)
        vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
        vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

        vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
            __FILE__, __func__, __LINE__, status);
        return (status);
1586 }
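
/*
 * Usage sketch (disabled, following the #if 0 convention used earlier in
 * this file): a minimal ULD completion-poll loop built on
 * vxge_hal_ring_rxd_next_completed(). uld_deliver_frame() is a
 * hypothetical ULD-side hook; only the vxge_hal_* calls are HAL API.
 */
#if 0
static void
uld_poll_ring(vxge_hal_vpath_h vpath_handle)
{
        vxge_hal_rxd_h rxdh;
        void *rxd_priv;
        u8 t_code;
        vxge_hal_status_e status;

        for (;;) {
                status = vxge_hal_ring_rxd_next_completed(vpath_handle,
                    &rxdh, &rxd_priv, &t_code);
                if (status != VXGE_HAL_OK)
                        break;

                if (t_code != 0)        /* 0x0 means transfer OK */
                        (void) vxge_hal_ring_handle_tcode(vpath_handle,
                            rxdh, t_code);

                uld_deliver_frame(rxd_priv);    /* hypothetical ULD hook */
                vxge_hal_ring_rxd_free(vpath_handle, rxdh);
        }

        /*
         * Here status is VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS, or
         * VXGE_HAL_COMPLETIONS_REMAIN if indicate_max_pkts was reached.
         */
}
#endif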
1587
1588
1589 /*
1590  * vxge_hal_ring_handle_tcode - Handle transfer code.
1591  * @vpath_handle: Virtual Path handle.
1592  * @rxdh: Descriptor handle.
1593  * @t_code: One of the enumerated (and documented in the X3100 user guide)
1594  *       "transfer codes".
1595  *
1596  * Handle descriptor's transfer code. The latter comes with each completed
1597  * descriptor.
1598  *
1599  * Returns: one of the vxge_hal_status_e {} enumerated types.
1600  * VXGE_HAL_OK                  - for success.
1601  * VXGE_HAL_ERR_INVALID_TCODE   - when an unknown transfer code is passed.
1602  */
1603 vxge_hal_status_e
1604 vxge_hal_ring_handle_tcode(
1605     vxge_hal_vpath_h vpath_handle,
1606     vxge_hal_rxd_h rxdh,
1607     u8 t_code)
1608 {
1609         __hal_device_t *hldev;
1610         __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1611
1612         vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1613
1614         hldev = (__hal_device_t *) vp->vpath->hldev;
1615
1616         vxge_hal_trace_log_ring("==> %s:%s:%d",
1617             __FILE__, __func__, __LINE__);
1618
1619         vxge_hal_trace_log_ring(
1620             "vpath_handle = 0x"VXGE_OS_STXFMT", "
1621             "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%x",
1622             (ptr_t) vpath_handle, (ptr_t) rxdh, t_code);
1623
1624         switch (t_code) {
1625         case 0x0:
1626                 /* 0x0: Transfer ok. */
1627                 break;
1628         case 0x1:
1629                 /*
1630                  * 0x1: Layer 3 checksum presentation
1631                  *      configuration mismatch.
1632                  */
1633                 break;
1634         case 0x2:
1635                 /*
1636                  * 0x2: Layer 4 checksum presentation
1637                  *      configuration mismatch.
1638                  */
1639                 break;
1640         case 0x3:
1641                 /*
1642                  * 0x3: Layer 3 and Layer 4 checksum
1643                  *      presentation configuration mismatch.
1644                  */
1645                 break;
1646         case 0x4:
1647                 /* 0x4: Reserved. */
1648                 break;
1649         case 0x5:
1650                 /*
1651                  * 0x5: Layer 3 error unparseable packet,
1652                  * 0x5: Layer 3 error: unparseable packet,
1653                  */
1654                 break;
1655         case 0x6:
1656                 /*
1657                  * 0x6: Layer 2 error: frame integrity
1658                  *      error (such as FCS or ECC).
1659                  */
1660                 break;
1661         case 0x7:
1662                 /*
1663                  * 0x7: Buffer size error: the RxD buffer(s)
1664                  *      were not appropriately sized and
1665                  *      data loss occurred.
1666                  */
1667                 break;
1668         case 0x8:
1669                 /* 0x8: Internal ECC error: RxD corrupted. */
1670                 __hal_device_handle_error(vp->vpath->hldev,
1671                     vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR);
1672                 break;
1673         case 0x9:
1674                 /*
1675                  * 0x9: Benign overflow: the contents of
1676                  *      Segment1 exceeded the capacity of
1677                  *      Buffer1 and the remainder was placed
1678                  *      in Buffer2. Segment2 now starts in
1679                  *      Buffer3. No data loss or errors occurred.
1680                  */
1681                 break;
1682         case 0xA:
1683                 /*
1684                  * 0xA: Buffer size 0: one of the RxD's
1685                  *      assigned buffers has a size of 0 bytes.
1686                  */
1687                 break;
1688         case 0xB:
1689                 /* 0xB: Reserved. */
1690                 break;
1691         case 0xC:
1692                 /*
1693                  * 0xC: Frame dropped either due to
1694                  *      VPath Reset or because of a VPIN mismatch.
1695                  */
1696                 break;
1697         case 0xD:
1698                 /* 0xD: Reserved. */
1699                 break;
1700         case 0xE:
1701                 /* 0xE: Reserved. */
1702                 break;
1703         case 0xF:
1704                 /*
1705                  * 0xF: Multiple errors: more than one
1706                  *      transfer code condition occurred.
1707                  */
1708                 break;
1709         default:
1710                 vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1711                     __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
1712                 return (VXGE_HAL_ERR_INVALID_TCODE);
1713         }
1714
1715         vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++;
1716
1717         vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1718             __FILE__, __func__, __LINE__, VXGE_HAL_OK);
1719         return (VXGE_HAL_OK);
1720 }
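
/*
 * Usage sketch (disabled): acting on the transfer code after
 * vxge_hal_ring_handle_tcode() has recorded it. Codes 0x1-0x3 only signal
 * a checksum-presentation mismatch, so the frame itself is intact; most
 * other non-zero codes imply a damaged or dropped frame. The uld_frame_t
 * type and the uld_deliver()/uld_drop() helpers are hypothetical
 * ULD-side names.
 */
#if 0
static void
uld_classify_frame(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
    u8 t_code, uld_frame_t *frame)
{
        (void) vxge_hal_ring_handle_tcode(vpath_handle, rxdh, t_code);

        switch (t_code) {
        case 0x0:
                uld_deliver(frame, 1);  /* hardware checksums usable */
                break;
        case 0x1:
        case 0x2:
        case 0x3:
                uld_deliver(frame, 0);  /* verify checksums in software */
                break;
        default:
                uld_drop(frame);        /* damaged, unparseable or dropped */
                break;
        }
}
#endif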
1721
1722
1723 /*
1724  * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data.
1725  * @vpath_handle: Virtual Path handle.
1726  * @rxdh: Descriptor handle.
1727  *
1728  * Returns: private ULD info associated with the descriptor.
1729  * ULD requests per-descriptor space via vxge_hal_ring_attr.
1730  *
1731  */
1732 void *
1733 vxge_hal_ring_rxd_private_get(
1734     vxge_hal_vpath_h vpath_handle,
1735     vxge_hal_rxd_h rxdh)
1736 {
1737         __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1738
1739         return (VXGE_HAL_RING_ULD_PRIV(
1740             ((__hal_ring_t *) vp->vpath->ringh), rxdh));
1741
1742 }
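
/*
 * Usage sketch (disabled): the ULD requests per-descriptor space when the
 * ring is opened (through vxge_hal_ring_attr) and recovers it via
 * vxge_hal_ring_rxd_private_get(). uld_rxd_info_t is a hypothetical
 * ULD-defined layout for that space.
 */
#if 0
typedef struct uld_rxd_info_t {
        void    *os_buffer;     /* OS packet attached to this RxD */
        u64     dma_addr;       /* mapped address of that buffer */
} uld_rxd_info_t;

static uld_rxd_info_t *
uld_rxd_info(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh)
{
        return ((uld_rxd_info_t *) vxge_hal_ring_rxd_private_get(
            vpath_handle, rxdh));
}
#endif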
1743
1744 /*
1745  * vxge_hal_ring_rxd_free - Free descriptor.
1746  * @vpath_handle: Virtual Path handle.
1747  * @rxdh: Descriptor handle.
1748  *
1749  * Free the reserved descriptor. This operation is "symmetrical" to
1750  * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's
1751  * lifecycle.
1752  *
1753  * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor can
1754  * again be:
1755  *
1756  * - reserved (vxge_hal_ring_rxd_reserve);
1757  *
1758  * - posted     (vxge_hal_ring_rxd_post);
1759  *
1760  * - completed (vxge_hal_ring_rxd_next_completed);
1761  *
1762  * - and recycled again (vxge_hal_ring_rxd_free).
1763  *
1764  * For alternative state transitions and more details please refer to
1765  * the design doc.
1766  *
1767  */
1768 void
1769 vxge_hal_ring_rxd_free(
1770     vxge_hal_vpath_h vpath_handle,
1771     vxge_hal_rxd_h rxdh)
1772 {
1773 #if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1774         unsigned long flags;
1775
1776 #endif
1777         __hal_ring_t *ring;
1778         __hal_device_t *hldev;
1779         __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1780
1781         vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1782
1783         hldev = (__hal_device_t *) vp->vpath->hldev;
1784
1785         vxge_hal_trace_log_ring("==> %s:%s:%d",
1786             __FILE__, __func__, __LINE__);
1787
1788         vxge_hal_trace_log_ring(
1789             "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1790             (ptr_t) vpath_handle, (ptr_t) rxdh);
1791
1792         ring = (__hal_ring_t *) vp->vpath->ringh;
1793
1794         vxge_assert(ring != NULL);
1795
1796 #if defined(VXGE_HAL_RX_MULTI_POST)
1797         vxge_os_spin_lock(&ring->channel.post_lock);
1798 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1799         vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
1800 #endif
1801
1802         __hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
1803 #if defined(VXGE_OS_MEMORY_CHECK)
1804         VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
1805 #endif
1806
1807 #if defined(VXGE_HAL_RX_MULTI_POST)
1808         vxge_os_spin_unlock(&ring->channel.post_lock);
1809 #elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1810         vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1811 #endif
1812
1813         vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
1814             __FILE__, __func__, __LINE__);
1815 }
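
/*
 * Usage sketch (disabled): one pass through the lifecycle listed in the
 * vxge_hal_ring_rxd_free() comment above, for 1-buffer mode.
 * uld_alloc_buffer() is a hypothetical helper that attaches an OS buffer
 * and returns its bus address; vxge_hal_ring_rxd_1b_set() is assumed to
 * be the 1-buffer setter declared in the ring headers.
 */
#if 0
static vxge_hal_status_e
uld_replenish_one(vxge_hal_vpath_h vpath_handle, u32 buf_size)
{
        vxge_hal_rxd_h rxdh;
        void *rxd_priv;
        dma_addr_t dma_pointer;
        vxge_hal_status_e status;

        /* 1. reserve a free descriptor */
        status = vxge_hal_ring_rxd_reserve(vpath_handle, &rxdh, &rxd_priv);
        if (status != VXGE_HAL_OK)
                return (status);

        /* 2. attach a fresh receive buffer (hypothetical helper) */
        dma_pointer = uld_alloc_buffer(rxd_priv, buf_size);
        vxge_hal_ring_rxd_1b_set(rxdh, dma_pointer, buf_size);

        /* 3. hand the descriptor to the adapter */
        vxge_hal_ring_rxd_post(vpath_handle, rxdh);

        /*
         * 4. vxge_hal_ring_rxd_next_completed() returns it once the
         *    adapter fills it, and vxge_hal_ring_rxd_free() recycles it.
         */
        return (VXGE_HAL_OK);
}
#endif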