/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-ring.h>
#endif

__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{

        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
        xge_hal_ring_rxd_priv_t *rxd_priv;

        xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
        xge_assert(ring);
        if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
            xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined(XGE_OS_PLATFORM_64BIT)
            int memblock_idx = rxdp_5->host_control >> 16;
            int i = rxdp_5->host_control & 0xFFFF;
            rxd_priv = (xge_hal_ring_rxd_priv_t *)
                ((char *)ring->mempool->memblocks_priv_arr[memblock_idx] +
                ring->rxd_priv_size * i);
#else
            /* 32-bit case */
            rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
        } else
#endif
        {
            rxd_priv = (xge_hal_ring_rxd_priv_t *)
                    (ulong_t)rxdp->host_control;
        }

        xge_assert(rxd_priv);
        xge_assert(rxd_priv->dma_object);

        xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);

        xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
                                rxd_priv->dma_addr);

        return rxd_priv;
}

__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
        return (int)*((u64 *)(void *)((char *)block +
                                XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx)
{
        *((u64 *)(void *)((char *)block +
                        XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
                        memblock_idx;
}


__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
        return (dma_addr_t)*((u64 *)(void *)((char *)block +
                XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
                dma_addr_t dma_next)
{
        *((u64 *)(void *)((char *)block +
                  XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/**
 * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via xge_hal_channel_open().
 *
 * See also: xge_hal_fifo_dtr_private().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtrh) +
                        sizeof(xge_hal_ring_rxd_priv_t);
}
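
/*
 * Example (a hypothetical sketch, not part of the original sources): an ULD
 * Rx completion handler can recover its per-descriptor context this way.
 * The rxd_ctx_t layout and my_channelh handle are illustrative assumptions;
 * the per-descriptor space itself must have been requested via
 * xge_hal_channel_open().
 *
 *   typedef struct rxd_ctx {
 *       struct mbuf *mp;     // buffer backing this RxD (assumption)
 *   } rxd_ctx_t;
 *
 *   rxd_ctx_t *ctx = (rxd_ctx_t *)
 *       xge_hal_ring_dtr_private(my_channelh, dtrh);
 */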

/**
 * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via xge_hal_ring_dtr_post().
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
 * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
        xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
        unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_RESERVE)
        xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
        xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->reserve_lock,
                     flags);
#endif

        status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
        xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
        xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->reserve_lock,
                     flags);
#endif

        if (status == XGE_HAL_OK) {
            xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;

            /* instead of memset: reset this RxD */
            rxdp->control_1 = rxdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
            __hal_ring_rxd_priv((xge_hal_ring_t *)channelh, rxdp)->allocated = 1;
#endif
        }

        return status;
}
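
/*
 * Example (a minimal sketch in the spirit of ex_post_all_rx{}; my_channelh,
 * my_map_buffer() and XGE_BUF_SIZE are illustrative assumptions): keep
 * reserving, filling and posting 1-buffer-mode descriptors until the ring
 * is full.
 *
 *   xge_hal_dtr_h dtrh;
 *   dma_addr_t dma_addr;
 *
 *   while (xge_hal_ring_dtr_reserve(my_channelh, &dtrh) == XGE_HAL_OK) {
 *       dma_addr = my_map_buffer(XGE_BUF_SIZE);   // DMA-map for Xframe
 *       xge_hal_ring_dtr_1b_set(dtrh, dma_addr, XGE_BUF_SIZE);
 *       xge_hal_ring_dtr_post(my_channelh, dtrh);
 *   }
 *   // XGE_HAL_INF_OUT_OF_DESCRIPTORS simply means the ring is full.
 */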

/**
 * xge_hal_ring_dtr_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
                xge_hal_dtr_info_t *ext_info)
{
        /* cast to 1-buffer mode RxD: the code below relies on the fact
         * that control_1 and control_2 are formatted the same way. */
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

        ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
        ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
        ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
        ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
        ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);

        /* Herc only; a few extra cycles are imposed on Xena and/or
         * when RTH is not enabled.
         * Alternatively, could check
         * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en. */
        ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
        ext_info->rth_spdm_hit =
                XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
        ext_info->rth_hash_type =
                XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
        ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}

/**
 * xge_hal_ring_dtr_info_nb_get - Get extended information associated
 * with a completed receive descriptor for 3b or 5b
 * modes.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 *           xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
                xge_hal_dtr_info_t *ext_info)
{
        /* cast to 1-buffer mode RxD: the code below relies on the fact
         * that control_1 and control_2 are formatted the same way. */
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

        ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
        ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
        ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
        ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
        ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
        /* Herc only; a few extra cycles are imposed on Xena and/or
         * when RTH is not enabled. Same comment as above. */
        ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
        ext_info->rth_spdm_hit =
                XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
        ext_info->rth_hash_type =
                XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
        ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 *               should carry. Note that by the time
 *               xge_hal_ring_dtr_1b_set
 *               is called, the receive buffer should be already mapped
 *               to the corresponding Xframe device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via xge_hal_ring_dtr_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
        rxdp->buffer0_ptr = dma_pointer;
        rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);

        xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p",
                    (xge_hal_ring_rxd_1_t *)dtrh,
                    rxdp->control_2,
                    rxdp->buffer0_ptr);
}

/**
 * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               carries. Returned by HAL.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed by
 *              @dma_pointer. Returned by HAL.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate the
 * receive buffer pointer and other "out" parameters. The function always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
            dma_addr_t *dma_pointer, int *pkt_length)
{
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

        *pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
        *dma_pointer = rxdp->buffer0_ptr;

        ((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}
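
/*
 * Example (a hypothetical sketch in the spirit of ex_rx_compl{}): once a
 * descriptor is returned by xge_hal_ring_dtr_next_completed(), pull out the
 * buffer address, packet length and extended info. my_channelh is an
 * illustrative assumption; ext_info is an ordinary stack variable here.
 *
 *   dma_addr_t dma_addr;
 *   int pkt_len;
 *   xge_hal_dtr_info_t ext_info;
 *
 *   xge_hal_ring_dtr_1b_get(my_channelh, dtrh, &dma_addr, &pkt_len);
 *   xge_hal_ring_dtr_info_get(my_channelh, dtrh, &ext_info);
 *   // hand (dma_addr, pkt_len) to the stack; consult ext_info.l3_cksum etc.
 */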

/**
 * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_3b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 3-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
                int sizes[])
{
        xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
        rxdp->buffer0_ptr = dma_pointers[0];
        rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
        rxdp->buffer1_ptr = dma_pointers[1];
        rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
        rxdp->buffer2_ptr = dma_pointers[2];
        rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}

/**
 * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
 *                carries. The first two buffers contain ethernet and
 *                (IP + transport) headers. The 3rd buffer contains packet
 *                data.
 *                Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate the
 * receive buffer pointers and other "out" parameters. The function always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
            dma_addr_t dma_pointers[], int sizes[])
{
        xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;

        dma_pointers[0] = rxdp->buffer0_ptr;
        sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);

        dma_pointers[1] = rxdp->buffer1_ptr;
        sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);

        dma_pointers[2] = rxdp->buffer2_ptr;
        sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);

        ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
            sizes[2];
}

/**
 * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_5b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 5-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
                int sizes[])
{
        xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
        rxdp->buffer0_ptr = dma_pointers[0];
        rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
        rxdp->buffer1_ptr = dma_pointers[1];
        rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
        rxdp->buffer2_ptr = dma_pointers[2];
        rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
        rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
        rxdp->buffer3_ptr = dma_pointers[3];
        rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
        rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
        rxdp->buffer4_ptr = dma_pointers[4];
        rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
        rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}
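
/*
 * Example (a hypothetical sketch): 5-buffer mode takes parallel arrays of
 * five DMA addresses and five sizes. All values below (hdr_l2 through
 * payload, and the sizes) are illustrative assumptions; every buffer must
 * already be DMA-mapped for the Xframe device.
 *
 *   dma_addr_t dma_ptrs[5] = { hdr_l2, hdr_l3, hdr_l4, hdr_l5, payload };
 *   int sizes[5] = { 14, 20, 20, 64, 4096 };
 *
 *   xge_hal_ring_dtr_5b_set(dtrh, dma_ptrs, sizes);
 *   xge_hal_ring_dtr_post(my_channelh, dtrh);
 */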

/**
 * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
 *                carries. The first 4 buffers contain L2 (ethernet) through
 *                L5 headers. The 5th buffer contains received (application)
 *                data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate the
 * receive buffer pointers and other "out" parameters. The function always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
            dma_addr_t dma_pointers[], int sizes[])
{
        xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;

        dma_pointers[0] = rxdp->buffer0_ptr;
        sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);

        dma_pointers[1] = rxdp->buffer1_ptr;
        sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);

        dma_pointers[2] = rxdp->buffer2_ptr;
        sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);

        dma_pointers[3] = rxdp->buffer3_ptr;
        sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);

        dma_pointers[4] = rxdp->buffer4_ptr;
        sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);

        ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
            sizes[2] + sizes[3] + sizes[4];
}


/**
 * xge_hal_ring_dtr_pre_post - Prepare the descriptor for posting.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Prepare the descriptor for posting: mark it as not-completed and add it
 * to the channel's list of posted descriptors. The actual hand-over to the
 * Xframe device is done by xge_hal_ring_dtr_post_post().
 *
 * See also: xge_hal_ring_dtr_post_post(), xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        xge_hal_ring_rxd_priv_t *priv;
        xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
        unsigned long flags;
#endif

        rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;

#ifdef XGE_DEBUG_ASSERT
        /* make sure Xena overwrites the (illegal) t_code on completion */
        XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif

        xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d",
                (unsigned long long)(ulong_t)dtrh,
                ((xge_hal_ring_t *)channelh)->channel.post_index,
                ((xge_hal_ring_t *)channelh)->channel.post_qid);

#if defined(XGE_HAL_RX_MULTI_POST)
        xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
        xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->post_lock,
                     flags);
#endif

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
        {
            xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

            if (channel->post_index != 0) {
                xge_hal_dtr_h prev_dtrh;
                xge_hal_ring_rxd_priv_t *rxdp_priv;

                rxdp_priv = __hal_ring_rxd_priv((xge_hal_ring_t *)channel,
                        rxdp);
                prev_dtrh = channel->work_arr[channel->post_index - 1];

                if (prev_dtrh != NULL &&
                    (rxdp_priv->dma_offset & (~0xFFF)) !=
                            rxdp_priv->dma_offset) {
                    xge_assert((char *)prev_dtrh +
                        ((xge_hal_ring_t *)channel)->rxd_size == dtrh);
                }
            }
        }
#endif

        __hal_channel_dtr_post(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_POST)
        xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
        xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->post_lock,
                       flags);
#endif
}


/**
 * xge_hal_ring_dtr_post_post - Complete the descriptor posting.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Complete the posting started by xge_hal_ring_dtr_pre_post(): transfer
 * ownership of the descriptor to the Xframe device and, on platforms that
 * require it, DMA-sync the descriptor to the device.
 *
 * See also: xge_hal_ring_dtr_pre_post(), xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
        xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        xge_hal_ring_rxd_priv_t *priv;
#endif
        /* do POST */
        rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        priv = __hal_ring_rxd_priv(ring, rxdp);
        xge_os_dma_sync(ring->channel.pdev,
                priv->dma_handle, priv->dma_addr,
                priv->dma_offset, ring->rxd_size,
                XGE_OS_DMA_DIR_TODEVICE);
#endif

        xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post: rxdp %p control_1 %p",
                      (xge_hal_ring_rxd_1_t *)dtrh,
                      rxdp->control_1);

        if (ring->channel.usage_cnt > 0)
            ring->channel.usage_cnt--;
}

/**
 * xge_hal_ring_dtr_post_post_wmb - Complete the descriptor posting with
 * a write memory barrier.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Same as xge_hal_ring_dtr_post_post(), except that a write memory barrier
 * is issued before ownership of the descriptor is transferred to the device.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
        xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        xge_hal_ring_rxd_priv_t *priv;
#endif
        /* Do memory barrier before changing the ownership */
        xge_os_wmb();

        /* do POST */
        rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        priv = __hal_ring_rxd_priv(ring, rxdp);
        xge_os_dma_sync(ring->channel.pdev,
                priv->dma_handle, priv->dma_addr,
                priv->dma_offset, ring->rxd_size,
                XGE_OS_DMA_DIR_TODEVICE);
#endif

        if (ring->channel.usage_cnt > 0)
            ring->channel.usage_cnt--;

        xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d",
                      (xge_hal_ring_rxd_1_t *)dtrh,
                      rxdp->control_1, ring->channel.usage_cnt);
}
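
/*
 * Usage note (a sketch, assuming a weakly-ordered platform; my_channelh,
 * dma_addr and XGE_BUF_SIZE are illustrative): when the CPU writes the RxD
 * contents right before handing ownership over, prefer the _wmb variant so
 * the device never observes a half-written descriptor.
 *
 *   xge_hal_ring_dtr_1b_set(dtrh, dma_addr, XGE_BUF_SIZE);
 *   xge_hal_ring_dtr_pre_post(my_channelh, dtrh);
 *   xge_hal_ring_dtr_post_post_wmb(my_channelh, dtrh);  // wmb, then handover
 */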

/**
 * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
 *
 * Post descriptor on the 'ring' type channel.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        xge_hal_ring_dtr_pre_post(channelh, dtrh);
        xge_hal_ring_dtr_post_post(channelh, dtrh);
}

/**
 * xge_hal_ring_dtr_next_completed - Get the _next_ completed
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the remaining
 * completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_ring_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Xframe will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
                    u8 *t_code)
{
        xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
        xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        xge_hal_ring_rxd_priv_t *priv;
#endif

        __hal_channel_dtr_try_complete(ring, dtrh);
        rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
        if (rxdp == NULL) {
            return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        }

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
        /* Note: 24 bytes at most means:
         *  - Control_3 in case of 5-buffer mode
         *  - Control_1 and Control_2
         *
         * This is the only length that needs to be invalidated
         * for this type of channel. */
        priv = __hal_ring_rxd_priv(ring, rxdp);
        xge_os_dma_sync(ring->channel.pdev,
                priv->dma_handle, priv->dma_addr,
                priv->dma_offset, 24,
                XGE_OS_DMA_DIR_FROMDEVICE);
#endif

        /* check whether it is not the end */
        if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
            !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
            if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
                /* reset it. since we don't want to return
                 * garbage to the ULD */
                *dtrh = 0;
                return XGE_HAL_COMPLETIONS_REMAIN;
            }
#endif

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
            if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
                xge_assert(((xge_hal_ring_rxd_5_t *)
                        rxdp)->host_control != 0);
            }
#endif
#else
            xge_assert(rxdp->host_control != 0);
#endif
#endif

            __hal_channel_dtr_complete(ring);

            *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);

            /* see XGE_HAL_RXD_SET_T_CODE() above */
            xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);

            xge_debug_ring(XGE_TRACE,
                "compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT,
                ((xge_hal_channel_t *)ring)->compl_index,
                ((xge_hal_channel_t *)ring)->post_qid, *t_code,
                (unsigned long long)(ulong_t)rxdp);

            ring->channel.usage_cnt++;
            if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
                ring->channel.stats.usage_max = ring->channel.usage_cnt;

            return XGE_HAL_OK;
        }

        /* reset it. since we don't want to return
         * garbage to the ULD */
        *dtrh = 0;
        return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
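
/*
 * Example (a minimal sketch in the spirit of ex_rx_compl{}; my_channelh and
 * my_deliver() are illustrative assumptions): drain all currently completed
 * descriptors, dropping frames whose transfer code indicates an error.
 *
 *   xge_hal_dtr_h dtrh;
 *   u8 t_code;
 *
 *   while (xge_hal_ring_dtr_next_completed(my_channelh, &dtrh, &t_code) ==
 *          XGE_HAL_OK) {
 *       if (t_code != 0) {
 *           // transfer error; drop the frame, recycle the descriptor
 *           xge_hal_ring_dtr_free(my_channelh, dtrh);
 *           continue;
 *       }
 *       my_deliver(my_channelh, dtrh);   // e.g. 1b_get + pass upstream
 *       xge_hal_ring_dtr_free(my_channelh, dtrh);
 *   }
 */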

/**
 * xge_hal_ring_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_ring_dtr_reserve);
 *
 * - posted (xge_hal_ring_dtr_post);
 *
 * - completed (xge_hal_ring_dtr_next_completed);
 *
 * - and recycled again (xge_hal_ring_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
        unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
        xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
        xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->free_lock,
                     flags);
#endif

        __hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
        __hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtrh)->allocated = 0;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
        xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
        xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->free_lock,
                     flags);
#endif
}

/**
 * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed.
 * @channelh: Channel handle.
 *
 * Checks if the _next_ completed descriptor is in host memory.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
{
        xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
        xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
        xge_hal_dtr_h dtrh;

        __hal_channel_dtr_try_complete(ring, &dtrh);
        rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
        if (rxdp == NULL) {
            return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
        }

        /* check whether it is not the end */
        if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
            !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
            if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
                xge_assert(((xge_hal_ring_rxd_5_t *)
                        rxdp)->host_control != 0);
            }
#endif
#else
            xge_assert(rxdp->host_control != 0);
#endif
#endif
            return XGE_HAL_OK;
        }

        return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
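
/*
 * Usage note (a hypothetical sketch; my_channelh is an assumption): this
 * check is handy for deciding, without consuming a descriptor, whether
 * another pass over the ring is worthwhile, e.g. before re-arming
 * interrupts in a polling loop.
 *
 *   if (xge_hal_ring_is_next_dtr_completed(my_channelh) == XGE_HAL_OK) {
 *       // more work pending: keep polling instead of re-arming interrupts
 *   }
 */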