]> CyberLeo.Net >> Repos - FreeBSD/releng/10.0.git/blob - sys/dev/nxge/xgehal/xgehal-device-fp.c
- Copy stable/10 (r259064) to releng/10.0 as part of the
[FreeBSD/releng/10.0.git] / sys / dev / nxge / xgehal / xgehal-device-fp.c
1 /*-
2  * Copyright (c) 2002-2007 Neterion, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28
29 #ifdef XGE_DEBUG_FP
30 #include <dev/nxge/include/xgehal-device.h>
31 #endif
32
33 #include <dev/nxge/include/xgehal-ring.h>
34 #include <dev/nxge/include/xgehal-fifo.h>
35
36 /**
37  * xge_hal_device_bar0 - Get BAR0 mapped address.
38  * @hldev: HAL device handle.
39  *
40  * Returns: BAR0 address of the specified device.
41  */
42 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
43 xge_hal_device_bar0(xge_hal_device_t *hldev)
44 {
45         return hldev->bar0;
46 }
47
48 /**
49  * xge_hal_device_isrbar0 - Get BAR0 mapped address.
50  * @hldev: HAL device handle.
51  *
52  * Returns: BAR0 address of the specified device.
53  */
54 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
55 xge_hal_device_isrbar0(xge_hal_device_t *hldev)
56 {
57         return hldev->isrbar0;
58 }
59
60 /**
61  * xge_hal_device_bar1 - Get BAR1 mapped address.
62  * @hldev: HAL device handle.
63  *
64  * Returns: BAR1 address of the specified device.
65  */
66 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
67 xge_hal_device_bar1(xge_hal_device_t *hldev)
68 {
69         return hldev->bar1;
70 }
71
72 /**
73  * xge_hal_device_bar0_set - Set BAR0 mapped address.
74  * @hldev: HAL device handle.
75  * @bar0: BAR0 mapped address.
76  * * Set BAR0 address in the HAL device object.
77  */
78 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
79 xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
80 {
81         xge_assert(bar0);
82         hldev->bar0 = bar0;
83 }
84
85 /**
86  * xge_hal_device_isrbar0_set - Set BAR0 mapped address.
87  * @hldev: HAL device handle.
88  * @isrbar0: BAR0 mapped address.
89  * * Set BAR0 address in the HAL device object.
90  */
91 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
92 xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
93 {
94         xge_assert(isrbar0);
95         hldev->isrbar0 = isrbar0;
96 }
97
98 /**
99  * xge_hal_device_bar1_set - Set BAR1 mapped address.
100  * @hldev: HAL device handle.
101  * @channelh: Channel handle.
102  * @bar1: BAR1 mapped address.
103  *
104  * Set BAR1 address for the given channel.
105  */
106 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
107 xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
108                    char *bar1)
109 {
110         xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
111
112         xge_assert(bar1);
113         xge_assert(fifo);
114
115         /* Initializing the BAR1 address as the start of
116          * the FIFO queue pointer and as a location of FIFO control
117          * word. */
118         fifo->hw_pair =
119                 (xge_hal_fifo_hw_pair_t *) (bar1 +
120                     (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
121         hldev->bar1 = bar1;
122 }
123
124
125 /**
126  * xge_hal_device_rev - Get Device revision number.
127  * @hldev: HAL device handle.
128  *
129  * Returns: Device revision number
130  */
131 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
132 xge_hal_device_rev(xge_hal_device_t *hldev)
133 {
134             return hldev->revision;
135 }
136
137
138 /**
139  * xge_hal_device_begin_irq - Begin IRQ processing.
140  * @hldev: HAL device handle.
141  * @reason: "Reason" for the interrupt, the value of Xframe's
142  *          general_int_status register.
143  *
144  * The function performs two actions, It first checks whether (shared IRQ) the
145  * interrupt was raised by the device. Next, it masks the device interrupts.
146  *
147  * Note:
148  * xge_hal_device_begin_irq() does not flush MMIO writes through the
149  * bridge. Therefore, two back-to-back interrupts are potentially possible.
150  * It is the responsibility of the ULD to make sure that only one
151  * xge_hal_device_continue_irq() runs at a time.
152  *
153  * Returns: 0, if the interrupt is not "ours" (note that in this case the
154  * device remain enabled).
155  * Otherwise, xge_hal_device_begin_irq() returns 64bit general adapter
156  * status.
157  * See also: xge_hal_device_handle_irq()
158  */
159 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
160 xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
161 {
162         u64 val64;
163         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
164
165         hldev->stats.sw_dev_info_stats.total_intr_cnt++;
166
167         val64 = xge_os_pio_mem_read64(hldev->pdev,
168                       hldev->regh0, &isrbar0->general_int_status);
169         if (xge_os_unlikely(!val64)) {
170             /* not Xframe interrupt */
171             hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++;
172             *reason = 0;
173                 return XGE_HAL_ERR_WRONG_IRQ;
174         }
175
176         if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
177                     u64 adapter_status =
178                             xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
179                               &isrbar0->adapter_status);
180                     if (adapter_status == XGE_HAL_ALL_FOXES)  {
181                         (void) xge_queue_produce(hldev->queueh,
182                              XGE_HAL_EVENT_SLOT_FREEZE,
183                              hldev,
184                              1,  /* critical: slot freeze */
185                              sizeof(u64),
186                              (void*)&adapter_status);
187                 *reason = 0;
188                 return XGE_HAL_ERR_CRITICAL;
189             }
190         }
191
192         *reason = val64;
193
194         /* separate fast path, i.e. no errors */
195         if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
196             hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
197             return XGE_HAL_OK;
198         }
199         if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
200             hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
201             return XGE_HAL_OK;
202         }
203
204         hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
205         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
206             xge_hal_status_e status;
207             hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
208             status = __hal_device_handle_txpic(hldev, val64);
209             if (status != XGE_HAL_OK) {
210                 return status;
211             }
212         }
213
214         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
215             xge_hal_status_e status;
216             hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
217             status = __hal_device_handle_txdma(hldev, val64);
218             if (status != XGE_HAL_OK) {
219                 return status;
220             }
221         }
222
223         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
224             xge_hal_status_e status;
225             hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
226             status = __hal_device_handle_txmac(hldev, val64);
227             if (status != XGE_HAL_OK) {
228                 return status;
229             }
230         }
231
232         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
233             xge_hal_status_e status;
234             hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
235             status = __hal_device_handle_txxgxs(hldev, val64);
236             if (status != XGE_HAL_OK) {
237                 return status;
238             }
239         }
240
241         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
242             xge_hal_status_e status;
243             hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
244             status = __hal_device_handle_rxpic(hldev, val64);
245             if (status != XGE_HAL_OK) {
246                 return status;
247             }
248         }
249
250         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
251             xge_hal_status_e status;
252             hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
253             status = __hal_device_handle_rxdma(hldev, val64);
254             if (status != XGE_HAL_OK) {
255                 return status;
256             }
257         }
258
259         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
260             xge_hal_status_e status;
261             hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
262             status = __hal_device_handle_rxmac(hldev, val64);
263             if (status != XGE_HAL_OK) {
264                 return status;
265             }
266         }
267
268         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
269             xge_hal_status_e status;
270             hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
271             status = __hal_device_handle_rxxgxs(hldev, val64);
272             if (status != XGE_HAL_OK) {
273                 return status;
274             }
275         }
276
277         if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
278             xge_hal_status_e status;
279             hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
280             status = __hal_device_handle_mc(hldev, val64);
281             if (status != XGE_HAL_OK) {
282                 return status;
283             }
284         }
285
286         return XGE_HAL_OK;
287 }
288
289 /**
290  * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
291  * condition that has caused the RX interrupt.
292  * @hldev: HAL device handle.
293  *
294  * Acknowledge (that is, clear) the condition that has caused
295  * the Rx interrupt.
296  * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
297  * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
298  */
299 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
300 xge_hal_device_clear_rx(xge_hal_device_t *hldev)
301 {
302         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
303
304         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
305                      0xFFFFFFFFFFFFFFFFULL,
306                      &isrbar0->rx_traffic_int);
307 }
308
309 /**
310  * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
311  * condition that has caused the TX interrupt.
312  * @hldev: HAL device handle.
313  *
314  * Acknowledge (that is, clear) the condition that has caused
315  * the Tx interrupt.
316  * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
317  * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
318  */
319 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
320 xge_hal_device_clear_tx(xge_hal_device_t *hldev)
321 {
322         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
323
324         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
325                      0xFFFFFFFFFFFFFFFFULL,
326                      &isrbar0->tx_traffic_int);
327 }
328
329 /**
330  * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
331  * descriptors and process the same.
332  * @channel: HAL channel.
333  * @got_rx: Buffer to return the flag set if receive interrupt is occured
334  *
335  * The function polls the Rx channel for the completed  descriptors and calls
336  * the upper-layer driver (ULD) via supplied completion callback.
337  *
338  * Returns: XGE_HAL_OK, if the polling is completed successful.
339  * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
340  * descriptors available which are yet to be processed.
341  *
342  * See also: xge_hal_device_poll_tx_channel()
343  */
344 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
345 xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
346 {
347         xge_hal_status_e ret = XGE_HAL_OK;
348         xge_hal_dtr_h first_dtrh;
349         xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
350         u8 t_code;
351         int got_bytes;
352
353         /* for each opened rx channel */
354         got_bytes = *got_rx = 0;
355         ((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
356         channel->poll_bytes = 0;
357         if ((ret = xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
358             &t_code)) == XGE_HAL_OK) {
359             if (channel->callback(channel, first_dtrh,
360                 t_code, channel->userdata) != XGE_HAL_OK) {
361                 (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
362                 got_bytes += channel->poll_bytes + 1;
363                 ret = XGE_HAL_COMPLETIONS_REMAIN;
364             } else {
365                 (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
366                 got_bytes += channel->poll_bytes + 1;
367             }
368         }
369
370         if (*got_rx) {
371             hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
372             hldev->irq_workload_rxcnt[channel->post_qid] ++;
373         }
374         hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;
375
376         return ret;
377 }
378
379 /**
380  * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
381  * descriptors and process the same.
382  * @channel: HAL channel.
383  * @got_tx: Buffer to return the flag set if transmit interrupt is occured
384  *
385  * The function polls the Tx channel for the completed  descriptors and calls
386  * the upper-layer driver (ULD) via supplied completion callback.
387  *
388  * Returns: XGE_HAL_OK, if the polling is completed successful.
389  * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
390  * descriptors available which are yet to be processed.
391  *
392  * See also: xge_hal_device_poll_rx_channel().
393  */
394 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
395 xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
396 {
397         xge_hal_dtr_h first_dtrh;
398         xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
399         u8 t_code;
400         int got_bytes;
401
402         /* for each opened tx channel */
403         got_bytes = *got_tx = 0;
404         channel->poll_bytes = 0;
405         if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
406             &t_code) == XGE_HAL_OK) {
407             if (channel->callback(channel, first_dtrh,
408                 t_code, channel->userdata) != XGE_HAL_OK) {
409                 (*got_tx)++;
410                 got_bytes += channel->poll_bytes + 1;
411                 return XGE_HAL_COMPLETIONS_REMAIN;
412             }
413             (*got_tx)++;
414             got_bytes += channel->poll_bytes + 1;
415         }
416
417         if (*got_tx) {
418             hldev->irq_workload_txd[channel->post_qid] += *got_tx;
419             hldev->irq_workload_txcnt[channel->post_qid] ++;
420         }
421         hldev->irq_workload_txlen[channel->post_qid] += got_bytes;
422
423         return XGE_HAL_OK;
424 }
425
426 /**
427  * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
428  * descriptors and process the same.
429  * @hldev: HAL device handle.
430  * @got_rx: Buffer to return flag set if receive is ready
431  *
432  * The function polls the Rx channels for the completed descriptors and calls
433  * the upper-layer driver (ULD) via supplied completion callback.
434  *
435  * Returns: XGE_HAL_OK, if the polling is completed successful.
436  * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
437  * descriptors available which are yet to be processed.
438  *
439  * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
440  */
441 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
442 xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
443 {
444         xge_list_t *item;
445         xge_hal_channel_t *channel;
446
447         /* for each opened rx channel */
448         xge_list_for_each(item, &hldev->ring_channels) {
449             if (hldev->terminating)
450                 return XGE_HAL_OK;
451             channel = xge_container_of(item, xge_hal_channel_t, item);
452             (void) xge_hal_device_poll_rx_channel(channel, got_rx);
453         }
454
455         return XGE_HAL_OK;
456 }
457
458 /**
459  * xge_hal_device_poll_tx_channels - Poll Tx channels for completed
460  * descriptors and process the same.
461  * @hldev: HAL device handle.
462  * @got_tx: Buffer to return flag set if transmit is ready
463  *
464  * The function polls the Tx channels for the completed descriptors and calls
465  * the upper-layer driver (ULD) via supplied completion callback.
466  *
467  * Returns: XGE_HAL_OK, if the polling is completed successful.
468  * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
469  * descriptors available which are yet to be processed.
470  *
471  * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
472  */
473 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
474 xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
475 {
476         xge_list_t *item;
477         xge_hal_channel_t *channel;
478
479         /* for each opened tx channel */
480         xge_list_for_each(item, &hldev->fifo_channels) {
481             if (hldev->terminating)
482                 return XGE_HAL_OK;
483             channel = xge_container_of(item, xge_hal_channel_t, item);
484             (void) xge_hal_device_poll_tx_channel(channel, got_tx);
485         }
486
487         return XGE_HAL_OK;
488 }
489
490 /**
491  * xge_hal_device_mask_tx - Mask Tx interrupts.
492  * @hldev: HAL device handle.
493  *
494  * Mask Tx device interrupts.
495  *
496  * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
497  * xge_hal_device_clear_tx().
498  */
499 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
500 xge_hal_device_mask_tx(xge_hal_device_t *hldev)
501 {
502         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
503
504         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
505                        0xFFFFFFFFFFFFFFFFULL,
506                        &isrbar0->tx_traffic_mask);
507 }
508
509 /**
510  * xge_hal_device_mask_rx - Mask Rx interrupts.
511  * @hldev: HAL device handle.
512  *
513  * Mask Rx device interrupts.
514  *
515  * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
516  * xge_hal_device_clear_rx().
517  */
518 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
519 xge_hal_device_mask_rx(xge_hal_device_t *hldev)
520 {
521         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
522
523         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
524                        0xFFFFFFFFFFFFFFFFULL,
525                        &isrbar0->rx_traffic_mask);
526 }
527
528 /**
529  * xge_hal_device_mask_all - Mask all device interrupts.
530  * @hldev: HAL device handle.
531  *
532  * Mask all device interrupts.
533  *
534  * See also: xge_hal_device_unmask_all()
535  */
536 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
537 xge_hal_device_mask_all(xge_hal_device_t *hldev)
538 {
539         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
540
541         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
542                        0xFFFFFFFFFFFFFFFFULL,
543                        &isrbar0->general_int_mask);
544 }
545
546 /**
547  * xge_hal_device_unmask_tx - Unmask Tx interrupts.
548  * @hldev: HAL device handle.
549  *
550  * Unmask Tx device interrupts.
551  *
552  * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
553  */
554 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
555 xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
556 {
557         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
558
559         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
560                        0x0ULL,
561                        &isrbar0->tx_traffic_mask);
562 }
563
564 /**
565  * xge_hal_device_unmask_rx - Unmask Rx interrupts.
566  * @hldev: HAL device handle.
567  *
568  * Unmask Rx device interrupts.
569  *
570  * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
571  */
572 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
573 xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
574 {
575         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
576
577         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
578                        0x0ULL,
579                        &isrbar0->rx_traffic_mask);
580 }
581
582 /**
583  * xge_hal_device_unmask_all - Unmask all device interrupts.
584  * @hldev: HAL device handle.
585  *
586  * Unmask all device interrupts.
587  *
588  * See also: xge_hal_device_mask_all()
589  */
590 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
591 xge_hal_device_unmask_all(xge_hal_device_t *hldev)
592 {
593         xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
594
595         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
596                        0x0ULL,
597                        &isrbar0->general_int_mask);
598 }
599
600
601 /**
602  * xge_hal_device_continue_irq - Continue handling IRQ: process all
603  * completed descriptors.
604  * @hldev: HAL device handle.
605  *
606  * Process completed descriptors and unmask the device interrupts.
607  *
608  * The xge_hal_device_continue_irq() walks all open channels
609  * and calls upper-layer driver (ULD) via supplied completion
610  * callback. Note that the completion callback is specified at channel open
611  * time, see xge_hal_channel_open().
612  *
613  * Note that the xge_hal_device_continue_irq is part of the _fast_ path.
614  * To optimize the processing, the function does _not_ check for
615  * errors and alarms.
616  *
617  * The latter is done in a polling fashion, via xge_hal_device_poll().
618  *
619  * Returns: XGE_HAL_OK.
620  *
621  * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
622  * xge_hal_ring_dtr_next_completed(),
623  * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
624  */
625 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
626 xge_hal_device_continue_irq(xge_hal_device_t *hldev)
627 {
628         int got_rx = 1, got_tx = 1;
629         int isr_polling_cnt = hldev->config.isr_polling_cnt;
630         int count = 0;
631
632         do
633         {
634             if (got_rx)
635                 (void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
636             if (got_tx && hldev->tti_enabled)
637                 (void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
638
639             if (!got_rx && !got_tx)
640                 break;
641
642             count += (got_rx + got_tx);
643         }while (isr_polling_cnt--);
644
645         if (!count)
646             hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
647
648         return XGE_HAL_OK;
649 }
650
651 /**
652  * xge_hal_device_handle_irq - Handle device IRQ.
653  * @hldev: HAL device handle.
654  *
655  * Perform the complete handling of the line interrupt. The function
656  * performs two calls.
657  * First it uses xge_hal_device_begin_irq() to  check the reason for
658  * the interrupt and mask the device interrupts.
659  * Second, it calls xge_hal_device_continue_irq() to process all
660  * completed descriptors and re-enable the interrupts.
661  *
662  * Returns: XGE_HAL_OK - success;
663  * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device.
664  *
665  * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
666  */
667 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
668 xge_hal_device_handle_irq(xge_hal_device_t *hldev)
669 {
670         u64 reason;
671         xge_hal_status_e status;
672
673         xge_hal_device_mask_all(hldev);
674
675         status = xge_hal_device_begin_irq(hldev, &reason);
676         if (status != XGE_HAL_OK) {
677             xge_hal_device_unmask_all(hldev);
678             return status;
679         }
680
681         if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
682             xge_hal_device_clear_rx(hldev);
683         }
684
685         status = xge_hal_device_continue_irq(hldev);
686
687         xge_hal_device_clear_tx(hldev);
688
689         xge_hal_device_unmask_all(hldev);
690
691         return status;
692 }
693
694 #if defined(XGE_HAL_CONFIG_LRO)
695
696
697 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
698 __hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
699 {
700
701         /* Match Source address field */
702         if ((lro->ip_hdr->saddr != ip->saddr))
703             return XGE_HAL_FAIL;
704
705         /* Match Destination address field */
706         if ((lro->ip_hdr->daddr != ip->daddr))
707             return XGE_HAL_FAIL;
708
709         /* Match Source Port field */
710         if ((lro->tcp_hdr->source != tcp->source))
711             return XGE_HAL_FAIL;
712
713         /* Match Destination Port field */
714         if ((lro->tcp_hdr->dest != tcp->dest))
715             return XGE_HAL_FAIL;
716             
717         return XGE_HAL_OK;
718 }
719
720 /*
721  * __hal_tcp_seg_len: Find the tcp seg len.
722  * @ip: ip header.
723  * @tcp: tcp header.
724  * returns: Tcp seg length.
725  */
726 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
727 __hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
728 {
729         u16 ret;
730
731         ret =  (xge_os_ntohs(ip->tot_len) -
732                ((ip->version_ihl & 0x0F)<<2) -
733                ((tcp->doff_res)>>2));
734         return (ret);
735 }
736
737 /*
738  * __hal_ip_lro_capable: Finds whether ip is lro capable.
739  * @ip: ip header.
740  * @ext_info:  descriptor info.
741  */
742 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
743 __hal_ip_lro_capable(iplro_t *ip,
744                  xge_hal_dtr_info_t *ext_info)
745 {
746
747 #ifdef XGE_LL_DEBUG_DUMP_PKT
748             {
749                 u16 i;
750                 u8 ch, *iph = (u8 *)ip;
751
752                 xge_debug_ring(XGE_TRACE, "Dump Ip:" );
753                 for (i =0; i < 40; i++) {
754                     ch = ntohs(*((u8 *)(iph + i)) );
755                     printf("i:%d %02x, ",i,ch);
756                 }
757             }
758 #endif
759
760         if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
761             xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl);
762             return XGE_HAL_FAIL;
763         }
764
765         if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
766             xge_debug_ring(XGE_ERR, "IP fragmented");
767             return XGE_HAL_FAIL;
768         }
769
770         return XGE_HAL_OK;
771 }
772
773 /*
774  * __hal_tcp_lro_capable: Finds whether tcp is lro capable.
775  * @ip: ip header.
776  * @tcp: tcp header.
777  */
778 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
779 __hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
780 {
781 #ifdef XGE_LL_DEBUG_DUMP_PKT
782             {
783                 u8 ch;
784                 u16 i;
785
786                 xge_debug_ring(XGE_TRACE, "Dump Tcp:" );
787                 for (i =0; i < 20; i++) {
788                     ch = ntohs(*((u8 *)((u8 *)tcp + i)) );
789                     xge_os_printf("i:%d %02x, ",i,ch);
790                 }
791             }
792 #endif
793         if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
794             (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
795             goto _exit_fail;
796
797         *ts_off = -1;
798         if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
799             u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
800             u16 off = 20; /* Start of tcp options */
801             int i, diff; 
802
803             /* Does Packet can contain time stamp */
804             if (tcp_hdr_len < 32) {
805                 /*
806                  * If the session is not opened, we can consider
807                  * this packet for LRO
808                  */
809                 if (lro == NULL)
810                     return XGE_HAL_OK;
811
812                 goto _exit_fail;
813             }
814
815             /* Ignore No-operation 0x1 */
816             while (((u8 *)tcp)[off] == 0x1)
817                 off++;
818
819             /* Next option == Timestamp */
820             if (((u8 *)tcp)[off] != 0x8) {
821                 /*
822                  * If the session ie not opened, we can consider
823                  * this packet for LRO
824                  */
825                 if (lro == NULL)
826                     return XGE_HAL_OK;
827
828                 goto _exit_fail;
829             }
830
831             *ts_off = off;
832             if (lro == NULL)
833                 return XGE_HAL_OK;
834
835             /*
836              * Now the session is opened. If the LRO frame doesn't
837              * have time stamp, we cannot consider current packet for
838              * LRO.
839              */
840             if (lro->ts_off == -1) {
841                 xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x", tcp->doff_res, tcp->ctrl);
842                 return XGE_HAL_FAIL;
843             }
844
845             /*
846              * If the difference is greater than three, then there are
847              * more options possible.
848              * else, there are two cases:
849              * case 1: remaining are padding bytes.
850              * case 2: remaining can contain options or padding
851              */
852             off += ((u8 *)tcp)[off+1];
853             diff = tcp_hdr_len - off;
854             if (diff > 3) {
855                 /*
856                  * Probably contains more options.
857                  */
858                 xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res,   tcp->ctrl);
859                 return XGE_HAL_FAIL;
860             }
861
862             for (i = 0; i < diff; i++) {
863                 u8 byte = ((u8 *)tcp)[off+i];
864
865                 /* Ignore No-operation 0x1 */
866                 if ((byte == 0x0) || (byte == 0x1)) 
867                     continue;
868                 xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res,   tcp->ctrl);
869                 return XGE_HAL_FAIL;
870             }
871         
872             /*
873              * Update the time stamp of LRO frame.
874              */
875             xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
876                     (char *)((char *)tcp + (*ts_off) + 2), 8);
877         }
878
879         return XGE_HAL_OK;
880
881 _exit_fail:
882         xge_debug_ring(XGE_TRACE,   "tcphdr not fastpth %02x %02x", tcp->doff_res, tcp->ctrl);
883         return XGE_HAL_FAIL;
884
885 }
886
887 /*
888  * __hal_lro_capable: Finds whether frame is lro capable.
889  * @buffer: Ethernet frame.
890  * @ip: ip frame.
891  * @tcp: tcp frame.
892  * @ext_info: Descriptor info.
893  */
894 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
895 __hal_lro_capable( u8 *buffer,
896                iplro_t **ip,
897                tcplro_t **tcp,
898            xge_hal_dtr_info_t *ext_info)
899 {
900         u8 ip_off, ip_length;
901
902         if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
903             xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto);
904             return XGE_HAL_FAIL;
905         }
906
907   if ( !*ip )
908   {
909 #ifdef XGE_LL_DEBUG_DUMP_PKT
910             {
911                 u8 ch;
912                 u16 i;
913
914                 xge_os_printf("Dump Eth:" );
915                 for (i =0; i < 60; i++) {
916                     ch = ntohs(*((u8 *)(buffer + i)) );
917                     xge_os_printf("i:%d %02x, ",i,ch);
918                 }
919             }
920 #endif
921
922         switch (ext_info->frame) {
923         case XGE_HAL_FRAME_TYPE_DIX:
924           ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
925           break;
926         case XGE_HAL_FRAME_TYPE_LLC:
927           ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE   +
928                     XGE_HAL_HEADER_802_2_SIZE);
929           break;
930         case XGE_HAL_FRAME_TYPE_SNAP:
931           ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE   +
932                     XGE_HAL_HEADER_SNAP_SIZE);
933           break;
934         default: // XGE_HAL_FRAME_TYPE_IPX, etc.
935           return XGE_HAL_FAIL;
936         }
937
938
939         if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
940           ip_off += XGE_HAL_HEADER_VLAN_SIZE;
941         }
942
943         /* Grab ip, tcp headers */
944         *ip = (iplro_t *)((char*)buffer + ip_off);
945   } /* !*ip */
946
947         ip_length = (u8)((*ip)->version_ihl & 0x0F);
948         ip_length = ip_length <<2;
949         *tcp = (tcplro_t *)((char *)*ip + ip_length);
950
951         xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
952                " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
953             (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp);
954
955         return XGE_HAL_OK;
956
957 }
958
959
960 /*
961  * __hal_open_lro_session: Open a new LRO session.
962  * @buffer: Ethernet frame.
963  * @ip: ip header.
964  * @tcp: tcp header.
965  * @lro: lro pointer
966  * @ext_info: Descriptor info.
967  * @hldev: Hal context.
968  * @ring_lro: LRO descriptor per rx ring.
969  * @slot: Bucket no.
970  * @tcp_seg_len: Length of tcp segment.
971  * @ts_off: time stamp offset in the packet.
972  */
973 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
974 __hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
975                 xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, int slot,
976           u32 tcp_seg_len, int  ts_off)
977 {
978
979         lro_t *lro_new = &ring_lro->lro_pool[slot];
980
981         lro_new->in_use         =   1;
982         lro_new->ll_hdr         =   buffer;
983         lro_new->ip_hdr         =   ip;
984         lro_new->tcp_hdr        =   tcp;
985         lro_new->tcp_next_seq_num   =   tcp_seg_len + xge_os_ntohl(
986                                     tcp->seq);
987         lro_new->tcp_seq_num        =   tcp->seq;
988         lro_new->tcp_ack_num        =   tcp->ack_seq;
989         lro_new->sg_num         =   1;
990         lro_new->total_length       =   xge_os_ntohs(ip->tot_len);
991         lro_new->frags_len      =   0;
992         lro_new->ts_off         =   ts_off;
993
994         hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
995         hldev->stats.sw_dev_info_stats.tot_lro_sessions++;
996
997         *lro = ring_lro->lro_recent = lro_new;
998         return;
999 }
1000 /*
1001  * __hal_lro_get_free_slot: Get a free LRO bucket.
1002  * @ring_lro: LRO descriptor per ring.
1003  */
1004 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
1005 __hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro)
1006 {
1007         int i;
1008
1009         for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
1010             lro_t *lro_temp = &ring_lro->lro_pool[i];
1011
1012             if (!lro_temp->in_use)
1013                 return i;
1014         }
1015         return -1;  
1016 }
1017
1018 /*
1019  * __hal_get_lro_session: Gets matching LRO session or creates one.
1020  * @eth_hdr:    Ethernet header.
1021  * @ip: ip header.
1022  * @tcp: tcp header.
1023  * @lro: lro pointer
1024  * @ext_info: Descriptor info.
1025  * @hldev: Hal context.
1026  * @ring_lro: LRO descriptor per rx ring
1027  */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session (u8 *eth_hdr,
                   iplro_t *ip,
                   tcplro_t *tcp,
                   lro_t **lro,
                   xge_hal_dtr_info_t *ext_info,
                   xge_hal_device_t *hldev,
                   xge_hal_lro_desc_t   *ring_lro,
                   lro_t **lro_end3 /* Valid only when ret=END_3 */)
{
        lro_t *lro_match;
        int i, free_slot = -1;
        u32 tcp_seg_len;
        int ts_off = -1;    /* time stamp offset; -1 = no time stamp seen */

        *lro = lro_match = NULL;
        /*
         * Compare the incoming frame with the lro session left from the
         * previous call.  There is a good chance that this incoming frame
         * matches the lro session.
         */
        if (ring_lro->lro_recent && ring_lro->lro_recent->in_use)   {
            if (__hal_lro_check_for_session_match(ring_lro->lro_recent,
                                  tcp, ip)
                                == XGE_HAL_OK)
                lro_match = ring_lro->lro_recent;
        }

        if (!lro_match) {
            /*
             * Search in the pool of LROs for the session that matches
             * the incoming frame.  While scanning, remember the first
             * unused bucket in case a new session must be opened.
             */
            for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
                lro_t *lro_temp = &ring_lro->lro_pool[i];

                if (!lro_temp->in_use) {
                    if (free_slot == -1)
                        free_slot = i;
                    continue;
                }

                if (__hal_lro_check_for_session_match(lro_temp, tcp,
                                  ip) == XGE_HAL_OK) {
                    lro_match = lro_temp;
                    break;
                }
            }
        }


        if (lro_match) {
            /*
             * Matching LRO Session found
             */
            *lro = lro_match;

            /*
             * Not the expected next sequence number: treated as a
             * retransmit; flush the session without this frame (END_2).
             */
            if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
         xge_debug_ring(XGE_ERR,    "**retransmit  **"
                            "found***");
                hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
                return XGE_HAL_INF_LRO_END_2;
            }

            /* Re-validate the ip header; flush if no longer LRO-capable. */
            if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info))
        {
                return XGE_HAL_INF_LRO_END_2;
        }

            if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
                                &ts_off)) {
                /*
                 * Close the current session and open a new
                 * LRO session with this packet,
                 * provided it has tcp payload
                 */
                tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
                if (tcp_seg_len == 0)
          {
                    return XGE_HAL_INF_LRO_END_2;
          }

                /* Get a free bucket  */
                free_slot = __hal_lro_get_free_slot(ring_lro);
                if (free_slot == -1)
          {
                    return XGE_HAL_INF_LRO_END_2;
          }

                /*
                 * Open a new LRO session; it is handed back through
                 * @lro_end3, which is only valid for the END_3 return.
                 */
                __hal_open_lro_session (eth_hdr,    ip, tcp, lro_end3,
                            hldev, ring_lro, free_slot, tcp_seg_len,
                            ts_off);

                return XGE_HAL_INF_LRO_END_3;
            }

                    /*
             * The frame is good, in-sequence, can be LRO-ed;
             * take its (latest) ACK - unless it is a dupack.
             * Note: to be exact need to check window size as well..
            */
            if (lro_match->tcp_ack_num == tcp->ack_seq &&
                lro_match->tcp_seq_num == tcp->seq) {
                hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
                return XGE_HAL_INF_LRO_END_2;
            }

            /* Record this frame's seq/ack and payload length, and cache
             * the session for the fast-path match on the next call. */
            lro_match->tcp_seq_num = tcp->seq;
            lro_match->tcp_ack_num = tcp->ack_seq;
            lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);

            ring_lro->lro_recent =  lro_match;

            return XGE_HAL_INF_LRO_CONT;
        }

        /* ********** New Session ***************/
        if (free_slot == -1)
            return XGE_HAL_INF_LRO_UNCAPABLE;

        if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
            return XGE_HAL_INF_LRO_UNCAPABLE;

        if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
            return XGE_HAL_INF_LRO_UNCAPABLE;

        xge_debug_ring(XGE_TRACE, "Creating lro session.");

        /*
         * Open a LRO session, provided the packet contains payload.
         */
        tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
        if (tcp_seg_len == 0)
            return XGE_HAL_INF_LRO_UNCAPABLE;

        __hal_open_lro_session (eth_hdr,    ip, tcp, lro, hldev, ring_lro, free_slot,
                    tcp_seg_len, ts_off);

        return XGE_HAL_INF_LRO_BEGIN;
}
1171
1172 /*
1173  * __hal_lro_under_optimal_thresh: Finds whether combined session is optimal.
1174  * @ip: ip header.
1175  * @tcp: tcp header.
1176  * @lro: lro pointer
1177  * @hldev: Hal context.
1178  */
1179 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
1180 __hal_lro_under_optimal_thresh (iplro_t *ip,
1181                         tcplro_t *tcp,
1182                     lro_t *lro,
1183                     xge_hal_device_t *hldev)
1184 {
1185         if (!lro) return XGE_HAL_FAIL;
1186
1187         if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) > 
1188                             hldev->config.lro_frm_len) {
1189             xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
1190              "max length %d ", hldev->config.lro_frm_len);
1191             hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
1192             return XGE_HAL_FAIL;
1193         }
1194
1195         if (lro->sg_num == hldev->config.lro_sg_size) {
1196             xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
1197                      "max sg %d ", hldev->config.lro_sg_size);
1198             hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
1199             return XGE_HAL_FAIL;
1200         }
1201
1202         return XGE_HAL_OK;
1203 }
1204
1205 /*
1206  * __hal_collapse_ip_hdr: Collapses ip header.
1207  * @ip: ip header.
1208  * @tcp: tcp header.
1209  * @lro: lro pointer
1210  * @hldev: Hal context.
1211  */
1212 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
1213 __hal_collapse_ip_hdr ( iplro_t *ip,
1214                 tcplro_t *tcp,
1215                 lro_t *lro,
1216                 xge_hal_device_t *hldev)
1217 {
1218
1219         lro->total_length += __hal_tcp_seg_len(ip, tcp);
1220
1221         /* May be we have to handle time stamps or more options */
1222
1223         return XGE_HAL_OK;
1224
1225 }
1226
1227 /*
1228  * __hal_collapse_tcp_hdr: Collapses tcp header.
1229  * @ip: ip header.
1230  * @tcp: tcp header.
1231  * @lro: lro pointer
1232  * @hldev: Hal context.
1233  */
1234 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
1235 __hal_collapse_tcp_hdr ( iplro_t *ip,
1236                  tcplro_t *tcp,
1237                  lro_t *lro,
1238                  xge_hal_device_t *hldev)
1239 {
1240         lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
1241         return XGE_HAL_OK;
1242
1243 }
1244
1245 /*
1246  * __hal_append_lro: Appends new frame to existing LRO session.
1247  * @ip: ip header.
1248  * @tcp: IN tcp header, OUT tcp payload.
1249  * @seg_len: tcp payload length.
1250  * @lro: lro pointer
1251  * @hldev: Hal context.
1252  */
1253 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
1254 __hal_append_lro(iplro_t *ip,
1255              tcplro_t **tcp,
1256              u32 *seg_len,
1257              lro_t *lro,
1258              xge_hal_device_t *hldev)
1259 {
1260         (void) __hal_collapse_ip_hdr(ip, *tcp,  lro, hldev);
1261         (void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
1262         // Update mbuf chain will be done in ll driver.
1263         // xge_hal_accumulate_large_rx on success of appending new frame to
1264         // lro will return to ll driver tcpdata pointer, and tcp payload length.
1265         // along with return code lro frame appended.
1266
1267         lro->sg_num++;
1268         *seg_len = __hal_tcp_seg_len(ip, *tcp);
1269         *tcp = (tcplro_t *)((char *)*tcp    + (((*tcp)->doff_res)>>2));
1270
1271         return XGE_HAL_OK;
1272
1273 }
1274
1275 /**
 * xge_hal_lro_process_rx: LRO a given received frame
1278  * @ring: rx ring number
1279  * @eth_hdr: ethernet header.
1280  * @ip_hdr: ip header (optional)
1281  * @tcp: tcp header.
1282  * @seglen: packet length.
1283  * @p_lro: lro pointer.
1284  * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
1285  * @hldev: HAL device.
1286  * @lro_end3: for lro_end3 output
1287  *
1288  * LRO the newly received frame, i.e. attach it (if possible) to the
1289  * already accumulated (i.e., already LRO-ed) received frames (if any),
1290  * to form one super-sized frame for the subsequent processing
1291  * by the stack.
1292  */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
                           u32 *seglen, lro_t **p_lro,
                           xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
                           lro_t **lro_end3)
{
        iplro_t *ip = (iplro_t *)ip_hdr;
        xge_hal_status_e ret;
        lro_t *lro;

        xge_debug_ring(XGE_TRACE, "Entered accumu lro. ");
        /* Locate (or validate) the ip and tcp headers; non-TCP frames and
         * unknown link-layer framing cannot be LRO-ed. */
        if (XGE_HAL_OK != __hal_lro_capable(eth_hdr, &ip, (tcplro_t **)tcp,
                                          ext_info))
            return XGE_HAL_INF_LRO_UNCAPABLE;

        /*
         * This function shall get matching LRO or else
         * create one and return it
         */
        ret = __hal_get_lro_session(eth_hdr, ip, (tcplro_t *)*tcp,
                                  p_lro, ext_info, hldev,   &hldev->lro_desc[ring],
                                  lro_end3);
        xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ",ret);
        lro = *p_lro;
        if (XGE_HAL_INF_LRO_CONT == ret) {
            /* Frame continues an open session: append it if the combined
             * frame stays within the configured length/segment limits,
             * otherwise flush the session (END_2). */
            if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
                            (tcplro_t *)*tcp, lro, hldev)) {
                (void) __hal_append_lro(ip,(tcplro_t **) tcp, seglen,
                                 lro, hldev);
                hldev->stats.sw_dev_info_stats.tot_frms_lroised++;

                /* Session just filled up: flush after this append (END_1). */
                if (lro->sg_num >= hldev->config.lro_sg_size) {
                    hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
                    ret = XGE_HAL_INF_LRO_END_1;
                }

            } else ret = XGE_HAL_INF_LRO_END_2;
        }

        /*
         * Since its time to flush,
         * update ip header so that it can be sent up
         */
        if ((ret == XGE_HAL_INF_LRO_END_1) ||
            (ret == XGE_HAL_INF_LRO_END_2) ||
            (ret == XGE_HAL_INF_LRO_END_3)) {
            /* Patch total length, recompute the ip checksum, and install
             * the latest ACK into the accumulated tcp header. */
            lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
            lro->ip_hdr->check = xge_os_htons(0);
            lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
                        (lro->ip_hdr->version_ihl & 0x0F));
            lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
        }

        return (ret);
}
1348
1349 /**
 * xge_hal_accumulate_large_rx: LRO a given received frame
1352  * @buffer: Ethernet frame.
1353  * @tcp: tcp header.
1354  * @seglen: packet length.
1355  * @p_lro: lro pointer.
1356  * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
1357  * @hldev: HAL device.
1358  * @lro_end3: for lro_end3 output
1359  *
1360  * LRO the newly received frame, i.e. attach it (if possible) to the
1361  * already accumulated (i.e., already LRO-ed) received frames (if any),
1362  * to form one super-sized frame for the subsequent processing
1363  * by the stack.
1364  */
1365 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
1366 xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
1367 lro_t **p_lro, xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
1368 lro_t **lro_end3)
1369 {
1370   int ring = 0;
1371   return xge_hal_lro_process_rx(ring, buffer, NULL, tcp, seglen, p_lro,
1372                                     ext_info, hldev, lro_end3);
1373 }
1374
1375 /**
1376  * xge_hal_lro_close_session: Close LRO session
 * @lro: LRO Session.
1379  */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
xge_hal_lro_close_session (lro_t *lro)
{
        /* Mark the bucket free so __hal_lro_get_free_slot can reuse it. */
        lro->in_use = 0;
}
1385
1386 /**
1387  * xge_hal_lro_next_session: Returns next LRO session in the list or NULL
1388  *                  if none exists.
1389  * @hldev: HAL Context.
1390  * @ring: rx ring number.
1391  */
1392 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
1393 xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring)
1394 {
1395 xge_hal_lro_desc_t *ring_lro = &hldev->lro_desc[ring];
1396         int i;
1397         int start_idx = ring_lro->lro_next_idx;
1398
1399         for(i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
1400             lro_t *lro = &ring_lro->lro_pool[i];
1401
1402             if (!lro->in_use)
1403                 continue;
1404
1405             lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
1406             lro->ip_hdr->check = xge_os_htons(0);
1407             lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
1408                                     (lro->ip_hdr->version_ihl & 0x0F));
1409             ring_lro->lro_next_idx  = i + 1;
1410             return lro;
1411         }
1412
1413         ring_lro->lro_next_idx  = 0;
1414         return NULL;
1415
1416 }
1417
1418 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
1419 xge_hal_lro_get_next_session(xge_hal_device_t *hldev)
1420 {
1421   int ring = 0; /* assume default ring=0 */
1422   return xge_hal_lro_next_session(hldev, ring);
1423 }
1424 #endif