]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/nxge/if_nxge.c
Import tzdata 2018d
[FreeBSD/FreeBSD.git] / sys / dev / nxge / if_nxge.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2007 Neterion, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30
31 #include <dev/nxge/if_nxge.h>
32 #include <dev/nxge/xge-osdep.h>
33 #include <net/if_arp.h>
34 #include <sys/types.h>
35 #include <net/if.h>
36 #include <net/if_var.h>
37 #include <net/if_vlan_var.h>
38
/* Set once the first successful probe prints the copyright banner. */
int       copyright_print       = 0;
/* Reference count of xge_hal_driver_initialize() users (one per attach). */
int       hal_driver_init_count = 0;
/* Size of an int; presumably used for kenv integer lookups -- TODO confirm. */
size_t    size                  = sizeof(int);

/* Forward declaration: free completed Tx descriptors on a fifo channel. */
static void inline xge_flush_txds(xge_hal_channel_h);
44
45 /**
46  * xge_probe
47  * Probes for Xframe devices
48  *
49  * @dev Device handle
50  *
51  * Returns
52  * BUS_PROBE_DEFAULT if device is supported
53  * ENXIO if device is not supported
54  */
55 int
56 xge_probe(device_t dev)
57 {
58         int  devid    = pci_get_device(dev);
59         int  vendorid = pci_get_vendor(dev);
60         int  retValue = ENXIO;
61
62         if(vendorid == XGE_PCI_VENDOR_ID) {
63             if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
64                 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
65                 if(!copyright_print) {
66                     xge_os_printf(XGE_COPYRIGHT);
67                     copyright_print = 1;
68                 }
69                 device_set_desc_copy(dev,
70                     "Neterion Xframe 10 Gigabit Ethernet Adapter");
71                 retValue = BUS_PROBE_DEFAULT;
72             }
73         }
74
75         return retValue;
76 }
77
/**
 * xge_init_params
 * Sets HAL parameter values (from kenv).
 *
 * Fills @dconfig with fixed defaults, then lets each value be overridden
 * from the kernel environment via the hw.xge.* tunables using the
 * XGE_GET_PARAM* macro family.
 *
 * @dconfig Device Configuration
 * @dev Device Handle
 */
void
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
{
        int qindex, tindex, revision;
        device_t checkdev;
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

        /* Fixed (non-tunable) device defaults */
        dconfig->mtu                   = XGE_DEFAULT_INITIAL_MTU;
        dconfig->pci_freq_mherz        = XGE_DEFAULT_USER_HARDCODED;
        dconfig->device_poll_millis    = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
        dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
        dconfig->mac.rmac_bcast_en     = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
        dconfig->fifo.alignment_size   = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;

        /* Per-adapter feature switches (stored in the softc) */
        XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
            XGE_DEFAULT_ENABLED_TSO);
        XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
            XGE_DEFAULT_ENABLED_LRO);
        XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
            XGE_DEFAULT_ENABLED_MSI);

        /* PCI/PCI-X bus behavior tunables */
        XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
            XGE_DEFAULT_LATENCY_TIMER);
        XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
            XGE_DEFAULT_MAX_SPLITS_TRANS);
        XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
            XGE_DEFAULT_MMRB_COUNT);
        XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
            XGE_DEFAULT_SHARED_SPLITS);
        XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
            XGE_DEFAULT_ISR_POLLING_CNT);
        XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
            stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

        /* MAC utilization-period and flow-control (pause frame) tunables */
        XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
            XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
            XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
            XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
            XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
            XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
        XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
            mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
        XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
            mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

        /* Tx fifo global tunables */
        XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
            XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
        XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
            XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
        XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
            XGE_DEFAULT_FIFO_MAX_FRAGS);

        /* Per-fifo-queue tunables and Tx interrupt (TTI) timers */
        for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
                XGE_DEFAULT_FIFO_QUEUE_INTR);
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
                XGE_DEFAULT_FIFO_QUEUE_MAX);
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
                qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

            for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
                dconfig->fifo.queue[qindex].tti[tindex].enabled  = 1;
                /* NOTE(review): set once per queue would suffice; harmlessly
                 * re-set on every tindex iteration here. */
                dconfig->fifo.queue[qindex].configured = 1;

                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
                    urange_a, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
                    urange_b, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
                    urange_c, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
                    ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
                    ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
                    ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
                    ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
            }
        }

        /* Rx ring global tunables */
        XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
            XGE_DEFAULT_RING_MEMBLOCK_SIZE);

        XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
            XGE_DEFAULT_RING_STRIP_VLAN_TAG);

        /* Rx buffer mode: only 1 and 2 are supported; clamp anything else */
        XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
            XGE_DEFAULT_BUFFER_MODE);
        if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
            (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
            xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
            lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
        }

        /* Per-ring-queue tunables and Rx interrupt (RTI) timers */
        for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
            dconfig->ring.queue[qindex].max_frm_len  = XGE_HAL_RING_USE_MTU;
            dconfig->ring.queue[qindex].priority     = 0;
            dconfig->ring.queue[qindex].configured   = 1;
            /* Buffer mode 2 is implemented via hardware mode 3 */
            dconfig->ring.queue[qindex].buffer_mode  =
                (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
                XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
                XGE_DEFAULT_RING_QUEUE_MAX);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
                qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
                dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
                indicate_max_pkts, qindex,
                XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
                backoff_interval_us, qindex,
                XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
                timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
                timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
                urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
                urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
                urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
        }

        /* Clamp user-supplied fifo_max_frags to what a page can describe */
        if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
            xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
            xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
                (int)(PAGE_SIZE / 32))
            xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
            dconfig->fifo.max_frags = (PAGE_SIZE / 32);
        }

        /* AMD-8131 PCI-X bridge workaround for early (<= 0x12) revisions */
        checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
        if(checkdev != NULL) {
            revision = pci_read_config(checkdev,
                xge_offsetof(xge_hal_pci_config_t, revision), 1);
            if(revision <= 0x12) {
                /* mmrb_count = 1 (presumably encodes a 1KB maximum memory
                 * read byte count) and three split transactions.  The
                 * original comment said "max splits = 2" which contradicts
                 * XGE_HAL_THREE_SPLIT_TRANSACTION -- TODO confirm intent. */
                dconfig->mmrb_count       = 1;
                dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
            }
        }
}
257
258 /**
259  * xge_buffer_sizes_set
260  * Set buffer sizes based on Rx buffer mode
261  *
262  * @lldev Per-adapter Data
263  * @buffer_mode Rx Buffer Mode
264  */
265 void
266 xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
267 {
268         int index = 0;
269         int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
270         int buffer_size = mtu + frame_header;
271
272         xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
273
274         if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
275             lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
276
277         lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
278
279         if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
280             lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
281
282         if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
283             index = 2;
284             buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
285             while(buffer_size > MJUMPAGESIZE) {
286                 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
287                 buffer_size -= MJUMPAGESIZE;
288             }
289             XGE_ALIGN_TO(buffer_size, 128);
290             lldev->rxd_mbuf_len[index] = buffer_size;
291             lldev->rxd_mbuf_cnt = index + 1;
292         }
293
294         for(index = 0; index < buffer_mode; index++)
295             xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
296                 lldev->rxd_mbuf_len[index]);
297 }
298
299 /**
300  * xge_buffer_mode_init
301  * Init Rx buffer mode
302  *
303  * @lldev Per-adapter Data
304  * @mtu Interface MTU
305  */
306 void
307 xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
308 {
309         int index = 0, buffer_size = 0;
310         xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
311
312         buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
313
314         if(lldev->enabled_lro)
315             (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
316         else
317             (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
318
319         lldev->rxd_mbuf_cnt = lldev->buffer_mode;
320         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
321             XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
322             ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
323         }
324         else {
325             XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
326             ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
327         }
328         xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
329
330         xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
331             ((lldev->enabled_tso) ? "Enabled":"Disabled"));
332         xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
333             ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
334         xge_os_printf("%s: Rx %d Buffer Mode Enabled",
335             device_get_nameunit(lldev->device), lldev->buffer_mode);
336 }
337
338 /**
339  * xge_driver_initialize
340  * Initializes HAL driver (common for all devices)
341  *
342  * Returns
343  * XGE_HAL_OK if success
344  * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
345  */
346 int
347 xge_driver_initialize(void)
348 {
349         xge_hal_uld_cbs_t       uld_callbacks;
350         xge_hal_driver_config_t driver_config;
351         xge_hal_status_e        status = XGE_HAL_OK;
352
353         /* Initialize HAL driver */
354         if(!hal_driver_init_count) {
355             xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
356             xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
357
358             /*
359              * Initial and maximum size of the queue used to store the events
360              * like Link up/down (xge_hal_event_e)
361              */
362             driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
363             driver_config.queue_size_max     = XGE_HAL_MAX_QUEUE_SIZE_MAX;
364
365             uld_callbacks.link_up   = xge_callback_link_up;
366             uld_callbacks.link_down = xge_callback_link_down;
367             uld_callbacks.crit_err  = xge_callback_crit_err;
368             uld_callbacks.event     = xge_callback_event;
369
370             status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
371             if(status != XGE_HAL_OK) {
372                 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
373                     xdi_out, status);
374             }
375         }
376         hal_driver_init_count = hal_driver_init_count + 1;
377
378         xge_hal_driver_debug_module_mask_set(0xffffffff);
379         xge_hal_driver_debug_level_set(XGE_TRACE);
380
381 xdi_out:
382         return status;
383 }
384
385 /**
386  * xge_media_init
387  * Initializes, adds and sets media
388  *
389  * @devc Device Handle
390  */
391 void
392 xge_media_init(device_t devc)
393 {
394         xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
395
396         /* Initialize Media */
397         ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
398             xge_ifmedia_status);
399
400         /* Add supported media */
401         ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
402         ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
403         ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO,    0, NULL);
404         ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR,  0, NULL);
405         ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR,  0, NULL);
406
407         /* Set media */
408         ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
409 }
410
411 /**
412  * xge_pci_space_save
413  * Save PCI configuration space
414  *
415  * @dev Device Handle
416  */
417 void
418 xge_pci_space_save(device_t dev)
419 {
420         struct pci_devinfo *dinfo = NULL;
421
422         dinfo = device_get_ivars(dev);
423         xge_trace(XGE_TRACE, "Saving PCI configuration space");
424         pci_cfg_save(dev, dinfo, 0);
425 }
426
427 /**
428  * xge_pci_space_restore
429  * Restore saved PCI configuration space
430  *
431  * @dev Device Handle
432  */
433 void
434 xge_pci_space_restore(device_t dev)
435 {
436         struct pci_devinfo *dinfo = NULL;
437
438         dinfo = device_get_ivars(dev);
439         xge_trace(XGE_TRACE, "Restoring PCI configuration space");
440         pci_cfg_restore(dev, dinfo);
441 }
442
/**
 * xge_msi_info_save
 * Save MSI info
 *
 * Reads the MSI capability registers (control, address, data) from PCI
 * configuration space into the softc so they can be re-programmed later
 * by xge_msi_info_restore().
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_save(xge_lldev_t * lldev)
{
        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control),
            &lldev->msi_info.msi_control);
        xge_os_pci_read32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
            &lldev->msi_info.msi_lower_address);
        xge_os_pci_read32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
            &lldev->msi_info.msi_higher_address);
        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_data),
            &lldev->msi_info.msi_data);
}
465
/**
 * xge_msi_info_restore
 * Restore saved MSI info
 *
 * Writes back the MSI capability registers captured by xge_msi_info_save().
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_restore(xge_lldev_t *lldev)
{
        /*
         * If interface is made down and up, traffic fails. It was observed that
         * MSI information were getting reset on down. Restoring them.
         */
        xge_os_pci_write16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control),
            lldev->msi_info.msi_control);

        xge_os_pci_write32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
            lldev->msi_info.msi_lower_address);

        xge_os_pci_write32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
            lldev->msi_info.msi_higher_address);

        xge_os_pci_write16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_data),
            lldev->msi_info.msi_data);
}
495
496 /**
497  * xge_init_mutex
498  * Initializes mutexes used in driver
499  *
500  * @lldev  Per-adapter Data
501  */
502 void
503 xge_mutex_init(xge_lldev_t *lldev)
504 {
505         int qindex;
506
507         sprintf(lldev->mtx_name_drv, "%s_drv",
508             device_get_nameunit(lldev->device));
509         mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
510             MTX_DEF);
511
512         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
513             sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
514                 device_get_nameunit(lldev->device), qindex);
515             mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
516                 MTX_DEF);
517         }
518 }
519
520 /**
521  * xge_mutex_destroy
522  * Destroys mutexes used in driver
523  *
524  * @lldev Per-adapter Data
525  */
526 void
527 xge_mutex_destroy(xge_lldev_t *lldev)
528 {
529         int qindex;
530
531         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
532             mtx_destroy(&lldev->mtx_tx[qindex]);
533         mtx_destroy(&lldev->mtx_drv);
534 }
535
536 /**
537  * xge_print_info
538  * Print device and driver information
539  *
540  * @lldev Per-adapter Data
541  */
542 void
543 xge_print_info(xge_lldev_t *lldev)
544 {
545         device_t dev = lldev->device;
546         xge_hal_device_t *hldev = lldev->devh;
547         xge_hal_status_e status = XGE_HAL_OK;
548         u64 val64 = 0;
549         const char *xge_pci_bus_speeds[17] = {
550             "PCI 33MHz Bus",
551             "PCI 66MHz Bus",
552             "PCIX(M1) 66MHz Bus",
553             "PCIX(M1) 100MHz Bus",
554             "PCIX(M1) 133MHz Bus",
555             "PCIX(M2) 133MHz Bus",
556             "PCIX(M2) 200MHz Bus",
557             "PCIX(M2) 266MHz Bus",
558             "PCIX(M1) Reserved",
559             "PCIX(M1) 66MHz Bus (Not Supported)",
560             "PCIX(M1) 100MHz Bus (Not Supported)",
561             "PCIX(M1) 133MHz Bus (Not Supported)",
562             "PCIX(M2) Reserved",
563             "PCIX 533 Reserved",
564             "PCI Basic Mode",
565             "PCIX Basic Mode",
566             "PCI Invalid Mode"
567         };
568
569         xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
570             device_get_nameunit(dev),
571             ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
572             hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
573         xge_os_printf("%s: Serial Number %s",
574             device_get_nameunit(dev), hldev->vpd_data.serial_num);
575
576         if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
577             status = xge_hal_mgmt_reg_read(hldev, 0,
578                 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
579             if(status != XGE_HAL_OK)
580                 xge_trace(XGE_ERR, "Error for getting bus speed");
581
582             xge_os_printf("%s: Adapter is on %s bit %s",
583                 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
584                 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
585         }
586
587         xge_os_printf("%s: Using %s Interrupts",
588             device_get_nameunit(dev),
589             (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
590 }
591
/**
 * xge_create_dma_tags
 * Creates DMA tags for both Tx and Rx
 *
 * Also pre-creates one spare Rx DMA map (extra_dma_map).  On any failure
 * every resource created so far is released via the goto-cleanup chain.
 *
 * @dev Device Handle
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
 */
xge_hal_status_e
xge_create_dma_tags(device_t dev)
{
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
        xge_hal_status_e status = XGE_HAL_FAIL;
        int mtu = (lldev->ifnetp)->if_mtu, maxsize;

        /* DMA tag for Tx */
        status = bus_dma_tag_create(
            bus_get_dma_tag(dev),                /* Parent                    */
            PAGE_SIZE,                           /* Alignment                 */
            0,                                   /* Bounds                    */
            BUS_SPACE_MAXADDR,                   /* Low Address               */
            BUS_SPACE_MAXADDR,                   /* High Address              */
            NULL,                                /* Filter Function           */
            NULL,                                /* Filter Function Arguments */
            MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size              */
            XGE_MAX_SEGS,                        /* Number of Segments        */
            MCLBYTES,                            /* Maximum Segment Size      */
            BUS_DMA_ALLOCNOW,                    /* Flags                     */
            NULL,                                /* Lock Function             */
            NULL,                                /* Lock Function Arguments   */
            (&lldev->dma_tag_tx));               /* DMA Tag                   */
        if(status != 0)
            goto _exit;

        /* Pick the smallest mbuf class that can hold a full frame */
        maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
        if(maxsize <= MCLBYTES) {
            maxsize = MCLBYTES;
        }
        else {
            if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
                maxsize = MJUMPAGESIZE;
            else
                maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
        }

        /* DMA tag for Rx */
        status = bus_dma_tag_create(
            bus_get_dma_tag(dev),                /* Parent                    */
            PAGE_SIZE,                           /* Alignment                 */
            0,                                   /* Bounds                    */
            BUS_SPACE_MAXADDR,                   /* Low Address               */
            BUS_SPACE_MAXADDR,                   /* High Address              */
            NULL,                                /* Filter Function           */
            NULL,                                /* Filter Function Arguments */
            maxsize,                             /* Maximum Size              */
            1,                                   /* Number of Segments        */
            maxsize,                             /* Maximum Segment Size      */
            BUS_DMA_ALLOCNOW,                    /* Flags                     */
            NULL,                                /* Lock Function             */
            NULL,                                /* Lock Function Arguments   */
            (&lldev->dma_tag_rx));               /* DMA Tag                   */
        if(status != 0)
            goto _exit1;

        /* Spare Rx map used when replacing a received buffer */
        status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
            &lldev->extra_dma_map);
        if(status != 0)
            goto _exit2;

        status = XGE_HAL_OK;
        goto _exit;

        /* Error unwinding: destroy tags in reverse order of creation.
         * status is reused for the destroy results, then forced to
         * XGE_HAL_FAIL so the caller always sees failure. */
_exit2:
        status = bus_dma_tag_destroy(lldev->dma_tag_rx);
        if(status != 0)
            xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
_exit1:
        status = bus_dma_tag_destroy(lldev->dma_tag_tx);
        if(status != 0)
            xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
        status = XGE_HAL_FAIL;
_exit:
        return status;
}
676
677 /**
678  * xge_confirm_changes
679  * Disables and Enables interface to apply requested change
680  *
681  * @lldev Per-adapter Data
682  * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0)
683  *
684  * Returns 0 or Error Number
685  */
686 void
687 xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
688 {
689         if(lldev->initialized == 0) goto _exit1;
690
691         mtx_lock(&lldev->mtx_drv);
692         if_down(lldev->ifnetp);
693         xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
694
695         if(option == XGE_SET_MTU)
696             (lldev->ifnetp)->if_mtu = lldev->mtu;
697         else
698             xge_buffer_mode_init(lldev, lldev->mtu);
699
700         xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
701         if_up(lldev->ifnetp);
702         mtx_unlock(&lldev->mtx_drv);
703         goto _exit;
704
705 _exit1:
706         /* Request was to change MTU and device not initialized */
707         if(option == XGE_SET_MTU) {
708             (lldev->ifnetp)->if_mtu = lldev->mtu;
709             xge_buffer_mode_init(lldev, lldev->mtu);
710         }
711 _exit:
712         return;
713 }
714
715 /**
716  * xge_change_lro_status
717  * Enable/Disable LRO feature
718  *
719  * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
720  *
721  * Returns 0 or error number.
722  */
723 static int
724 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
725 {
726         xge_lldev_t *lldev = (xge_lldev_t *)arg1;
727         int request = lldev->enabled_lro, status = XGE_HAL_OK;
728
729         status = sysctl_handle_int(oidp, &request, arg2, req);
730         if((status != XGE_HAL_OK) || (!req->newptr))
731             goto _exit;
732
733         if((request < 0) || (request > 1)) {
734             status = EINVAL;
735             goto _exit;
736         }
737
738         /* Return if current and requested states are same */
739         if(request == lldev->enabled_lro){
740             xge_trace(XGE_ERR, "LRO is already %s",
741                 ((request) ? "enabled" : "disabled"));
742             goto _exit;
743         }
744
745         lldev->enabled_lro = request;
746         xge_confirm_changes(lldev, XGE_CHANGE_LRO);
747         arg2 = lldev->enabled_lro;
748
749 _exit:
750         return status;
751 }
752
753 /**
754  * xge_add_sysctl_handlers
755  * Registers sysctl parameter value update handlers
756  *
757  * @lldev Per-adapter data
758  */
759 void
760 xge_add_sysctl_handlers(xge_lldev_t *lldev)
761 {
762         struct sysctl_ctx_list *context_list =
763             device_get_sysctl_ctx(lldev->device);
764         struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
765
766         SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
767             "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
768             xge_change_lro_status, "I", "Enable or disable LRO feature");
769 }
770
771 /**
772  * xge_attach
773  * Connects driver to the system if probe was success
774  *
775  * @dev Device Handle
776  */
/*
 * Staged bring-up: each failure path calls xge_resources_free() with the
 * label of the last stage that succeeded, then jumps to attach_out via
 * XGE_EXIT_ON_ERR (which presumably logs, sets 'status' and gotos — the
 * macro body is defined elsewhere; confirm).
 */
int
xge_attach(device_t dev)
{
        xge_hal_device_config_t *device_config;
        xge_hal_device_attr_t   attr;
        xge_lldev_t             *lldev;
        xge_hal_device_t        *hldev;
        xge_pci_info_t          *pci_info;
        struct ifnet            *ifnetp;
        int                     rid, rid0, rid1, error;
        int                     msi_count = 0, status = XGE_HAL_OK;
        int                     enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

        /* Scratch config; freed unconditionally at attach_out */
        device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
        if(!device_config) {
            XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
                attach_out_config, ENOMEM);
        }

        lldev = (xge_lldev_t *) device_get_softc(dev);
        if(!lldev) {
            XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
        }
        lldev->device = dev;

        xge_mutex_init(lldev);

        error = xge_driver_initialize();
        if(error != XGE_HAL_OK) {
            xge_resources_free(dev, xge_free_mutex);
            XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
        }

        /* HAL device */
        hldev =
            (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
        if(!hldev) {
            xge_resources_free(dev, xge_free_terminate_hal_driver);
            XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
                attach_out, ENOMEM);
        }
        lldev->devh = hldev;

        /* Our private structure */
        pci_info =
            (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
        if(!pci_info) {
            xge_resources_free(dev, xge_free_hal_device);
            XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
                attach_out, ENOMEM);
        }
        lldev->pdev      = pci_info;
        pci_info->device = dev;

        /* Set bus master */
        pci_enable_busmaster(dev);

        /* Get virtual address for BAR0 */
        rid0 = PCIR_BAR(0);
        pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
            RF_ACTIVE);
        if(pci_info->regmap0 == NULL) {
            xge_resources_free(dev, xge_free_pci_info);
            XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
                attach_out, ENOMEM);
        }
        attr.bar0 = (char *)pci_info->regmap0;

        /* Bus tag/handle pair the HAL uses for BAR0 register access */
        pci_info->bar0resource = (xge_bus_resource_t*)
            xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
        if(pci_info->bar0resource == NULL) {
            xge_resources_free(dev, xge_free_bar0);
            XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
                attach_out, ENOMEM);
        }
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
            rman_get_bustag(pci_info->regmap0);
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
            rman_get_bushandle(pci_info->regmap0);
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
            pci_info->regmap0;

        /* Get virtual address for BAR1 (register index 2 on this device) */
        rid1 = PCIR_BAR(2);
        pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
            RF_ACTIVE);
        if(pci_info->regmap1 == NULL) {
            xge_resources_free(dev, xge_free_bar0_resource);
            XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
                attach_out, ENOMEM);
        }
        attr.bar1 = (char *)pci_info->regmap1;

        /* Bus tag/handle pair for BAR1 */
        pci_info->bar1resource = (xge_bus_resource_t*)
            xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
        if(pci_info->bar1resource == NULL) {
            xge_resources_free(dev, xge_free_bar1);
            XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
                attach_out, ENOMEM);
        }
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
            rman_get_bustag(pci_info->regmap1);
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
            rman_get_bushandle(pci_info->regmap1);
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
            pci_info->regmap1;

        /* Save PCI config space */
        xge_pci_space_save(dev);

        attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
        attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
        /*
         * NOTE(review): lldev->irqhandle is not filled in until
         * bus_setup_intr() below; the softc is zero-initialized, so this
         * presumably hands NULL to the HAL — confirm the HAL tolerates it.
         */
        attr.irqh  = lldev->irqhandle;
        attr.cfgh  = pci_info;
        attr.pdev  = pci_info;

        /* Initialize device configuration parameters */
        xge_init_params(device_config, dev);

        /* rid 0 selects the legacy line interrupt; rid 1 the MSI vector */
        rid = 0;
        if(lldev->enabled_msi) {
            /* Number of MSI messages supported by device */
            msi_count = pci_msi_count(dev);
            if(msi_count > 1) {
                /* Device supports MSI */
                if(bootverbose) {
                    xge_trace(XGE_ERR, "MSI count: %d", msi_count);
                    xge_trace(XGE_ERR, "Now, driver supporting 1 message");
                }
                msi_count = 1;
                error = pci_alloc_msi(dev, &msi_count);
                if(error == 0) {
                    if(bootverbose)
                        xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
                    enable_msi = XGE_HAL_INTR_MODE_MSI;
                    rid = 1;
                }
                else {
                    if(bootverbose)
                        xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
                }
            }
        }
        lldev->enabled_msi = enable_msi;

        /* Allocate resource for irq */
        lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            (RF_SHAREABLE | RF_ACTIVE));
        if(lldev->irq == NULL) {
            xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
                ((rid == 0) ? "line interrupt" : "MSI"));
            /* MSI vector failed: release it and fall back to line IRQ */
            if(rid == 1) {
                error = pci_release_msi(dev);
                if(error != 0) {
                    xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
                        error);
                    xge_trace(XGE_ERR, "Requires reboot to use MSI again");
                }
                xge_trace(XGE_ERR, "Trying line interrupts");
                rid = 0;
                lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
                lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    (RF_SHAREABLE | RF_ACTIVE));
            }
            if(lldev->irq == NULL) {
                xge_trace(XGE_ERR, "Allocating irq resource failed");
                xge_resources_free(dev, xge_free_bar1_resource);
                status = ENOMEM;
                goto attach_out;
            }
        }

        device_config->intr_mode = lldev->enabled_msi;
        if(bootverbose) {
            xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
                lldev->enabled_msi, msi_count);
        }

        /* Initialize HAL device */
        error = xge_hal_device_initialize(hldev, &attr, device_config);
        if(error != XGE_HAL_OK) {
            xge_resources_free(dev, xge_free_irq_resource);
            XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
                ENXIO);
        }

        /* Let the HAL find our softc from its device handle */
        xge_hal_device_private_set(hldev, lldev);

        error = xge_interface_setup(dev);
        if(error != 0) {
            status = error;
            goto attach_out;
        }

        ifnetp         = lldev->ifnetp;
        ifnetp->if_mtu = device_config->mtu;

        xge_media_init(dev);

        /* Associate interrupt handler with the device */
        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
            error = bus_setup_intr(dev, lldev->irq,
                (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
                NULL,
#endif
                xge_isr_msi, lldev, &lldev->irqhandle);
            xge_msi_info_save(lldev);
        }
        else {
            error = bus_setup_intr(dev, lldev->irq,
                (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
                xge_isr_filter,
#endif
                xge_isr_line, lldev, &lldev->irqhandle);
        }
        if(error != 0) {
            xge_resources_free(dev, xge_free_media_interface);
            XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
                attach_out, ENXIO);
        }

        xge_print_info(lldev);

        xge_add_sysctl_handlers(lldev);

        xge_buffer_mode_init(lldev, device_config->mtu);

attach_out:
        /* Reached on success and on all post-allocation failures alike */
        xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
attach_out_config:
        return status;
}
1011
1012 /**
1013  * xge_resources_free
1014  * Undo what-all we did during load/attach
1015  *
1016  * @dev Device Handle
1017  * @error Identifies what-all to undo
1018  */
/*
 * Staged teardown: the switch deliberately falls through from the stage
 * named by 'error' all the way to xge_free_mutex, releasing resources in
 * reverse order of their acquisition in xge_attach().
 */
void
xge_resources_free(device_t dev, xge_lables_e error)
{
        xge_lldev_t *lldev;
        xge_pci_info_t *pci_info;
        xge_hal_device_t *hldev;
        int rid, status;

        /* LL Device */
        lldev = (xge_lldev_t *) device_get_softc(dev);
        pci_info = lldev->pdev;

        /* HAL Device */
        hldev = lldev->devh;

        switch(error) {
            case xge_free_all:
                /* Teardown interrupt handler - device association */
                bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
                /* FALLTHROUGH */

            case xge_free_media_interface:
                /* Media */
                ifmedia_removeall(&lldev->media);

                /* Detach Ether */
                ether_ifdetach(lldev->ifnetp);
                if_free(lldev->ifnetp);

                xge_hal_device_private_set(hldev, NULL);
                xge_hal_device_disable(hldev);
                /* FALLTHROUGH */

            case xge_free_terminate_hal_device:
                /* HAL Device */
                xge_hal_device_terminate(hldev);
                /* FALLTHROUGH */

            case xge_free_irq_resource:
                /* Release IRQ resource (rid 0 = line interrupt, 1 = MSI) */
                bus_release_resource(dev, SYS_RES_IRQ,
                    ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
                    lldev->irq);

                if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
                    status = pci_release_msi(dev);
                    if(status != 0) {
                        if(bootverbose) {
                            xge_trace(XGE_ERR,
                                "pci_release_msi returned %d", status);
                        }
                    }
                }
                /* FALLTHROUGH */

            case xge_free_bar1_resource:
                /* Restore PCI configuration space */
                xge_pci_space_restore(dev);

                /* Free bar1resource */
                xge_os_free(NULL, pci_info->bar1resource,
                    sizeof(xge_bus_resource_t));
                /* FALLTHROUGH */

            case xge_free_bar1:
                /* Release BAR1 */
                rid = PCIR_BAR(2);
                bus_release_resource(dev, SYS_RES_MEMORY, rid,
                    pci_info->regmap1);
                /* FALLTHROUGH */

            case xge_free_bar0_resource:
                /* Free bar0resource */
                xge_os_free(NULL, pci_info->bar0resource,
                    sizeof(xge_bus_resource_t));
                /* FALLTHROUGH */

            case xge_free_bar0:
                /* Release BAR0 */
                rid = PCIR_BAR(0);
                bus_release_resource(dev, SYS_RES_MEMORY, rid,
                    pci_info->regmap0);
                /* FALLTHROUGH */

            case xge_free_pci_info:
                /* Disable Bus Master */
                pci_disable_busmaster(dev);

                /* Free pci_info_t */
                lldev->pdev = NULL;
                xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
                /* FALLTHROUGH */

            case xge_free_hal_device:
                /* Free device configuration struct and HAL device */
                xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
                /* FALLTHROUGH */

            case xge_free_terminate_hal_driver:
                /* Terminate HAL driver once the last adapter is gone */
                hal_driver_init_count = hal_driver_init_count - 1;
                if(!hal_driver_init_count) {
                    xge_hal_driver_terminate();
                }
                /* FALLTHROUGH */

            case xge_free_mutex:
                xge_mutex_destroy(lldev);
        }
}
1118
1119 /**
1120  * xge_detach
1121  * Detaches driver from the Kernel subsystem
1122  *
1123  * @dev Device Handle
1124  */
1125 int
1126 xge_detach(device_t dev)
1127 {
1128         xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1129
1130         if(lldev->in_detach == 0) {
1131             lldev->in_detach = 1;
1132             xge_stop(lldev);
1133             xge_resources_free(dev, xge_free_all);
1134         }
1135
1136         return 0;
1137 }
1138
1139 /**
1140  * xge_shutdown
1141  * To shutdown device before system shutdown
1142  *
1143  * @dev Device Handle
1144  */
1145 int
1146 xge_shutdown(device_t dev)
1147 {
1148         xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1149         xge_stop(lldev);
1150
1151         return 0;
1152 }
1153
1154 /**
1155  * xge_interface_setup
1156  * Setup interface
1157  *
1158  * @dev Device Handle
1159  *
1160  * Returns 0 on success, ENXIO/ENOMEM on failure
1161  */
/*
 * NOTE(review): 'status' is declared xge_hal_status_e, yet the failure
 * paths funnel errno values (ENXIO/ENOMEM) through it via XGE_EXIT_ON_ERR
 * (a macro defined elsewhere that presumably logs, assigns status and
 * jumps to the label) — confirm callers expect errno-style returns.
 */
int
xge_interface_setup(device_t dev)
{
        u8 mcaddr[ETHER_ADDR_LEN];
        xge_hal_status_e status;
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
        struct ifnet *ifnetp;
        xge_hal_device_t *hldev = lldev->devh;

        /* Get the MAC address of the device */
        status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
        if(status != XGE_HAL_OK) {
            xge_resources_free(dev, xge_free_terminate_hal_device);
            XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
        }

        /* Get interface ifnet structure for this Ether device */
        ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
        if(ifnetp == NULL) {
            xge_resources_free(dev, xge_free_terminate_hal_device);
            XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
        }

        /* Initialize interface ifnet structure */
        if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
        ifnetp->if_mtu      = XGE_HAL_DEFAULT_MTU;
        ifnetp->if_baudrate = XGE_BAUDRATE;
        ifnetp->if_init     = xge_init;
        ifnetp->if_softc    = lldev;
        ifnetp->if_flags    = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifnetp->if_ioctl    = xge_ioctl;
        ifnetp->if_start    = xge_send;

        /* TODO: Check and assign optimal value */
        ifnetp->if_snd.ifq_maxlen = ifqmaxlen;

        /* Hardware capabilities: VLAN tagging/MTU and checksum offload */
        ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
            IFCAP_HWCSUM;
        if(lldev->enabled_tso)
            ifnetp->if_capabilities |= IFCAP_TSO4;
        if(lldev->enabled_lro)
            ifnetp->if_capabilities |= IFCAP_LRO;

        /* Enable everything the hardware can do by default */
        ifnetp->if_capenable = ifnetp->if_capabilities;

        /* Attach the interface */
        ether_ifattach(ifnetp, mcaddr);

ifsetup_out:
        return status;
}
1213
1214 /**
1215  * xge_callback_link_up
1216  * Callback for Link-up indication from HAL
1217  *
1218  * @userdata Per-adapter data
1219  */
1220 void
1221 xge_callback_link_up(void *userdata)
1222 {
1223         xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1224         struct ifnet *ifnetp = lldev->ifnetp;
1225
1226         ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1227         if_link_state_change(ifnetp, LINK_STATE_UP);
1228 }
1229
1230 /**
1231  * xge_callback_link_down
1232  * Callback for Link-down indication from HAL
1233  *
1234  * @userdata Per-adapter data
1235  */
1236 void
1237 xge_callback_link_down(void *userdata)
1238 {
1239         xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1240         struct ifnet *ifnetp = lldev->ifnetp;
1241
1242         ifnetp->if_flags  |= IFF_DRV_OACTIVE;
1243         if_link_state_change(ifnetp, LINK_STATE_DOWN);
1244 }
1245
1246 /**
1247  * xge_callback_crit_err
1248  * Callback for Critical error indication from HAL
1249  *
1250  * @userdata Per-adapter data
1251  * @type Event type (Enumerated hardware error)
1252  * @serr_data Hardware status
1253  */
/* Recover from a critical hardware error by resetting the adapter.
 * 'type' and 'serr_data' are currently unused — the trace message does not
 * report which error occurred. */
void
xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
{
        xge_trace(XGE_ERR, "Critical Error");
        xge_reset(userdata);
}
1260
1261 /**
1262  * xge_callback_event
1263  * Callback from HAL indicating that some event has been queued
1264  *
1265  * @item Queued event item
1266  */
1267 void
1268 xge_callback_event(xge_queue_item_t *item)
1269 {
1270         xge_lldev_t      *lldev  = NULL;
1271         xge_hal_device_t *hldev  = NULL;
1272         struct ifnet     *ifnetp = NULL;
1273
1274         hldev  = item->context;
1275         lldev  = xge_hal_device_private(hldev);
1276         ifnetp = lldev->ifnetp;
1277
1278         switch((int)item->event_type) {
1279             case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1280                 if(lldev->initialized) {
1281                     if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
1282                         ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1283                     }
1284                     else {
1285                         xge_queue_produce_context(
1286                             xge_hal_device_queue(lldev->devh),
1287                             XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1288                     }
1289                 }
1290                 break;
1291
1292             case XGE_LL_EVENT_DEVICE_RESETTING:
1293                 xge_reset(item->context);
1294                 break;
1295
1296             default:
1297                 break;
1298         }
1299 }
1300
1301 /**
1302  * xge_ifmedia_change
1303  * Media change driver callback
1304  *
1305  * @ifnetp Interface Handle
1306  *
1307  * Returns 0 if media is Ether else EINVAL
1308  */
1309 int
1310 xge_ifmedia_change(struct ifnet *ifnetp)
1311 {
1312         xge_lldev_t    *lldev    = ifnetp->if_softc;
1313         struct ifmedia *ifmediap = &lldev->media;
1314
1315         return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ?  EINVAL:0;
1316 }
1317
1318 /**
1319  * xge_ifmedia_status
1320  * Media status driver callback
1321  *
1322  * @ifnetp Interface Handle
1323  * @ifmr Interface Media Settings
1324  */
1325 void
1326 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1327 {
1328         xge_hal_status_e status;
1329         u64              regvalue;
1330         xge_lldev_t      *lldev = ifnetp->if_softc;
1331         xge_hal_device_t *hldev = lldev->devh;
1332
1333         ifmr->ifm_status = IFM_AVALID;
1334         ifmr->ifm_active = IFM_ETHER;
1335
1336         status = xge_hal_mgmt_reg_read(hldev, 0,
1337             xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1338         if(status != XGE_HAL_OK) {
1339             xge_trace(XGE_TRACE, "Getting adapter status failed");
1340             goto _exit;
1341         }
1342
1343         if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1344             XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1345             ifmr->ifm_status |= IFM_ACTIVE;
1346             ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1347             if_link_state_change(ifnetp, LINK_STATE_UP);
1348         }
1349         else {
1350             if_link_state_change(ifnetp, LINK_STATE_DOWN);
1351         }
1352 _exit:
1353         return;
1354 }
1355
1356 /**
1357  * xge_ioctl_stats
1358  * IOCTL to get statistics
1359  *
1360  * @lldev Per-adapter data
1361  * @ifreqp Interface request
1362  */
1363 int
1364 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1365 {
1366         xge_hal_status_e status = XGE_HAL_OK;
1367         char *data = (char *)ifreqp->ifr_data;
1368         void *info = NULL;
1369         int retValue = EINVAL;
1370
1371         switch(*data) {
1372             case XGE_QUERY_STATS:
1373                 mtx_lock(&lldev->mtx_drv);
1374                 status = xge_hal_stats_hw(lldev->devh,
1375                     (xge_hal_stats_hw_info_t **)&info);
1376                 mtx_unlock(&lldev->mtx_drv);
1377                 if(status == XGE_HAL_OK) {
1378                     if(copyout(info, ifreqp->ifr_data,
1379                         sizeof(xge_hal_stats_hw_info_t)) == 0)
1380                         retValue = 0;
1381                 }
1382                 else {
1383                     xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1384                         status);
1385                 }
1386                 break;
1387
1388             case XGE_QUERY_PCICONF:
1389                 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1390                 if(info != NULL) {
1391                     mtx_lock(&lldev->mtx_drv);
1392                     status = xge_hal_mgmt_pci_config(lldev->devh, info,
1393                         sizeof(xge_hal_pci_config_t));
1394                     mtx_unlock(&lldev->mtx_drv);
1395                     if(status == XGE_HAL_OK) {
1396                         if(copyout(info, ifreqp->ifr_data,
1397                             sizeof(xge_hal_pci_config_t)) == 0)
1398                             retValue = 0;
1399                     }
1400                     else {
1401                         xge_trace(XGE_ERR,
1402                             "Getting PCI configuration failed (%d)", status);
1403                     }
1404                     xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1405                 }
1406                 break;
1407
1408             case XGE_QUERY_DEVSTATS:
1409                 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1410                 if(info != NULL) {
1411                     mtx_lock(&lldev->mtx_drv);
1412                     status =xge_hal_mgmt_device_stats(lldev->devh, info,
1413                         sizeof(xge_hal_stats_device_info_t));
1414                     mtx_unlock(&lldev->mtx_drv);
1415                     if(status == XGE_HAL_OK) {
1416                         if(copyout(info, ifreqp->ifr_data,
1417                             sizeof(xge_hal_stats_device_info_t)) == 0)
1418                             retValue = 0;
1419                     }
1420                     else {
1421                         xge_trace(XGE_ERR, "Getting device info failed (%d)",
1422                             status);
1423                     }
1424                     xge_os_free(NULL, info,
1425                         sizeof(xge_hal_stats_device_info_t));
1426                 }
1427                 break;
1428
1429             case XGE_QUERY_SWSTATS:
1430                 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1431                 if(info != NULL) {
1432                     mtx_lock(&lldev->mtx_drv);
1433                     status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1434                         sizeof(xge_hal_stats_sw_err_t));
1435                     mtx_unlock(&lldev->mtx_drv);
1436                     if(status == XGE_HAL_OK) {
1437                         if(copyout(info, ifreqp->ifr_data,
1438                             sizeof(xge_hal_stats_sw_err_t)) == 0)
1439                             retValue = 0;
1440                     }
1441                     else {
1442                         xge_trace(XGE_ERR,
1443                             "Getting tcode statistics failed (%d)", status);
1444                     }
1445                     xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1446                 }
1447                 break;
1448
1449             case XGE_QUERY_DRIVERSTATS:
1450                 if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1451                     sizeof(xge_driver_stats_t)) == 0) {
1452                     retValue = 0;
1453                 }
1454                 else {
1455                     xge_trace(XGE_ERR,
1456                         "Copyout of driver statistics failed (%d)", status);
1457                 }
1458                 break;
1459
1460             case XGE_READ_VERSION:
1461                 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1462                 if(info != NULL) {
1463                     strcpy(info, XGE_DRIVER_VERSION);
1464                     if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1465                         retValue = 0;
1466                     xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1467                 }
1468                 break;
1469
1470             case XGE_QUERY_DEVCONF:
1471                 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1472                 if(info != NULL) {
1473                     mtx_lock(&lldev->mtx_drv);
1474                     status = xge_hal_mgmt_device_config(lldev->devh, info,
1475                         sizeof(xge_hal_device_config_t));
1476                     mtx_unlock(&lldev->mtx_drv);
1477                     if(status == XGE_HAL_OK) {
1478                         if(copyout(info, ifreqp->ifr_data,
1479                             sizeof(xge_hal_device_config_t)) == 0)
1480                             retValue = 0;
1481                     }
1482                     else {
1483                         xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1484                             status);
1485                     }
1486                     xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1487                 }
1488                 break;
1489
1490             case XGE_QUERY_BUFFER_MODE:
1491                 if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
1492                     sizeof(int)) == 0)
1493                     retValue = 0;
1494                 break;
1495
1496             case XGE_SET_BUFFER_MODE_1:
1497             case XGE_SET_BUFFER_MODE_2:
1498             case XGE_SET_BUFFER_MODE_5:
1499                 *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1500                 if(copyout(data, ifreqp->ifr_data, sizeof(data)) == 0)
1501                     retValue = 0;
1502                 break;
1503             default:
1504                 xge_trace(XGE_TRACE, "Nothing is matching");
1505                 retValue = ENOTTY;
1506                 break;
1507         }
1508         return retValue;
1509 }
1510
1511 /**
1512  * xge_ioctl_registers
1513  * IOCTL to get registers
1514  *
1515  * @lldev Per-adapter data
1516  * @ifreqp Interface request
1517  */
/*
 * NOTE(review): 'data' aliases the user-supplied ifr_data pointer and is
 * dereferenced directly (data->option, data->value, and the register-dump
 * stores below) while the same pointer is also treated as user memory by
 * copyout(). The safe pattern is copyin() into a kernel buffer first;
 * confirm before relying on direct access.
 */
int
xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
        xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
        xge_hal_status_e status = XGE_HAL_OK;
        int retValue = EINVAL, offset = 0, index = 0;
        u64 val64 = 0;

        /* Reading a register */
        if(strcmp(data->option, "-r") == 0) {
            data->value = 0x0000;
            mtx_lock(&lldev->mtx_drv);
            status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
                &data->value);
            mtx_unlock(&lldev->mtx_drv);
            if(status == XGE_HAL_OK) {
                if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
                    retValue = 0;
            }
        }
        /* Writing to a register */
        else if(strcmp(data->option, "-w") == 0) {
            mtx_lock(&lldev->mtx_drv);
            status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
                data->value);
            if(status == XGE_HAL_OK) {
                /* Read back and verify the write actually took effect */
                val64 = 0x0000;
                status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
                    &val64);
                if(status != XGE_HAL_OK) {
                    xge_trace(XGE_ERR, "Reading back updated register failed");
                }
                else {
                    if(val64 != data->value) {
                        xge_trace(XGE_ERR,
                            "Read and written register values mismatched");
                    }
                    else retValue = 0;
                }
            }
            else {
                xge_trace(XGE_ERR, "Getting register value failed");
            }
            mtx_unlock(&lldev->mtx_drv);
        }
        /* Neither -r nor -w: dump all BAR0 registers, 8 bytes at a time */
        else {
            mtx_lock(&lldev->mtx_drv);
            for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
                index++, offset += 0x0008) {
                val64 = 0;
                status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
                if(status != XGE_HAL_OK) {
                    xge_trace(XGE_ERR, "Getting register value failed");
                    break;
                }
                /* Store each register into the caller's buffer in order */
                *((u64 *)((u64 *)data + index)) = val64;
                retValue = 0;
            }
            mtx_unlock(&lldev->mtx_drv);

            if(retValue == 0) {
                if(copyout(data, ifreqp->ifr_data,
                    sizeof(xge_hal_pci_bar0_t)) != 0) {
                    xge_trace(XGE_ERR, "Copyout of register values failed");
                    retValue = EINVAL;
                }
            }
            else {
                xge_trace(XGE_ERR, "Getting register values failed");
            }
        }
        return retValue;
}
1591
1592 /**
1593  * xge_ioctl
1594  * Callback to control the device - Interface configuration
1595  *
1596  * @ifnetp Interface Handle
1597  * @command Device control command
1598  * @data Parameters associated with command (if any)
1599  */
int
xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
{
        struct ifreq   *ifreqp   = (struct ifreq *)data;
        xge_lldev_t    *lldev    = ifnetp->if_softc;
        struct ifmedia *ifmediap = &lldev->media;
        int             retValue = 0, mask = 0;

        /* Refuse to touch the hardware once detach has started */
        if(lldev->in_detach) {
            return retValue;
        }

        switch(command) {
            /* Set/Get ifnet address */
            case SIOCSIFADDR:
            case SIOCGIFADDR:
                ether_ioctl(ifnetp, command, data);
                break;

            /* Set ifnet MTU */
            case SIOCSIFMTU:
                retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
                break;

            /* Set ifnet flags */
            case SIOCSIFFLAGS:
                if(ifnetp->if_flags & IFF_UP) {
                    /* Link status is UP */
                    if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
                        xge_init(lldev);
                    }
                    /* Re-sync hardware promiscuous state with IFF_PROMISC:
                     * always disable first, then re-enable if the flag is
                     * still set (xge_enable_promisc checks IFF_PROMISC) */
                    xge_disable_promisc(lldev);
                    xge_enable_promisc(lldev);
                }
                else {
                    /* Link status is DOWN */
                    /* If device is in running, make it down */
                    if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
                        xge_stop(lldev);
                    }
                }
                break;

            /* Add/delete multicast address */
            case SIOCADDMULTI:
            case SIOCDELMULTI:
                if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
                    xge_setmulti(lldev);
                }
                break;

            /* Set/Get net media */
            case SIOCSIFMEDIA:
            case SIOCGIFMEDIA:
                retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
                break;

            /* Set capabilities */
            case SIOCSIFCAP:
                mtx_lock(&lldev->mtx_drv);
                /* mask holds only the capability bits the caller toggled */
                mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
                if(mask & IFCAP_TXCSUM) {
                    if(ifnetp->if_capenable & IFCAP_TXCSUM) {
                        /* Turning TXCSUM off also disables TSO4, since TSO
                         * depends on hardware checksum offload */
                        ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                        ifnetp->if_hwassist &=
                            ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
                    }
                    else {
                        ifnetp->if_capenable |= IFCAP_TXCSUM;
                        ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
                    }
                }
                if(mask & IFCAP_TSO4) {
                    if(ifnetp->if_capenable & IFCAP_TSO4) {
                        ifnetp->if_capenable &= ~IFCAP_TSO4;
                        ifnetp->if_hwassist  &= ~CSUM_TSO;

                        xge_os_printf("%s: TSO Disabled",
                            device_get_nameunit(lldev->device));
                    }
                    /* TSO4 may only be enabled while TXCSUM is active */
                    else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
                        ifnetp->if_capenable |= IFCAP_TSO4;
                        ifnetp->if_hwassist  |= CSUM_TSO;

                        xge_os_printf("%s: TSO Enabled",
                            device_get_nameunit(lldev->device));
                    }
                }

                mtx_unlock(&lldev->mtx_drv);
                break;

            /* Custom IOCTL 0 */
            case SIOCGPRIVATE_0:
                retValue = xge_ioctl_stats(lldev, ifreqp);
                break;

            /* Custom IOCTL 1 */
            case SIOCGPRIVATE_1:
                retValue = xge_ioctl_registers(lldev, ifreqp);
                break;

            default:
                retValue = EINVAL;
                break;
        }
        return retValue;
}
1708
1709 /**
1710  * xge_init
1711  * Initialize the interface
1712  *
1713  * @plldev Per-adapter Data
1714  */
1715 void
1716 xge_init(void *plldev)
1717 {
1718         xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1719
1720         mtx_lock(&lldev->mtx_drv);
1721         xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1722         xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1723         mtx_unlock(&lldev->mtx_drv);
1724 }
1725
1726 /**
1727  * xge_device_init
1728  * Initialize the interface (called by holding lock)
1729  *
1730  * @pdevin Per-adapter Data
1731  */
1732 void
1733 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1734 {
1735         struct ifnet     *ifnetp = lldev->ifnetp;
1736         xge_hal_device_t *hldev  = lldev->devh;
1737         struct ifaddr      *ifaddrp;
1738         unsigned char      *macaddr;
1739         struct sockaddr_dl *sockaddrp;
1740         int                 status   = XGE_HAL_OK;
1741
1742         mtx_assert((&lldev->mtx_drv), MA_OWNED);
1743
1744         /* If device is in running state, initializing is not required */
1745         if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1746             return;
1747
1748         /* Initializing timer */
1749         callout_init(&lldev->timer, 1);
1750
1751         xge_trace(XGE_TRACE, "Set MTU size");
1752         status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1753         if(status != XGE_HAL_OK) {
1754             xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1755             goto _exit;
1756         }
1757
1758         /* Enable HAL device */
1759         xge_hal_device_enable(hldev);
1760
1761         /* Get MAC address and update in HAL */
1762         ifaddrp             = ifnetp->if_addr;
1763         sockaddrp           = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1764         sockaddrp->sdl_type = IFT_ETHER;
1765         sockaddrp->sdl_alen = ifnetp->if_addrlen;
1766         macaddr             = LLADDR(sockaddrp);
1767         xge_trace(XGE_TRACE,
1768             "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1769             *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1770             *(macaddr + 4), *(macaddr + 5));
1771         status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1772         if(status != XGE_HAL_OK)
1773             xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1774
1775         /* Opening channels */
1776         mtx_unlock(&lldev->mtx_drv);
1777         status = xge_channel_open(lldev, option);
1778         mtx_lock(&lldev->mtx_drv);
1779         if(status != XGE_HAL_OK)
1780             goto _exit;
1781
1782         /* Set appropriate flags */
1783         ifnetp->if_drv_flags  |=  IFF_DRV_RUNNING;
1784         ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1785
1786         /* Checksum capability */
1787         ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1788             (CSUM_TCP | CSUM_UDP) : 0;
1789
1790         if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1791             ifnetp->if_hwassist |= CSUM_TSO;
1792
1793         /* Enable interrupts */
1794         xge_hal_device_intr_enable(hldev);
1795
1796         callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1797
1798         /* Disable promiscuous mode */
1799         xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1800         xge_enable_promisc(lldev);
1801
1802         /* Device is initialized */
1803         lldev->initialized = 1;
1804         xge_os_mdelay(1000);
1805
1806 _exit:
1807         return;
1808 }
1809
1810 /**
1811  * xge_timer
1812  * Timer timeout function to handle link status
1813  *
1814  * @devp Per-adapter Data
1815  */
1816 void
1817 xge_timer(void *devp)
1818 {
1819         xge_lldev_t      *lldev = (xge_lldev_t *)devp;
1820         xge_hal_device_t *hldev = lldev->devh;
1821
1822         /* Poll for changes */
1823         xge_hal_device_poll(hldev);
1824
1825         /* Reset timer */
1826         callout_reset(&lldev->timer, hz, xge_timer, lldev);
1827
1828         return;
1829 }
1830
1831 /**
1832  * xge_stop
1833  * De-activate the interface
1834  *
1835  * @lldev Per-adater Data
1836  */
1837 void
1838 xge_stop(xge_lldev_t *lldev)
1839 {
1840         mtx_lock(&lldev->mtx_drv);
1841         xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1842         mtx_unlock(&lldev->mtx_drv);
1843 }
1844
1845 /**
1846  * xge_isr_filter
1847  * ISR filter function - to filter interrupts from other devices (shared)
1848  *
1849  * @handle Per-adapter Data
1850  *
1851  * Returns
1852  * FILTER_STRAY if interrupt is from other device
1853  * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1854  */
1855 int
1856 xge_isr_filter(void *handle)
1857 {
1858         xge_lldev_t *lldev       = (xge_lldev_t *)handle;
1859         xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1860         u16 retValue = FILTER_STRAY;
1861         u64 val64    = 0;
1862
1863         XGE_DRV_STATS(isr_filter);
1864
1865         val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1866             &bar0->general_int_status);
1867         retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1868
1869         return retValue;
1870 }
1871
1872 /**
1873  * xge_isr_line
1874  * Interrupt service routine for Line interrupts
1875  *
1876  * @plldev Per-adapter Data
1877  */
1878 void
1879 xge_isr_line(void *plldev)
1880 {
1881         xge_hal_status_e status;
1882         xge_lldev_t      *lldev   = (xge_lldev_t *)plldev;
1883         xge_hal_device_t *hldev   = (xge_hal_device_t *)lldev->devh;
1884         struct ifnet     *ifnetp  = lldev->ifnetp;
1885
1886         XGE_DRV_STATS(isr_line);
1887
1888         if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1889             status = xge_hal_device_handle_irq(hldev);
1890             if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1891                 xge_send(ifnetp);
1892         }
1893 }
1894
1895 /*
1896  * xge_isr_msi
1897  * ISR for Message signaled interrupts
1898  */
1899 void
1900 xge_isr_msi(void *plldev)
1901 {
1902         xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1903         XGE_DRV_STATS(isr_msi);
1904         xge_hal_device_continue_irq(lldev->devh);
1905 }
1906
1907 /**
1908  * xge_rx_open
1909  * Initiate and open all Rx channels
1910  *
1911  * @qid Ring Index
1912  * @lldev Per-adapter Data
1913  * @rflag Channel open/close/reopen flag
1914  *
1915  * Returns 0 or Error Number
1916  */
int
xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
{
        u64 adapter_status = 0x0;
        xge_hal_status_e status = XGE_HAL_FAIL;

        /* Ring channel attributes; per_dtr_space reserves room for the
         * driver-private xge_rx_priv_t attached to each Rx descriptor */
        xge_hal_channel_attr_t attr = {
            .post_qid      = qid,
            .compl_qid     = 0,
            .callback      = xge_rx_compl,
            .per_dtr_space = sizeof(xge_rx_priv_t),
            .flags         = 0,
            .type          = XGE_HAL_CHANNEL_TYPE_RING,
            .userdata      = lldev,
            .dtr_init      = xge_rx_initial_replenish,
            .dtr_term      = xge_rx_term
        };

        /* If device is not ready, return.  XGE_EXIT_ON_ERR presumably logs
         * the message, sets status, and jumps to _exit -- defined elsewhere */
        status = xge_hal_device_status(lldev->devh, &adapter_status);
        if(status != XGE_HAL_OK) {
            xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
            XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
        }
        else {
            status = xge_hal_channel_open(lldev->devh, &attr,
                &lldev->ring_channel[qid], rflag);
        }

_exit:
        return status;
}
1949
1950 /**
1951  * xge_tx_open
1952  * Initialize and open all Tx channels
1953  *
1954  * @lldev Per-adapter Data
1955  * @tflag Channel open/close/reopen flag
1956  *
1957  * Returns 0 or Error Number
1958  */
1959 int
1960 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1961 {
1962         xge_hal_status_e status = XGE_HAL_FAIL;
1963         u64 adapter_status = 0x0;
1964         int qindex, index;
1965
1966         xge_hal_channel_attr_t attr = {
1967             .compl_qid     = 0,
1968             .callback      = xge_tx_compl,
1969             .per_dtr_space = sizeof(xge_tx_priv_t),
1970             .flags         = 0,
1971             .type          = XGE_HAL_CHANNEL_TYPE_FIFO,
1972             .userdata      = lldev,
1973             .dtr_init      = xge_tx_initial_replenish,
1974             .dtr_term      = xge_tx_term
1975         };
1976
1977         /* If device is not ready, return */
1978         status = xge_hal_device_status(lldev->devh, &adapter_status);
1979         if(status != XGE_HAL_OK) {
1980             xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1981             XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1982         }
1983
1984         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1985             attr.post_qid = qindex,
1986             status = xge_hal_channel_open(lldev->devh, &attr,
1987                 &lldev->fifo_channel[qindex], tflag);
1988             if(status != XGE_HAL_OK) {
1989                 for(index = 0; index < qindex; index++)
1990                     xge_hal_channel_close(lldev->fifo_channel[index], tflag);
1991             }
1992         }
1993
1994 _exit:
1995         return status;
1996 }
1997
1998 /**
1999  * xge_enable_msi
2000  * Enables MSI
2001  *
2002  * @lldev Per-adapter Data
2003  */
2004 void
2005 xge_enable_msi(xge_lldev_t *lldev)
2006 {
2007         xge_list_t        *item    = NULL;
2008         xge_hal_device_t  *hldev   = lldev->devh;
2009         xge_hal_channel_t *channel = NULL;
2010         u16 offset = 0, val16 = 0;
2011
2012         xge_os_pci_read16(lldev->pdev, NULL,
2013             xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2014
2015         /* Update msi_data */
2016         offset = (val16 & 0x80) ? 0x4c : 0x48;
2017         xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2018         if(val16 & 0x1)
2019             val16 &= 0xfffe;
2020         else
2021             val16 |= 0x1;
2022         xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2023
2024         /* Update msi_control */
2025         xge_os_pci_read16(lldev->pdev, NULL,
2026             xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2027         val16 |= 0x10;
2028         xge_os_pci_write16(lldev->pdev, NULL,
2029             xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2030
2031         /* Set TxMAT and RxMAT registers with MSI */
2032         xge_list_for_each(item, &hldev->free_channels) {
2033             channel = xge_container_of(item, xge_hal_channel_t, item);
2034             xge_hal_channel_msi_set(channel, 1, (u32)val16);
2035         }
2036 }
2037
2038 /**
2039  * xge_channel_open
2040  * Open both Tx and Rx channels
2041  *
2042  * @lldev Per-adapter Data
2043  * @option Channel reopen option
2044  */
int
xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
{
        xge_lro_entry_t *lro_session = NULL;
        xge_hal_status_e status   = XGE_HAL_OK;
        int index = 0, index2 = 0;

        /* Re-program MSI state before opening channels when MSI is in use */
        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
            xge_msi_info_restore(lldev);
            xge_enable_msi(lldev);
        }

/* Restart point: the Rx-open fallback below jumps back here after
 * switching from 1-buffer to 5-buffer mode, so DMA tags are recreated */
_exit2:
        status = xge_create_dma_tags(lldev->device);
        if(status != XGE_HAL_OK)
            XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);

        /* Open ring (Rx) channel */
        for(index = 0; index < XGE_RING_COUNT; index++) {
            status = xge_rx_open(index, lldev, option);
            if(status != XGE_HAL_OK) {
                /*
                 * DMA mapping fails in the unpatched Kernel which can't
                 * allocate contiguous memory for Jumbo frames.
                 * Try using 5 buffer mode.
                 */
                if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
                    (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
                    MJUMPAGESIZE)) {
                    /* Close so far opened channels */
                    for(index2 = 0; index2 < index; index2++) {
                        xge_hal_channel_close(lldev->ring_channel[index2],
                            option);
                    }

                    /* Destroy DMA tags intended to use for 1 buffer mode */
                    if(bus_dmamap_destroy(lldev->dma_tag_rx,
                        lldev->extra_dma_map)) {
                        xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
                    }
                    if(bus_dma_tag_destroy(lldev->dma_tag_rx))
                        xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
                    if(bus_dma_tag_destroy(lldev->dma_tag_tx))
                        xge_trace(XGE_ERR, "Tx DMA tag destroy failed");

                    /* Switch to 5 buffer mode */
                    lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
                    xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);

                    /* Restart init */
                    goto _exit2;
                }
                else {
                    XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
                        status);
                }
            }
        }

        /* Pre-allocate the free pool of LRO session descriptors.  On a
         * partial allocation failure, lro_num is simply clamped to however
         * many entries were obtained. */
        if(lldev->enabled_lro) {
            SLIST_INIT(&lldev->lro_free);
            SLIST_INIT(&lldev->lro_active);
            lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;

            for(index = 0; index < lldev->lro_num; index++) {
                lro_session = (xge_lro_entry_t *)
                    xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
                if(lro_session == NULL) {
                    lldev->lro_num = index;
                    break;
                }
                SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
            }
        }

        /* Open FIFO (Tx) channel */
        status = xge_tx_open(lldev, option);
        if(status != XGE_HAL_OK)
            XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);

        goto _exit;

_exit1:
        /*
         * Opening Rx channel(s) failed (index is <last ring index - 1>) or
         * Initialization of LRO failed (index is XGE_RING_COUNT)
         * Opening Tx channel failed    (index is XGE_RING_COUNT)
         */
        for(index2 = 0; index2 < index; index2++)
            xge_hal_channel_close(lldev->ring_channel[index2], option);

_exit:
        return status;
}
2139
2140 /**
2141  * xge_channel_close
2142  * Close both Tx and Rx channels
2143  *
2144  * @lldev Per-adapter Data
2145  * @option Channel reopen option
2146  *
2147  */
2148 void
2149 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2150 {
2151         int qindex = 0;
2152
2153         DELAY(1000 * 1000);
2154
2155         /* Close FIFO (Tx) channel */
2156         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2157             xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2158
2159         /* Close Ring (Rx) channels */
2160         for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2161             xge_hal_channel_close(lldev->ring_channel[qindex], option);
2162
2163         if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2164             xge_trace(XGE_ERR, "Rx extra map destroy failed");
2165         if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2166             xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2167         if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2168             xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2169 }
2170
2171 /**
2172  * dmamap_cb
2173  * DMA map callback
2174  *
2175  * @arg Parameter passed from dmamap
2176  * @segs Segments
2177  * @nseg Number of segments
2178  * @error Error
2179  */
2180 void
2181 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2182 {
2183         if(!error) {
2184             *(bus_addr_t *) arg = segs->ds_addr;
2185         }
2186 }
2187
2188 /**
2189  * xge_reset
2190  * Device Reset
2191  *
2192  * @lldev Per-adapter Data
2193  */
2194 void
2195 xge_reset(xge_lldev_t *lldev)
2196 {
2197         xge_trace(XGE_TRACE, "Reseting the chip");
2198
2199         /* If the device is not initialized, return */
2200         if(lldev->initialized) {
2201             mtx_lock(&lldev->mtx_drv);
2202             xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2203             xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2204             mtx_unlock(&lldev->mtx_drv);
2205         }
2206
2207         return;
2208 }
2209
2210 /**
2211  * xge_setmulti
2212  * Set an address as a multicast address
2213  *
2214  * @lldev Per-adapter Data
2215  */
2216 void
2217 xge_setmulti(xge_lldev_t *lldev)
2218 {
2219         struct ifmultiaddr *ifma;
2220         u8                 *lladdr;
2221         xge_hal_device_t   *hldev        = (xge_hal_device_t *)lldev->devh;
2222         struct ifnet       *ifnetp       = lldev->ifnetp;
2223         int                index         = 0;
2224         int                offset        = 1;
2225         int                table_size    = 47;
2226         xge_hal_status_e   status        = XGE_HAL_OK;
2227         u8                 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2228
2229         if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2230             status = xge_hal_device_mcast_enable(hldev);
2231             lldev->all_multicast = 1;
2232         }
2233         else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2234             status = xge_hal_device_mcast_disable(hldev);
2235             lldev->all_multicast = 0;
2236         }
2237
2238         if(status != XGE_HAL_OK) {
2239             xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2240             goto _exit;
2241         }
2242
2243         /* Updating address list */
2244         if_maddr_rlock(ifnetp);
2245         index = 0;
2246         TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2247             if(ifma->ifma_addr->sa_family != AF_LINK) {
2248                 continue;
2249             }
2250             lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2251             index += 1;
2252         }
2253         if_maddr_runlock(ifnetp);
2254
2255         if((!lldev->all_multicast) && (index)) {
2256             lldev->macaddr_count = (index + 1);
2257             if(lldev->macaddr_count > table_size) {
2258                 goto _exit;
2259             }
2260
2261             /* Clear old addresses */
2262             for(index = 0; index < 48; index++) {
2263                 xge_hal_device_macaddr_set(hldev, (offset + index),
2264                     initial_addr);
2265             }
2266         }
2267
2268         /* Add new addresses */
2269         if_maddr_rlock(ifnetp);
2270         index = 0;
2271         TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2272             if(ifma->ifma_addr->sa_family != AF_LINK) {
2273                 continue;
2274             }
2275             lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2276             xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2277             index += 1;
2278         }
2279         if_maddr_runlock(ifnetp);
2280
2281 _exit:
2282         return;
2283 }
2284
2285 /**
2286  * xge_enable_promisc
2287  * Enable Promiscuous Mode
2288  *
2289  * @lldev Per-adapter Data
2290  */
2291 void
2292 xge_enable_promisc(xge_lldev_t *lldev)
2293 {
2294         struct ifnet *ifnetp = lldev->ifnetp;
2295         xge_hal_device_t *hldev = lldev->devh;
2296         xge_hal_pci_bar0_t *bar0 = NULL;
2297         u64 val64 = 0;
2298
2299         bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2300
2301         if(ifnetp->if_flags & IFF_PROMISC) {
2302             xge_hal_device_promisc_enable(lldev->devh);
2303
2304             /*
2305              * When operating in promiscuous mode, don't strip the VLAN tag
2306              */
2307             val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2308                 &bar0->rx_pa_cfg);
2309             val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2310             val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2311             xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2312                 &bar0->rx_pa_cfg);
2313
2314             xge_trace(XGE_TRACE, "Promiscuous mode ON");
2315         }
2316 }
2317
2318 /**
2319  * xge_disable_promisc
2320  * Disable Promiscuous Mode
2321  *
2322  * @lldev Per-adapter Data
2323  */
2324 void
2325 xge_disable_promisc(xge_lldev_t *lldev)
2326 {
2327         xge_hal_device_t *hldev = lldev->devh;
2328         xge_hal_pci_bar0_t *bar0 = NULL;
2329         u64 val64 = 0;
2330
2331         bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2332
2333         xge_hal_device_promisc_disable(lldev->devh);
2334
2335         /*
2336          * Strip VLAN tag when operating in non-promiscuous mode
2337          */
2338         val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2339             &bar0->rx_pa_cfg);
2340         val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2341         val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2342         xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2343             &bar0->rx_pa_cfg);
2344
2345         xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2346 }
2347
2348 /**
2349  * xge_change_mtu
2350  * Change interface MTU to a requested valid size
2351  *
2352  * @lldev Per-adapter Data
2353  * @NewMtu Requested MTU
2354  *
2355  * Returns 0 or Error Number
2356  */
int
xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
{
        int status = XGE_HAL_OK;

        /* Check requested MTU size for boundary.  XGE_EXIT_ON_ERR presumably
         * logs, sets status to EINVAL, and jumps to _exit -- macro defined
         * elsewhere */
        if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
            XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
        }

        /* Record the new MTU and let xge_confirm_changes apply it --
         * NOTE(review): the apply mechanism lives in xge_confirm_changes,
         * defined elsewhere in this file */
        lldev->mtu = new_mtu;
        xge_confirm_changes(lldev, XGE_SET_MTU);

_exit:
        return status;
}
2373
2374 /**
2375  * xge_device_stop
2376  *
2377  * Common code for both stop and part of reset. Disables device, interrupts and
2378  * closes channels
2379  *
2380  * @dev Device Handle
2381  * @option Channel normal/reset option
2382  */
void
xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
{
        xge_hal_device_t *hldev  = lldev->devh;
        struct ifnet     *ifnetp = lldev->ifnetp;
        u64               val64  = 0;

        /* Caller must hold the driver lock for the whole teardown */
        mtx_assert((&lldev->mtx_drv), MA_OWNED);

        /* If device is not in "Running" state, return */
        if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
            goto _exit;

        /* Set appropriate flags */
        ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* Stop timer */
        callout_stop(&lldev->timer);

        /* Disable interrupts */
        xge_hal_device_intr_disable(hldev);

        /* Flush the HAL event queue with the driver lock dropped --
         * presumably because the flush may invoke callbacks that take it */
        mtx_unlock(&lldev->mtx_drv);
        xge_queue_flush(xge_hal_device_queue(lldev->devh));
        mtx_lock(&lldev->mtx_drv);

        /* Disable HAL device; on failure log the adapter status register */
        if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
            xge_trace(XGE_ERR, "Disabling HAL device failed");
            xge_hal_device_status(hldev, &val64);
            xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
        }

        /* Close Tx and Rx channels */
        xge_channel_close(lldev, option);

        /* Reset HAL device */
        xge_hal_device_reset(hldev);

        /* Settle for a second, then mark the adapter uninitialized */
        xge_os_mdelay(1000);
        lldev->initialized = 0;

        if_link_state_change(ifnetp, LINK_STATE_DOWN);

_exit:
        return;
}
2430
2431 /**
2432  * xge_set_mbuf_cflags
2433  * set checksum flag for the mbuf
2434  *
2435  * @pkt Packet
2436  */
2437 void
2438 xge_set_mbuf_cflags(mbuf_t pkt)
2439 {
2440         pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2441         pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2442         pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2443         pkt->m_pkthdr.csum_data = htons(0xffff);
2444 }
2445
2446 /**
2447  * xge_lro_flush_sessions
2448  * Flush LRO session and send accumulated LRO packet to upper layer
2449  *
2450  * @lldev Per-adapter Data
2451  */
2452 void
2453 xge_lro_flush_sessions(xge_lldev_t *lldev)
2454 {
2455         xge_lro_entry_t *lro_session = NULL;
2456
2457         while(!SLIST_EMPTY(&lldev->lro_active)) {
2458             lro_session = SLIST_FIRST(&lldev->lro_active);
2459             SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2460             xge_lro_flush(lldev, lro_session);
2461         }
2462 }
2463
2464 /**
2465  * xge_lro_flush
2466  * Flush LRO session. Send accumulated LRO packet to upper layer
2467  *
2468  * @lldev Per-adapter Data
2469  * @lro LRO session to be flushed
2470  */
static void
xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
{
        struct ip *header_ip;
        struct tcphdr *header_tcp;
        u32 *ptr;

        /* If segments were coalesced, rewrite the headers of the merged
         * packet to reflect the accumulated payload */
        if(lro_session->append_cnt) {
            header_ip = lro_session->lro_header_ip;
            /* New IP total length = accumulated length minus Ethernet header.
             * NOTE(review): the IP header checksum is not recomputed here --
             * presumably acceptable because received mbufs are marked
             * CSUM_IP_CHECKED (see xge_set_mbuf_cflags); confirm */
            header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
            lro_session->m_head->m_pkthdr.len = lro_session->len;
            header_tcp = (struct tcphdr *)(header_ip + 1);
            header_tcp->th_ack = lro_session->ack_seq;
            header_tcp->th_win = lro_session->window;
            /* Refresh the TCP timestamp option (TSval/TSecr words follow
             * the NOP/NOP/TIMESTAMP header word) */
            if(lro_session->timestamp) {
                ptr = (u32 *)(header_tcp + 1);
                ptr[1] = htonl(lro_session->tsval);
                ptr[2] = lro_session->tsecr;
            }
        }

        /* Hand the packet to the stack, then recycle the session entry */
        (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
        lro_session->m_head = NULL;
        lro_session->timestamp = 0;
        lro_session->append_cnt = 0;
        SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
}
2498
2499 /**
2500  * xge_lro_accumulate
2501  * Accumulate packets to form a large LRO packet based on various conditions
2502  *
2503  * @lldev Per-adapter Data
2504  * @m_head Current Packet
2505  *
2506  * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2507  */
2508 static int
2509 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2510 {
2511         struct ether_header *header_ethernet;
2512         struct ip *header_ip;
2513         struct tcphdr *header_tcp;
2514         u32 seq, *ptr;
2515         struct mbuf *buffer_next, *buffer_tail;
2516         xge_lro_entry_t *lro_session;
2517         xge_hal_status_e status = XGE_HAL_FAIL;
2518         int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2519         int trim;
2520
2521         /* Get Ethernet header */
2522         header_ethernet = mtod(m_head, struct ether_header *);
2523
2524         /* Return if it is not IP packet */
2525         if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2526             goto _exit;
2527
2528         /* Get IP header */
2529         header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2530             (struct ip *)(header_ethernet + 1) :
2531             mtod(m_head->m_next, struct ip *);
2532
2533         /* Return if it is not TCP packet */
2534         if(header_ip->ip_p != IPPROTO_TCP)
2535             goto _exit;
2536
2537         /* Return if packet has options */
2538         if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2539             goto _exit;
2540
2541         /* Return if packet is fragmented */
2542         if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2543             goto _exit;
2544
2545         /* Get TCP header */
2546         header_tcp = (struct tcphdr *)(header_ip + 1);
2547
2548         /* Return if not ACK or PUSH */
2549         if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2550             goto _exit;
2551
2552         /* Only timestamp option is handled */
2553         tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2554         tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2555         ptr = (u32 *)(header_tcp + 1);
2556         if(tcp_options != 0) {
2557             if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2558                 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2559                 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2560                 goto _exit;
2561             }
2562         }
2563
2564         /* Total length of packet (IP) */
2565         ip_len = ntohs(header_ip->ip_len);
2566
2567         /* TCP data size */
2568         tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2569
2570         /* If the frame is padded, trim it */
2571         tot_len = m_head->m_pkthdr.len;
2572         trim = tot_len - (ip_len + ETHER_HDR_LEN);
2573         if(trim != 0) {
2574             if(trim < 0)
2575                 goto _exit;
2576             m_adj(m_head, -trim);
2577             tot_len = m_head->m_pkthdr.len;
2578         }
2579
2580         buffer_next = m_head;
2581         buffer_tail = NULL;
2582         while(buffer_next != NULL) {
2583             buffer_tail = buffer_next;
2584             buffer_next = buffer_tail->m_next;
2585         }
2586
2587         /* Total size of only headers */
2588         hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2589
2590         /* Get sequence number */
2591         seq = ntohl(header_tcp->th_seq);
2592
2593         SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2594             if(lro_session->source_port == header_tcp->th_sport &&
2595                 lro_session->dest_port == header_tcp->th_dport &&
2596                 lro_session->source_ip == header_ip->ip_src.s_addr &&
2597                 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2598
2599                 /* Unmatched sequence number, flush LRO session */
2600                 if(__predict_false(seq != lro_session->next_seq)) {
2601                     SLIST_REMOVE(&lldev->lro_active, lro_session,
2602                         xge_lro_entry_t, next);
2603                     xge_lro_flush(lldev, lro_session);
2604                     goto _exit;
2605                 }
2606
2607                 /* Handle timestamp option */
2608                 if(tcp_options) {
2609                     u32 tsval = ntohl(*(ptr + 1));
2610                     if(__predict_false(lro_session->tsval > tsval ||
2611                         *(ptr + 2) == 0)) {
2612                         goto _exit;
2613                     }
2614                     lro_session->tsval = tsval;
2615                     lro_session->tsecr = *(ptr + 2);
2616                 }
2617
2618                 lro_session->next_seq += tcp_data_len;
2619                 lro_session->ack_seq = header_tcp->th_ack;
2620                 lro_session->window = header_tcp->th_win;
2621
2622                 /* If TCP data/payload is of 0 size, free mbuf */
2623                 if(tcp_data_len == 0) {
2624                     m_freem(m_head);
2625                     status = XGE_HAL_OK;
2626                     goto _exit;
2627                 }
2628
2629                 lro_session->append_cnt++;
2630                 lro_session->len += tcp_data_len;
2631
2632                 /* Adjust mbuf so that m_data points to payload than headers */
2633                 m_adj(m_head, hlen);
2634
2635                 /* Append this packet to LRO accumulated packet */
2636                 lro_session->m_tail->m_next = m_head;
2637                 lro_session->m_tail = buffer_tail;
2638
2639                 /* Flush if LRO packet is exceeding maximum size */
2640                 if(lro_session->len >
2641                     (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2642                     SLIST_REMOVE(&lldev->lro_active, lro_session,
2643                         xge_lro_entry_t, next);
2644                     xge_lro_flush(lldev, lro_session);
2645                 }
2646                 status = XGE_HAL_OK;
2647                 goto _exit;
2648             }
2649         }
2650
2651         if(SLIST_EMPTY(&lldev->lro_free))
2652             goto _exit;
2653
2654         /* Start a new LRO session */
2655         lro_session = SLIST_FIRST(&lldev->lro_free);
2656         SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2657         SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2658         lro_session->source_port = header_tcp->th_sport;
2659         lro_session->dest_port = header_tcp->th_dport;
2660         lro_session->source_ip = header_ip->ip_src.s_addr;
2661         lro_session->dest_ip = header_ip->ip_dst.s_addr;
2662         lro_session->next_seq = seq + tcp_data_len;
2663         lro_session->mss = tcp_data_len;
2664         lro_session->ack_seq = header_tcp->th_ack;
2665         lro_session->window = header_tcp->th_win;
2666
2667         lro_session->lro_header_ip = header_ip;
2668
2669         /* Handle timestamp option */
2670         if(tcp_options) {
2671             lro_session->timestamp = 1;
2672             lro_session->tsval = ntohl(*(ptr + 1));
2673             lro_session->tsecr = *(ptr + 2);
2674         }
2675
2676         lro_session->len = tot_len;
2677         lro_session->m_head = m_head;
2678         lro_session->m_tail = buffer_tail;
2679         status = XGE_HAL_OK;
2680
2681 _exit:
2682         return status;
2683 }
2684
2685 /**
2686  * xge_accumulate_large_rx
2687  * Accumulate packets to form a large LRO packet based on various conditions
2688  *
2689  * @lldev Per-adapter Data
2690  * @pkt Current packet
2691  * @pkt_length Packet Length
2692  * @rxd_priv Rx Descriptor Private Data
2693  */
2694 void
2695 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2696         xge_rx_priv_t *rxd_priv)
2697 {
2698         if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2699             bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2700                 BUS_DMASYNC_POSTREAD);
2701             (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2702         }
2703 }
2704
2705 /**
2706  * xge_rx_compl
2707  * If the interrupt is due to received frame (Rx completion), send it up
2708  *
2709  * @channelh Ring Channel Handle
2710  * @dtr Current Descriptor
2711  * @t_code Transfer Code indicating success or error
2712  * @userdata Per-adapter Data
2713  *
2714  * Returns XGE_HAL_OK or HAL error enums
2715  */
2716 xge_hal_status_e
2717 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2718         void *userdata)
2719 {
2720         struct ifnet       *ifnetp;
2721         xge_rx_priv_t      *rxd_priv = NULL;
2722         mbuf_t              mbuf_up  = NULL;
2723         xge_hal_status_e    status   = XGE_HAL_OK;
2724         xge_hal_dtr_info_t  ext_info;
2725         int                 index;
2726         u16                 vlan_tag;
2727
2728         /*get the user data portion*/
2729         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2730         if(!lldev) {
2731             XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2732         }
2733
2734         XGE_DRV_STATS(rx_completions);
2735
2736         /* get the interface pointer */
2737         ifnetp = lldev->ifnetp;
2738
2739         do {
2740             XGE_DRV_STATS(rx_desc_compl);
2741
2742             if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2743                 status = XGE_HAL_FAIL;
2744                 goto _exit;
2745             }
2746
2747             if(t_code) {
2748                 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2749                 XGE_DRV_STATS(rx_tcode);
2750                 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2751                 xge_hal_ring_dtr_post(channelh,dtr);
2752                 continue;
2753             }
2754
2755             /* Get the private data for this descriptor*/
2756             rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2757                 dtr);
2758             if(!rxd_priv) {
2759                 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2760                     XGE_HAL_FAIL);
2761             }
2762
2763             /*
2764              * Prepare one buffer to send it to upper layer -- since the upper
2765              * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2766              * prepare a new buffer, do mapping, use it in the current
2767              * descriptor and post descriptor back to ring channel
2768              */
2769             mbuf_up = rxd_priv->bufferArray[0];
2770
2771             /* Gets details of mbuf i.e., packet length */
2772             xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2773
2774             status =
2775                 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2776                 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2777                 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2778
2779             if(status != XGE_HAL_OK) {
2780                 xge_trace(XGE_ERR, "No memory");
2781                 XGE_DRV_STATS(rx_no_buf);
2782
2783                 /*
2784                  * Unable to allocate buffer. Instead of discarding, post
2785                  * descriptor back to channel for future processing of same
2786                  * packet.
2787                  */
2788                 xge_hal_ring_dtr_post(channelh, dtr);
2789                 continue;
2790             }
2791
2792             /* Get the extended information */
2793             xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2794
2795             /*
2796              * As we have allocated a new mbuf for this descriptor, post this
2797              * descriptor with new mbuf back to ring channel
2798              */
2799             vlan_tag = ext_info.vlan;
2800             xge_hal_ring_dtr_post(channelh, dtr);
2801             if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2802                 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2803                 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2804                 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2805
2806                 /* set Checksum Flag */
2807                 xge_set_mbuf_cflags(mbuf_up);
2808
2809                 if(lldev->enabled_lro) {
2810                     xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2811                         rxd_priv);
2812                 }
2813                 else {
2814                     /* Post-Read sync for buffers*/
2815                     for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2816                         bus_dmamap_sync(lldev->dma_tag_rx,
2817                             rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2818                     }
2819                     (*ifnetp->if_input)(ifnetp, mbuf_up);
2820                 }
2821             }
2822             else {
2823                 /*
2824                  * Packet with erroneous checksum , let the upper layer deal
2825                  * with it
2826                  */
2827
2828                 /* Post-Read sync for buffers*/
2829                 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2830                     bus_dmamap_sync(lldev->dma_tag_rx,
2831                          rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2832                 }
2833
2834                 if(vlan_tag) {
2835                     mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2836                     mbuf_up->m_flags |= M_VLANTAG;
2837                 }
2838
2839                 if(lldev->enabled_lro)
2840                     xge_lro_flush_sessions(lldev);
2841
2842                 (*ifnetp->if_input)(ifnetp, mbuf_up);
2843             }
2844         } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2845             == XGE_HAL_OK);
2846
2847         if(lldev->enabled_lro)
2848             xge_lro_flush_sessions(lldev);
2849
2850 _exit:
2851         return status;
2852 }
2853
2854 /**
2855  * xge_ring_dtr_get
2856  * Get descriptors
2857  *
2858  * @mbuf_up Packet to send up
2859  * @channelh Ring Channel Handle
2860  * @dtr Descriptor
2861  * @lldev Per-adapter Data
2862  * @rxd_priv Rx Descriptor Private Data
2863  *
2864  * Returns XGE_HAL_OK or HAL error enums
2865  */
2866 int
2867 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2868         xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2869 {
2870         mbuf_t           m;
2871         int              pkt_length[5]={0,0}, pkt_len=0;
2872         dma_addr_t       dma_data[5];
2873         int              index;
2874
2875         m = mbuf_up;
2876         pkt_len = 0;
2877
2878         if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2879             xge_os_memzero(pkt_length, sizeof(pkt_length));
2880
2881             /*
2882              * Retrieve data of interest from the completed descriptor -- This
2883              * returns the packet length
2884              */
2885             if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2886                 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2887             }
2888             else {
2889                 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2890             }
2891
2892             for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2893                 m->m_len  = pkt_length[index];
2894
2895                 if(index < (lldev->rxd_mbuf_cnt-1)) {
2896                     m->m_next = rxd_priv->bufferArray[index + 1];
2897                     m = m->m_next;
2898                 }
2899                 else {
2900                     m->m_next = NULL;
2901                 }
2902                 pkt_len+=pkt_length[index];
2903             }
2904
2905             /*
2906              * Since 2 buffer mode is an exceptional case where data is in 3rd
2907              * buffer but not in 2nd buffer
2908              */
2909             if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2910                 m->m_len = pkt_length[2];
2911                 pkt_len+=pkt_length[2];
2912             }
2913
2914             /*
2915              * Update length of newly created buffer to be sent up with packet
2916              * length
2917              */
2918             mbuf_up->m_pkthdr.len = pkt_len;
2919         }
2920         else {
2921             /*
2922              * Retrieve data of interest from the completed descriptor -- This
2923              * returns the packet length
2924              */
2925             xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2926
2927             /*
2928              * Update length of newly created buffer to be sent up with packet
2929              * length
2930              */
2931             mbuf_up->m_len =  mbuf_up->m_pkthdr.len = pkt_length[0];
2932         }
2933
2934         return XGE_HAL_OK;
2935 }
2936
2937 /**
2938  * xge_flush_txds
2939  * Flush Tx descriptors
2940  *
2941  * @channelh Channel handle
2942  */
2943 static void inline
2944 xge_flush_txds(xge_hal_channel_h channelh)
2945 {
2946         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2947         xge_hal_dtr_h tx_dtr;
2948         xge_tx_priv_t *tx_priv;
2949         u8 t_code;
2950
2951         while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2952             == XGE_HAL_OK) {
2953             XGE_DRV_STATS(tx_desc_compl);
2954             if(t_code) {
2955                 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2956                 XGE_DRV_STATS(tx_tcode);
2957                 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2958             }
2959
2960             tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2961             bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2962             m_freem(tx_priv->buffer);
2963             tx_priv->buffer = NULL;
2964             xge_hal_fifo_dtr_free(channelh, tx_dtr);
2965         }
2966 }
2967
2968 /**
2969  * xge_send
2970  * Transmit function
2971  *
2972  * @ifnetp Interface Handle
2973  */
2974 void
2975 xge_send(struct ifnet *ifnetp)
2976 {
2977         int qindex = 0;
2978         xge_lldev_t *lldev = ifnetp->if_softc;
2979
2980         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2981             if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2982                 XGE_DRV_STATS(tx_lock_fail);
2983                 break;
2984             }
2985             xge_send_locked(ifnetp, qindex);
2986             mtx_unlock(&lldev->mtx_tx[qindex]);
2987         }
2988 }
2989
2990 static void inline
2991 xge_send_locked(struct ifnet *ifnetp, int qindex)
2992 {
2993         xge_hal_dtr_h            dtr;
2994         static bus_dma_segment_t segs[XGE_MAX_SEGS];
2995         xge_hal_status_e         status;
2996         unsigned int             max_fragments;
2997         xge_lldev_t              *lldev          = ifnetp->if_softc;
2998         xge_hal_channel_h        channelh        = lldev->fifo_channel[qindex];
2999         mbuf_t                   m_head          = NULL;
3000         mbuf_t                   m_buf           = NULL;
3001         xge_tx_priv_t            *ll_tx_priv     = NULL;
3002         register unsigned int    count           = 0;
3003         unsigned int             nsegs           = 0;
3004         u16                      vlan_tag;
3005
3006         max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3007
3008         /* If device is not initialized, return */
3009         if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3010             return;
3011
3012         XGE_DRV_STATS(tx_calls);
3013
3014         /*
3015          * This loop will be executed for each packet in the kernel maintained
3016          * queue -- each packet can be with fragments as an mbuf chain
3017          */
3018         for(;;) {
3019             IF_DEQUEUE(&ifnetp->if_snd, m_head);
3020             if (m_head == NULL) {
3021                 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3022                 return;
3023             }
3024
3025             for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3026                 if(m_buf->m_len) count += 1;
3027             }
3028
3029             if(count >= max_fragments) {
3030                 m_buf = m_defrag(m_head, M_NOWAIT);
3031                 if(m_buf != NULL) m_head = m_buf;
3032                 XGE_DRV_STATS(tx_defrag);
3033             }
3034
3035             /* Reserve descriptors */
3036             status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3037             if(status != XGE_HAL_OK) {
3038                 XGE_DRV_STATS(tx_no_txd);
3039                 xge_flush_txds(channelh);
3040                 break;
3041             }
3042
3043             vlan_tag =
3044                 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3045             xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3046
3047             /* Update Tx private structure for this descriptor */
3048             ll_tx_priv         = xge_hal_fifo_dtr_private(dtr);
3049             ll_tx_priv->buffer = m_head;
3050
3051             /*
3052              * Do mapping -- Required DMA tag has been created in xge_init
3053              * function and DMA maps have already been created in the
3054              * xgell_tx_replenish function.
3055              * Returns number of segments through nsegs
3056              */
3057             if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3058                 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3059                 xge_trace(XGE_TRACE, "DMA map load failed");
3060                 XGE_DRV_STATS(tx_map_fail);
3061                 break;
3062             }
3063
3064             if(lldev->driver_stats.tx_max_frags < nsegs)
3065                 lldev->driver_stats.tx_max_frags = nsegs;
3066
3067             /* Set descriptor buffer for header and each fragment/segment */
3068             count = 0;
3069             do {
3070                 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3071                     (dma_addr_t)htole64(segs[count].ds_addr),
3072                     segs[count].ds_len);
3073                 count++;
3074             } while(count < nsegs);
3075
3076             /* Pre-write Sync of mapping */
3077             bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3078                 BUS_DMASYNC_PREWRITE);
3079
3080             if((lldev->enabled_tso) &&
3081                 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3082                 XGE_DRV_STATS(tx_tso);
3083                 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3084             }
3085
3086             /* Checksum */
3087             if(ifnetp->if_hwassist > 0) {
3088                 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3089                     | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3090             }
3091
3092             /* Post descriptor to FIFO channel */
3093             xge_hal_fifo_dtr_post(channelh, dtr);
3094             XGE_DRV_STATS(tx_posted);
3095
3096             /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3097              * listener so that we can use tools like tcpdump */
3098             ETHER_BPF_MTAP(ifnetp, m_head);
3099         }
3100
3101         /* Prepend the packet back to queue */
3102         IF_PREPEND(&ifnetp->if_snd, m_head);
3103         ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3104
3105         xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3106             XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3107         XGE_DRV_STATS(tx_again);
3108 }
3109
3110 /**
3111  * xge_get_buf
3112  * Allocates new mbufs to be placed into descriptors
3113  *
3114  * @dtrh Descriptor Handle
3115  * @rxd_priv Rx Descriptor Private Data
3116  * @lldev Per-adapter Data
3117  * @index Buffer Index (if multi-buffer mode)
3118  *
3119  * Returns XGE_HAL_OK or HAL error enums
3120  */
3121 int
3122 xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3123         xge_lldev_t *lldev, int index)
3124 {
3125         register mbuf_t mp            = NULL;
3126         struct          ifnet *ifnetp = lldev->ifnetp;
3127         int             status        = XGE_HAL_OK;
3128         int             buffer_size = 0, cluster_size = 0, count;
3129         bus_dmamap_t    map = rxd_priv->dmainfo[index].dma_map;
3130         bus_dma_segment_t segs[3];
3131
3132         buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3133             ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3134             lldev->rxd_mbuf_len[index];
3135
3136         if(buffer_size <= MCLBYTES) {
3137             cluster_size = MCLBYTES;
3138             mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3139         }
3140         else {
3141             cluster_size = MJUMPAGESIZE;
3142             if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3143                 (buffer_size > MJUMPAGESIZE)) {
3144                 cluster_size = MJUM9BYTES;
3145             }
3146             mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3147         }
3148         if(!mp) {
3149             xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3150             status = XGE_HAL_FAIL;
3151             goto getbuf_out;
3152         }
3153
3154         /* Update mbuf's length, packet length and receive interface */
3155         mp->m_len = mp->m_pkthdr.len = buffer_size;
3156         mp->m_pkthdr.rcvif = ifnetp;
3157
3158         /* Load DMA map */
3159         if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3160             mp, segs, &count, BUS_DMA_NOWAIT)) {
3161             XGE_DRV_STATS(rx_map_fail);
3162             m_freem(mp);
3163             XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3164         }
3165
3166         /* Update descriptor private data */
3167         rxd_priv->bufferArray[index]         = mp;
3168         rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3169         rxd_priv->dmainfo[index].dma_map     = lldev->extra_dma_map;
3170         lldev->extra_dma_map = map;
3171
3172         /* Pre-Read/Write sync */
3173         bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3174
3175         /* Unload DMA map of mbuf in current descriptor */
3176         bus_dmamap_unload(lldev->dma_tag_rx, map);
3177
3178         /* Set descriptor buffer */
3179         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3180             xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3181                 cluster_size);
3182         }
3183
3184 getbuf_out:
3185         return status;
3186 }
3187
3188 /**
3189  * xge_get_buf_3b_5b
3190  * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3191  *
3192  * @dtrh Descriptor Handle
3193  * @rxd_priv Rx Descriptor Private Data
3194  * @lldev Per-adapter Data
3195  *
3196  * Returns XGE_HAL_OK or HAL error enums
3197  */
3198 int
3199 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3200         xge_lldev_t *lldev)
3201 {
3202         bus_addr_t  dma_pointers[5];
3203         int         dma_sizes[5];
3204         int         status = XGE_HAL_OK, index;
3205         int         newindex = 0;
3206
3207         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3208             status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3209             if(status != XGE_HAL_OK) {
3210                 for(newindex = 0; newindex < index; newindex++) {
3211                     m_freem(rxd_priv->bufferArray[newindex]);
3212                 }
3213                 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3214             }
3215         }
3216
3217         for(index = 0; index < lldev->buffer_mode; index++) {
3218             if(lldev->rxd_mbuf_len[index] != 0) {
3219                 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3220                 dma_sizes[index]    = lldev->rxd_mbuf_len[index];
3221             }
3222             else {
3223                 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3224                 dma_sizes[index]    = 1;
3225             }
3226         }
3227
3228         /* Assigning second buffer to third pointer in 2 buffer mode */
3229         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3230             dma_pointers[2] = dma_pointers[1];
3231             dma_sizes[2]    = dma_sizes[1];
3232             dma_sizes[1]    = 1;
3233         }
3234
3235         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3236             xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3237         }
3238         else {
3239             xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3240         }
3241
3242 _exit:
3243         return status;
3244 }
3245
3246 /**
3247  * xge_tx_compl
3248  * If the interrupt is due to Tx completion, free the sent buffer
3249  *
3250  * @channelh Channel Handle
3251  * @dtr Descriptor
3252  * @t_code Transfer Code indicating success or error
3253  * @userdata Per-adapter Data
3254  *
3255  * Returns XGE_HAL_OK or HAL error enum
3256  */
3257 xge_hal_status_e
3258 xge_tx_compl(xge_hal_channel_h channelh,
3259         xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3260 {
3261         xge_tx_priv_t *ll_tx_priv = NULL;
3262         xge_lldev_t   *lldev  = (xge_lldev_t *)userdata;
3263         struct ifnet  *ifnetp = lldev->ifnetp;
3264         mbuf_t         m_buffer = NULL;
3265         int            qindex   = xge_hal_channel_id(channelh);
3266
3267         mtx_lock(&lldev->mtx_tx[qindex]);
3268
3269         XGE_DRV_STATS(tx_completions);
3270
3271         /*
3272          * For each completed descriptor: Get private structure, free buffer,
3273          * do unmapping, and free descriptor
3274          */
3275         do {
3276             XGE_DRV_STATS(tx_desc_compl);
3277
3278             if(t_code) {
3279                 XGE_DRV_STATS(tx_tcode);
3280                 xge_trace(XGE_TRACE, "t_code %d", t_code);
3281                 xge_hal_device_handle_tcode(channelh, dtr, t_code);
3282             }
3283
3284             ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3285             m_buffer   = ll_tx_priv->buffer;
3286             bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3287             m_freem(m_buffer);
3288             ll_tx_priv->buffer = NULL;
3289             xge_hal_fifo_dtr_free(channelh, dtr);
3290         } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3291             == XGE_HAL_OK);
3292         xge_send_locked(ifnetp, qindex);
3293         ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3294
3295         mtx_unlock(&lldev->mtx_tx[qindex]);
3296
3297         return XGE_HAL_OK;
3298 }
3299
3300 /**
3301  * xge_tx_initial_replenish
3302  * Initially allocate buffers and set them into descriptors for later use
3303  *
3304  * @channelh Tx Channel Handle
3305  * @dtrh Descriptor Handle
3306  * @index
3307  * @userdata Per-adapter Data
3308  * @reopen Channel open/reopen option
3309  *
3310  * Returns XGE_HAL_OK or HAL error enums
3311  */
3312 xge_hal_status_e
3313 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3314         int index, void *userdata, xge_hal_channel_reopen_e reopen)
3315 {
3316         xge_tx_priv_t *txd_priv = NULL;
3317         int            status   = XGE_HAL_OK;
3318
3319         /* Get the user data portion from channel handle */
3320         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3321         if(lldev == NULL) {
3322             XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3323                 XGE_HAL_FAIL);
3324         }
3325
3326         /* Get the private data */
3327         txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3328         if(txd_priv == NULL) {
3329             XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3330                 XGE_HAL_FAIL);
3331         }
3332
3333         /* Create DMA map for this descriptor */
3334         if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3335             &txd_priv->dma_map)) {
3336             XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3337                 txinit_out, XGE_HAL_FAIL);
3338         }
3339
3340 txinit_out:
3341         return status;
3342 }
3343
3344 /**
3345  * xge_rx_initial_replenish
3346  * Initially allocate buffers and set them into descriptors for later use
3347  *
3348  * @channelh Tx Channel Handle
3349  * @dtrh Descriptor Handle
3350  * @index Ring Index
3351  * @userdata Per-adapter Data
3352  * @reopen Channel open/reopen option
3353  *
3354  * Returns XGE_HAL_OK or HAL error enums
3355  */
3356 xge_hal_status_e
3357 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3358         int index, void *userdata, xge_hal_channel_reopen_e reopen)
3359 {
3360         xge_rx_priv_t  *rxd_priv = NULL;
3361         int             status   = XGE_HAL_OK;
3362         int             index1 = 0, index2 = 0;
3363
3364         /* Get the user data portion from channel handle */
3365         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3366         if(lldev == NULL) {
3367             XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3368                 XGE_HAL_FAIL);
3369         }
3370
3371         /* Get the private data */
3372         rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3373         if(rxd_priv == NULL) {
3374             XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3375                 XGE_HAL_FAIL);
3376         }
3377
3378         rxd_priv->bufferArray = xge_os_malloc(NULL,
3379                 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3380
3381         if(rxd_priv->bufferArray == NULL) {
3382             XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3383                 XGE_HAL_FAIL);
3384         }
3385
3386         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3387             /* Create DMA map for these descriptors*/
3388             if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3389                 &rxd_priv->dmainfo[0].dma_map)) {
3390                 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3391                     rxinit_err_out, XGE_HAL_FAIL);
3392             }
3393             /* Get a buffer, attach it to this descriptor */
3394             status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3395         }
3396         else {
3397             for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3398                 /* Create DMA map for this descriptor */
3399                 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3400                     &rxd_priv->dmainfo[index1].dma_map)) {
3401                     for(index2 = index1 - 1; index2 >= 0; index2--) {
3402                         bus_dmamap_destroy(lldev->dma_tag_rx,
3403                             rxd_priv->dmainfo[index2].dma_map);
3404                     }
3405                     XGE_EXIT_ON_ERR(
3406                         "Jumbo DMA map creation for Rx descriptor failed",
3407                         rxinit_err_out, XGE_HAL_FAIL);
3408                 }
3409             }
3410             status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3411         }
3412
3413         if(status != XGE_HAL_OK) {
3414             for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3415                 bus_dmamap_destroy(lldev->dma_tag_rx,
3416                     rxd_priv->dmainfo[index1].dma_map);
3417             }
3418             goto rxinit_err_out;
3419         }
3420         else {
3421             goto rxinit_out;
3422         }
3423
3424 rxinit_err_out:
3425         xge_os_free(NULL, rxd_priv->bufferArray,
3426             (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3427 rxinit_out:
3428         return status;
3429 }
3430
3431 /**
3432  * xge_rx_term
3433  * During unload terminate and free all descriptors
3434  *
3435  * @channelh Rx Channel Handle
3436  * @dtrh Rx Descriptor Handle
3437  * @state Descriptor State
3438  * @userdata Per-adapter Data
3439  * @reopen Channel open/reopen option
3440  */
void
xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
        xge_hal_dtr_state_e state, void *userdata,
        xge_hal_channel_reopen_e reopen)
{
        xge_rx_priv_t *rxd_priv = NULL;
        xge_lldev_t   *lldev    = NULL;
        int            index = 0;

        /* Only "Posted" descriptors own buffers and DMA maps; skip others */
        if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;

        /* Get the user data portion */
        lldev = xge_hal_channel_userdata(channelh);

        /* Get the private data */
        rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);

        /*
         * Tear down each buffer segment in strict order: complete any
         * in-flight DMA (sync), detach buffer from map (unload), free the
         * mbuf, then destroy the map itself.
         * NOTE(review): m_free() releases a single mbuf; if a segment can
         * ever hold a chain, m_freem() would be required — confirm against
         * how xge_get_buf()/xge_get_buf_3b_5b() allocate these buffers.
         */
        for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
            if(rxd_priv->dmainfo[index].dma_map != NULL) {
                bus_dmamap_sync(lldev->dma_tag_rx,
                    rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(lldev->dma_tag_rx,
                    rxd_priv->dmainfo[index].dma_map);
                if(rxd_priv->bufferArray[index] != NULL)
                    m_free(rxd_priv->bufferArray[index]);
                bus_dmamap_destroy(lldev->dma_tag_rx,
                    rxd_priv->dmainfo[index].dma_map);
            }
        }
        /* Size expression mirrors the allocation in xge_rx_initial_replenish */
        xge_os_free(NULL, rxd_priv->bufferArray,
            (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));

        /* Return the descriptor to the HAL */
        xge_hal_ring_dtr_free(channelh, dtrh);

rxterm_out:
        return;
}
3480
3481 /**
3482  * xge_tx_term
3483  * During unload terminate and free all descriptors
3484  *
 * @channelh Tx Channel Handle
 * @dtrh Tx Descriptor Handle
3487  * @state Descriptor State
3488  * @userdata Per-adapter Data
3489  * @reopen Channel open/reopen option
3490  */
3491 void
3492 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3493         xge_hal_dtr_state_e state, void *userdata,
3494         xge_hal_channel_reopen_e reopen)
3495 {
3496         xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3497         xge_lldev_t   *lldev      = (xge_lldev_t *)userdata;
3498
3499         /* Destroy DMA map */
3500         bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3501 }
3502
3503 /**
3504  * xge_methods
3505  *
3506  * FreeBSD device interface entry points
3507  */
static device_method_t xge_methods[] = {
        /* newbus entry points dispatched by the PCI bus driver */
        DEVMETHOD(device_probe,     xge_probe),
        DEVMETHOD(device_attach,    xge_attach),
        DEVMETHOD(device_detach,    xge_detach),
        DEVMETHOD(device_shutdown,  xge_shutdown),

        DEVMETHOD_END
};
3516
/* Driver declaration: name prefix, method table, per-instance softc size */
static driver_t xge_driver = {
        "nxge",                 /* device name prefix */
        xge_methods,            /* dispatch table above */
        sizeof(xge_lldev_t),    /* softc allocated per attached device */
};
static devclass_t xge_devclass;
/* Register the driver on the PCI bus (legacy devclass-style registration) */
DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3524