]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/nxge/if_nxge.c
Use an accessor function to access ifr_data.
[FreeBSD/FreeBSD.git] / sys / dev / nxge / if_nxge.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2007 Neterion, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30
31 #include <dev/nxge/if_nxge.h>
32 #include <dev/nxge/xge-osdep.h>
33 #include <net/if_arp.h>
34 #include <sys/types.h>
35 #include <net/if.h>
36 #include <net/if_var.h>
37 #include <net/if_vlan_var.h>
38
/* Ensures the HAL copyright banner is printed only once across all probes. */
int       copyright_print       = 0;
/* Reference count of HAL driver initializations (one per attached device). */
int       hal_driver_init_count = 0;
/* Size of an int; presumably consumed by the XGE_GET_PARAM kenv macros — TODO confirm. */
size_t    size                  = sizeof(int);

static void inline xge_flush_txds(xge_hal_channel_h);
44
45 /**
46  * xge_probe
47  * Probes for Xframe devices
48  *
49  * @dev Device handle
50  *
51  * Returns
52  * BUS_PROBE_DEFAULT if device is supported
53  * ENXIO if device is not supported
54  */
55 int
56 xge_probe(device_t dev)
57 {
58         int  devid    = pci_get_device(dev);
59         int  vendorid = pci_get_vendor(dev);
60         int  retValue = ENXIO;
61
62         if(vendorid == XGE_PCI_VENDOR_ID) {
63             if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
64                 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
65                 if(!copyright_print) {
66                     xge_os_printf(XGE_COPYRIGHT);
67                     copyright_print = 1;
68                 }
69                 device_set_desc_copy(dev,
70                     "Neterion Xframe 10 Gigabit Ethernet Adapter");
71                 retValue = BUS_PROBE_DEFAULT;
72             }
73         }
74
75         return retValue;
76 }
77
/**
 * xge_init_params
 * Sets HAL device configuration parameter values, reading tunables from
 * the kernel environment (kenv) via the XGE_GET_PARAM* macros and falling
 * back to the XGE_DEFAULT_* values when a tunable is not set.
 *
 * @dconfig Device Configuration to populate
 * @dev Device Handle (its softc supplies per-adapter tunables)
 */
void
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
{
        int qindex, tindex, revision;
        device_t checkdev;
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

        /* Fixed (non-tunable) configuration values. */
        dconfig->mtu                   = XGE_DEFAULT_INITIAL_MTU;
        dconfig->pci_freq_mherz        = XGE_DEFAULT_USER_HARDCODED;
        dconfig->device_poll_millis    = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
        dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
        dconfig->mac.rmac_bcast_en     = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
        dconfig->fifo.alignment_size   = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;

        /* Per-adapter feature switches (stored in the softc, not dconfig). */
        XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
            XGE_DEFAULT_ENABLED_TSO);
        XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
            XGE_DEFAULT_ENABLED_LRO);
        XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
            XGE_DEFAULT_ENABLED_MSI);

        /* PCI/PCI-X bus behavior tunables. */
        XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
            XGE_DEFAULT_LATENCY_TIMER);
        XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
            XGE_DEFAULT_MAX_SPLITS_TRANS);
        XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
            XGE_DEFAULT_MMRB_COUNT);
        XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
            XGE_DEFAULT_SHARED_SPLITS);
        XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
            XGE_DEFAULT_ISR_POLLING_CNT);
        XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
            stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

        /* MAC utilization-period and PAUSE flow-control tunables. */
        XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
            XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
            XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
            XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
            XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
        XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
            XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
        XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
            mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
        XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
            mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

        /* Global Tx FIFO tunables. */
        XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
            XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
        XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
            XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
        XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
            XGE_DEFAULT_FIFO_MAX_FRAGS);

        /* Per-queue FIFO and per-TTI (Tx traffic interrupt) tunables. */
        for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
                XGE_DEFAULT_FIFO_QUEUE_INTR);
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
                XGE_DEFAULT_FIFO_QUEUE_MAX);
            XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
                qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

            for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
                dconfig->fifo.queue[qindex].tti[tindex].enabled  = 1;
                dconfig->fifo.queue[qindex].configured = 1;

                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
                    urange_a, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
                    urange_b, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
                    urange_c, qindex, tindex,
                    XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
                    ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
                    ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
                    ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
                XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
                    ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
                XGE_GET_PARAM_FIFO_QUEUE_TTI(
                    "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
                    tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
            }
        }

        /* Global Rx ring tunables. */
        XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
            XGE_DEFAULT_RING_MEMBLOCK_SIZE);

        XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
            XGE_DEFAULT_RING_STRIP_VLAN_TAG);

        /* Rx buffer mode: only modes 1 and 2 are accepted from the user. */
        XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
            XGE_DEFAULT_BUFFER_MODE);
        if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
            (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
            xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
            lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
        }

        /* Per-queue ring and per-RTI (Rx traffic interrupt) tunables. */
        for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
            dconfig->ring.queue[qindex].max_frm_len  = XGE_HAL_RING_USE_MTU;
            dconfig->ring.queue[qindex].priority     = 0;
            dconfig->ring.queue[qindex].configured   = 1;
            /* User-visible mode 2 is programmed into the HAL as mode 3. */
            dconfig->ring.queue[qindex].buffer_mode  =
                (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
                XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
                XGE_DEFAULT_RING_QUEUE_MAX);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
                qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
                dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
                indicate_max_pkts, qindex,
                XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
            XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
                backoff_interval_us, qindex,
                XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
                qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
                timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
                timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
                urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
                urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
            XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
                urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
        }

        /* Clamp fifo_max_frags so one descriptor list fits in a page. */
        if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
            xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
            xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
                (int)(PAGE_SIZE / 32))
            xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
            dconfig->fifo.max_frags = (PAGE_SIZE / 32);
        }

        /*
         * Workaround for early AMD 8131 PCI-X bridges (revision <= 0x12):
         * constrain MMRB and split-transaction settings.
         */
        checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
        if(checkdev != NULL) {
            revision = pci_read_config(checkdev,
                xge_offsetof(xge_hal_pci_config_t, revision), 1);
            if(revision <= 0x12) {
                /*
                 * mmrb_count is set to index 1 (presumably the 1KB MMRB
                 * encoding — TODO confirm against HAL docs) and splits to
                 * XGE_HAL_THREE_SPLIT_TRANSACTION.  The original comment
                 * claimed "max splits = 2", which disagrees with the code.
                 */
                dconfig->mmrb_count       = 1;
                dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
            }
        }
}
257
/**
 * xge_rx_buffer_sizes_set
 * Compute the per-segment mbuf sizes (lldev->rxd_mbuf_len[]) used to fill
 * Rx descriptors, based on the Rx buffer mode and MTU.
 *
 * For modes other than 5: slot 0 holds the frame header (or the entire
 * frame in 1-buffer mode) and slot (mode - 1) holds the MTU-sized payload.
 * For mode 5: slot 0 = MAC header, slot 1 = TCP/IP header, and the
 * remaining payload is split into MJUMPAGESIZE chunks plus a 128-byte
 * aligned remainder; rxd_mbuf_cnt records how many slots are in use.
 *
 * @lldev Per-adapter Data
 * @buffer_mode Rx Buffer Mode
 * @mtu Interface MTU
 */
void
xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
{
        int index = 0;
        int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
        int buffer_size = mtu + frame_header;

        xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));

        /* Non-mode-5: the last used slot carries the MTU-sized payload. */
        if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
            lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;

        /* Slot 0: whole frame in 1-buffer mode, otherwise just the header. */
        lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;

        if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
            lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;

        if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
            index = 2;
            buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
            /* Carve the payload into page-sized jumbo chunks. */
            while(buffer_size > MJUMPAGESIZE) {
                lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
                buffer_size -= MJUMPAGESIZE;
            }
            /* Round the remainder up to a 128-byte boundary. */
            XGE_ALIGN_TO(buffer_size, 128);
            lldev->rxd_mbuf_len[index] = buffer_size;
            lldev->rxd_mbuf_cnt = index + 1;
        }

        for(index = 0; index < buffer_mode; index++)
            xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
                lldev->rxd_mbuf_len[index]);
}
298
/**
 * xge_buffer_mode_init
 * Initialize the Rx buffer mode: propagate the LRO capability flag to the
 * interface, program the HAL ring buffer/scatter modes, and compute the
 * per-segment buffer sizes for the current MTU.
 *
 * @lldev Per-adapter Data
 * @mtu Interface MTU
 */
void
xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
{
        int index = 0, buffer_size = 0;
        xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);

        buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;

        /* Reflect the LRO tunable in the interface capability flags. */
        if(lldev->enabled_lro)
            (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
        else
            (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;

        lldev->rxd_mbuf_cnt = lldev->buffer_mode;
        /* User-visible mode 2 is implemented via HAL mode 3 + scatter B. */
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
            XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
            ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
        }
        else {
            XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
            ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
        }
        xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);

        xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
            ((lldev->enabled_tso) ? "Enabled":"Disabled"));
        xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
            ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
        xge_os_printf("%s: Rx %d Buffer Mode Enabled",
            device_get_nameunit(lldev->device), lldev->buffer_mode);
}
337
/**
 * xge_driver_initialize
 * Initializes the HAL driver (shared by all adapters).  The first call does
 * the real work; subsequent calls only bump hal_driver_init_count so that
 * each attached device holds a reference.  The debug mask/level are
 * (re)applied on every call.
 *
 * Returns
 * XGE_HAL_OK if success
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
 */
int
xge_driver_initialize(void)
{
        xge_hal_uld_cbs_t       uld_callbacks;
        xge_hal_driver_config_t driver_config;
        xge_hal_status_e        status = XGE_HAL_OK;

        /* Initialize HAL driver only once (refcounted below). */
        if(!hal_driver_init_count) {
            xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
            xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));

            /*
             * Initial and maximum size of the queue used to store the events
             * like Link up/down (xge_hal_event_e)
             */
            driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
            driver_config.queue_size_max     = XGE_HAL_MAX_QUEUE_SIZE_MAX;

            /* Upper-layer callbacks invoked by the HAL on device events. */
            uld_callbacks.link_up   = xge_callback_link_up;
            uld_callbacks.link_down = xge_callback_link_down;
            uld_callbacks.crit_err  = xge_callback_crit_err;
            uld_callbacks.event     = xge_callback_event;

            /*
             * Both structs are stack locals; presumably the HAL copies what
             * it needs rather than retaining the pointers — TODO confirm.
             */
            status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
            if(status != XGE_HAL_OK) {
                /* XGE_EXIT_ON_ERR logs, sets status, and jumps to xdi_out. */
                XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
                    xdi_out, status);
            }
        }
        hal_driver_init_count = hal_driver_init_count + 1;

        xge_hal_driver_debug_module_mask_set(0xffffffff);
        xge_hal_driver_debug_level_set(XGE_TRACE);

xdi_out:
        return status;
}
384
385 /**
386  * xge_media_init
387  * Initializes, adds and sets media
388  *
389  * @devc Device Handle
390  */
391 void
392 xge_media_init(device_t devc)
393 {
394         xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
395
396         /* Initialize Media */
397         ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
398             xge_ifmedia_status);
399
400         /* Add supported media */
401         ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
402         ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
403         ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO,    0, NULL);
404         ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR,  0, NULL);
405         ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR,  0, NULL);
406
407         /* Set media */
408         ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
409 }
410
411 /**
412  * xge_pci_space_save
413  * Save PCI configuration space
414  *
415  * @dev Device Handle
416  */
417 void
418 xge_pci_space_save(device_t dev)
419 {
420         struct pci_devinfo *dinfo = NULL;
421
422         dinfo = device_get_ivars(dev);
423         xge_trace(XGE_TRACE, "Saving PCI configuration space");
424         pci_cfg_save(dev, dinfo, 0);
425 }
426
427 /**
428  * xge_pci_space_restore
429  * Restore saved PCI configuration space
430  *
431  * @dev Device Handle
432  */
433 void
434 xge_pci_space_restore(device_t dev)
435 {
436         struct pci_devinfo *dinfo = NULL;
437
438         dinfo = device_get_ivars(dev);
439         xge_trace(XGE_TRACE, "Restoring PCI configuration space");
440         pci_cfg_restore(dev, dinfo);
441 }
442
/**
 * xge_msi_info_save
 * Snapshot the device's MSI capability registers (control, 64-bit message
 * address, and data) from PCI config space into lldev->msi_info, so they
 * can be re-programmed by xge_msi_info_restore after an interface reset.
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_save(xge_lldev_t * lldev)
{
        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control),
            &lldev->msi_info.msi_control);
        xge_os_pci_read32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
            &lldev->msi_info.msi_lower_address);
        xge_os_pci_read32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
            &lldev->msi_info.msi_higher_address);
        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_data),
            &lldev->msi_info.msi_data);
}
465
/**
 * xge_msi_info_restore
 * Write the MSI capability registers saved by xge_msi_info_save back into
 * PCI config space.
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_restore(xge_lldev_t *lldev)
{
        /*
         * If interface is made down and up, traffic fails. It was observed that
         * MSI information were getting reset on down. Restoring them.
         */
        xge_os_pci_write16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control),
            lldev->msi_info.msi_control);

        xge_os_pci_write32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
            lldev->msi_info.msi_lower_address);

        xge_os_pci_write32(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
            lldev->msi_info.msi_higher_address);

        xge_os_pci_write16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_data),
            lldev->msi_info.msi_data);
}
495
496 /**
497  * xge_init_mutex
498  * Initializes mutexes used in driver
499  *
500  * @lldev  Per-adapter Data
501  */
502 void
503 xge_mutex_init(xge_lldev_t *lldev)
504 {
505         int qindex;
506
507         sprintf(lldev->mtx_name_drv, "%s_drv",
508             device_get_nameunit(lldev->device));
509         mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
510             MTX_DEF);
511
512         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
513             sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
514                 device_get_nameunit(lldev->device), qindex);
515             mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
516                 MTX_DEF);
517         }
518 }
519
520 /**
521  * xge_mutex_destroy
522  * Destroys mutexes used in driver
523  *
524  * @lldev Per-adapter Data
525  */
526 void
527 xge_mutex_destroy(xge_lldev_t *lldev)
528 {
529         int qindex;
530
531         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
532             mtx_destroy(&lldev->mtx_tx[qindex]);
533         mtx_destroy(&lldev->mtx_drv);
534 }
535
/**
 * xge_print_info
 * Print adapter identification (model, product name, serial number),
 * bus width/speed (Herc only), and interrupt mode to the console.
 *
 * @lldev Per-adapter Data
 */
void
xge_print_info(xge_lldev_t *lldev)
{
        device_t dev = lldev->device;
        xge_hal_device_t *hldev = lldev->devh;
        xge_hal_status_e status = XGE_HAL_OK;
        u64 val64 = 0;
        /* Indexed by the 4-bit mode field extracted from pci_info below. */
        const char *xge_pci_bus_speeds[17] = {
            "PCI 33MHz Bus",
            "PCI 66MHz Bus",
            "PCIX(M1) 66MHz Bus",
            "PCIX(M1) 100MHz Bus",
            "PCIX(M1) 133MHz Bus",
            "PCIX(M2) 133MHz Bus",
            "PCIX(M2) 200MHz Bus",
            "PCIX(M2) 266MHz Bus",
            "PCIX(M1) Reserved",
            "PCIX(M1) 66MHz Bus (Not Supported)",
            "PCIX(M1) 100MHz Bus (Not Supported)",
            "PCIX(M1) 133MHz Bus (Not Supported)",
            "PCIX(M2) Reserved",
            "PCIX 533 Reserved",
            "PCI Basic Mode",
            "PCIX Basic Mode",
            "PCI Invalid Mode"
        };

        xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
            device_get_nameunit(dev),
            ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
            hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
        xge_os_printf("%s: Serial Number %s",
            device_get_nameunit(dev), hldev->vpd_data.serial_num);

        /* Bus info register only exists/is read on Herc (Xframe II). */
        if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
            status = xge_hal_mgmt_reg_read(hldev, 0,
                xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
            if(status != XGE_HAL_OK)
                xge_trace(XGE_ERR, "Error for getting bus speed");

            /*
             * Bit 8 set presumably indicates a 32-bit bus, and bits 63:60
             * index the speed table — TODO confirm against the Xframe
             * register reference.
             */
            xge_os_printf("%s: Adapter is on %s bit %s",
                device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
                (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
        }

        xge_os_printf("%s: Using %s Interrupts",
            device_get_nameunit(dev),
            (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
}
591
/**
 * xge_create_dma_tags
 * Creates the Tx and Rx DMA tags and a spare Rx DMA map.  On any failure
 * the tags created so far are destroyed (goto-based unwinding) and
 * XGE_HAL_FAIL is returned.
 *
 * @dev Device Handle
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
 */
xge_hal_status_e
xge_create_dma_tags(device_t dev)
{
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
        xge_hal_status_e status = XGE_HAL_FAIL;
        int mtu = (lldev->ifnetp)->if_mtu, maxsize;

        /* DMA tag for Tx: up to XGE_MAX_SEGS cluster-sized segments. */
        status = bus_dma_tag_create(
            bus_get_dma_tag(dev),                /* Parent                    */
            PAGE_SIZE,                           /* Alignment                 */
            0,                                   /* Bounds                    */
            BUS_SPACE_MAXADDR,                   /* Low Address               */
            BUS_SPACE_MAXADDR,                   /* High Address              */
            NULL,                                /* Filter Function           */
            NULL,                                /* Filter Function Arguments */
            MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size              */
            XGE_MAX_SEGS,                        /* Number of Segments        */
            MCLBYTES,                            /* Maximum Segment Size      */
            BUS_DMA_ALLOCNOW,                    /* Flags                     */
            NULL,                                /* Lock Function             */
            NULL,                                /* Lock Function Arguments   */
            (&lldev->dma_tag_tx));               /* DMA Tag                   */
        if(status != 0)
            goto _exit;

        /*
         * Pick the Rx buffer size: smallest mbuf class that holds
         * MTU + MAC header (mode 5 always uses page-sized jumbo clusters).
         */
        maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
        if(maxsize <= MCLBYTES) {
            maxsize = MCLBYTES;
        }
        else {
            if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
                maxsize = MJUMPAGESIZE;
            else
                maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
        }

        /* DMA tag for Rx: single segment of maxsize bytes. */
        status = bus_dma_tag_create(
            bus_get_dma_tag(dev),                /* Parent                    */
            PAGE_SIZE,                           /* Alignment                 */
            0,                                   /* Bounds                    */
            BUS_SPACE_MAXADDR,                   /* Low Address               */
            BUS_SPACE_MAXADDR,                   /* High Address              */
            NULL,                                /* Filter Function           */
            NULL,                                /* Filter Function Arguments */
            maxsize,                             /* Maximum Size              */
            1,                                   /* Number of Segments        */
            maxsize,                             /* Maximum Segment Size      */
            BUS_DMA_ALLOCNOW,                    /* Flags                     */
            NULL,                                /* Lock Function             */
            NULL,                                /* Lock Function Arguments   */
            (&lldev->dma_tag_rx));               /* DMA Tag                   */
        if(status != 0)
            goto _exit1;

        /* Spare Rx map, used when swapping maps during receive. */
        status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
            &lldev->extra_dma_map);
        if(status != 0)
            goto _exit2;

        status = XGE_HAL_OK;
        goto _exit;

        /*
         * Unwind: status is reused for the destroy calls below, then
         * overwritten with XGE_HAL_FAIL before returning.
         */
_exit2:
        status = bus_dma_tag_destroy(lldev->dma_tag_rx);
        if(status != 0)
            xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
_exit1:
        status = bus_dma_tag_destroy(lldev->dma_tag_tx);
        if(status != 0)
            xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
        status = XGE_HAL_FAIL;
_exit:
        return status;
}
676
/**
 * xge_confirm_changes
 * Applies a pending configuration change (MTU or LRO) by bringing the
 * interface down, stopping and re-initializing the device, and bringing
 * it back up.  If the device is not yet initialized, an MTU change is
 * applied directly without the restart cycle.
 *
 * (The original header documented a return value and an "@mtu_set"
 * parameter; the function is void and takes an xge_option_e.)
 *
 * @lldev Per-adapter Data
 * @option Which change to apply (XGE_SET_MTU or XGE_CHANGE_LRO)
 */
void
xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
{
        if(lldev->initialized == 0) goto _exit1;

        mtx_lock(&lldev->mtx_drv);
        if_down(lldev->ifnetp);
        xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);

        if(option == XGE_SET_MTU)
            (lldev->ifnetp)->if_mtu = lldev->mtu;
        else
            xge_buffer_mode_init(lldev, lldev->mtu);

        xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
        if_up(lldev->ifnetp);
        mtx_unlock(&lldev->mtx_drv);
        goto _exit;

_exit1:
        /* Request was to change MTU and device not initialized */
        if(option == XGE_SET_MTU) {
            (lldev->ifnetp)->if_mtu = lldev->mtu;
            xge_buffer_mode_init(lldev, lldev->mtu);
        }
_exit:
        return;
}
714
715 /**
716  * xge_change_lro_status
717  * Enable/Disable LRO feature
718  *
719  * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
720  *
721  * Returns 0 or error number.
722  */
723 static int
724 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
725 {
726         xge_lldev_t *lldev = (xge_lldev_t *)arg1;
727         int request = lldev->enabled_lro, status = XGE_HAL_OK;
728
729         status = sysctl_handle_int(oidp, &request, arg2, req);
730         if((status != XGE_HAL_OK) || (!req->newptr))
731             goto _exit;
732
733         if((request < 0) || (request > 1)) {
734             status = EINVAL;
735             goto _exit;
736         }
737
738         /* Return if current and requested states are same */
739         if(request == lldev->enabled_lro){
740             xge_trace(XGE_ERR, "LRO is already %s",
741                 ((request) ? "enabled" : "disabled"));
742             goto _exit;
743         }
744
745         lldev->enabled_lro = request;
746         xge_confirm_changes(lldev, XGE_CHANGE_LRO);
747         arg2 = lldev->enabled_lro;
748
749 _exit:
750         return status;
751 }
752
/**
 * xge_add_sysctl_handlers
 * Registers sysctl handlers under the device's sysctl tree; currently
 * only "enable_lro" (read-write int) backed by xge_change_lro_status.
 *
 * @lldev Per-adapter data
 */
void
xge_add_sysctl_handlers(xge_lldev_t *lldev)
{
        struct sysctl_ctx_list *context_list =
            device_get_sysctl_ctx(lldev->device);
        struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);

        /* dev.nxge.N.enable_lro: toggles LRO; lldev is passed as arg1. */
        SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
            "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
            xge_change_lro_status, "I", "Enable or disable LRO feature");
}
770
771 /**
772  * xge_attach
773  * Connects driver to the system if probe was success
774  *
775  * @dev Device Handle
776  */
int
xge_attach(device_t dev)
{
        xge_hal_device_config_t *device_config;
        xge_hal_device_attr_t   attr;
        xge_lldev_t             *lldev;
        xge_hal_device_t        *hldev;
        xge_pci_info_t          *pci_info;
        struct ifnet            *ifnetp;
        int                     rid, rid0, rid1, error;
        int                     msi_count = 0, status = XGE_HAL_OK;
        int                     enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

        /*
         * device_config is attach-scoped scratch space; it is freed at
         * attach_out on both success and failure paths.
         * NOTE(review): XGE_EXIT_ON_ERR appears to log the message, set
         * status, and jump to the named label -- confirm against its
         * definition in if_nxge.h.
         */
        device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
        if(!device_config) {
            XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
                attach_out_config, ENOMEM);
        }

        lldev = (xge_lldev_t *) device_get_softc(dev);
        if(!lldev) {
            XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
        }
        lldev->device = dev;

        xge_mutex_init(lldev);

        error = xge_driver_initialize();
        if(error != XGE_HAL_OK) {
            /* Each failure below unwinds via xge_resources_free() with a
             * label naming the last successfully acquired resource. */
            xge_resources_free(dev, xge_free_mutex);
            XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
        }

        /* HAL device */
        hldev =
            (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
        if(!hldev) {
            xge_resources_free(dev, xge_free_terminate_hal_driver);
            XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
                attach_out, ENOMEM);
        }
        lldev->devh = hldev;

        /* Our private structure */
        pci_info =
            (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
        if(!pci_info) {
            xge_resources_free(dev, xge_free_hal_device);
            XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
                attach_out, ENOMEM);
        }
        lldev->pdev      = pci_info;
        pci_info->device = dev;

        /* Set bus master */
        pci_enable_busmaster(dev);

        /* Get virtual address for BAR0 */
        rid0 = PCIR_BAR(0);
        pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
            RF_ACTIVE);
        if(pci_info->regmap0 == NULL) {
            xge_resources_free(dev, xge_free_pci_info);
            XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
                attach_out, ENOMEM);
        }
        attr.bar0 = (char *)pci_info->regmap0;

        pci_info->bar0resource = (xge_bus_resource_t*)
            xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
        if(pci_info->bar0resource == NULL) {
            xge_resources_free(dev, xge_free_bar0);
            XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
                attach_out, ENOMEM);
        }
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
            rman_get_bustag(pci_info->regmap0);
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
            rman_get_bushandle(pci_info->regmap0);
        ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
            pci_info->regmap0;

        /* Get virtual address for BAR1 (PCI BAR slot 2) */
        rid1 = PCIR_BAR(2);
        pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
            RF_ACTIVE);
        if(pci_info->regmap1 == NULL) {
            xge_resources_free(dev, xge_free_bar0_resource);
            XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
                attach_out, ENOMEM);
        }
        attr.bar1 = (char *)pci_info->regmap1;

        pci_info->bar1resource = (xge_bus_resource_t*)
            xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
        if(pci_info->bar1resource == NULL) {
            xge_resources_free(dev, xge_free_bar1);
            XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
                attach_out, ENOMEM);
        }
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
            rman_get_bustag(pci_info->regmap1);
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
            rman_get_bushandle(pci_info->regmap1);
        ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
            pci_info->regmap1;

        /* Save PCI config space */
        xge_pci_space_save(dev);

        attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
        attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
        attr.irqh  = lldev->irqhandle;
        attr.cfgh  = pci_info;
        attr.pdev  = pci_info;

        /* Initialize device configuration parameters */
        xge_init_params(device_config, dev);

        /* rid 0 selects the legacy line interrupt; rid 1 the first MSI */
        rid = 0;
        if(lldev->enabled_msi) {
            /* Number of MSI messages supported by device */
            msi_count = pci_msi_count(dev);
            if(msi_count > 1) {
                /* Device supports MSI, but the driver uses one message only */
                if(bootverbose) {
                    xge_trace(XGE_ERR, "MSI count: %d", msi_count);
                    xge_trace(XGE_ERR, "Now, driver supporting 1 message");
                }
                msi_count = 1;
                error = pci_alloc_msi(dev, &msi_count);
                if(error == 0) {
                    if(bootverbose)
                        xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
                    enable_msi = XGE_HAL_INTR_MODE_MSI;
                    rid = 1;
                }
                else {
                    if(bootverbose)
                        xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
                }
            }
        }
        lldev->enabled_msi = enable_msi;

        /* Allocate resource for irq */
        lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            (RF_SHAREABLE | RF_ACTIVE));
        if(lldev->irq == NULL) {
            xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
                ((rid == 0) ? "line interrupt" : "MSI"));
            /* MSI allocation failed: release MSI and retry in line mode */
            if(rid == 1) {
                error = pci_release_msi(dev);
                if(error != 0) {
                    xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
                        error);
                    xge_trace(XGE_ERR, "Requires reboot to use MSI again");
                }
                xge_trace(XGE_ERR, "Trying line interrupts");
                rid = 0;
                lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
                lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    (RF_SHAREABLE | RF_ACTIVE));
            }
            if(lldev->irq == NULL) {
                xge_trace(XGE_ERR, "Allocating irq resource failed");
                xge_resources_free(dev, xge_free_bar1_resource);
                status = ENOMEM;
                goto attach_out;
            }
        }

        device_config->intr_mode = lldev->enabled_msi;
        if(bootverbose) {
            xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
                lldev->enabled_msi, msi_count);
        }

        /* Initialize HAL device */
        error = xge_hal_device_initialize(hldev, &attr, device_config);
        if(error != XGE_HAL_OK) {
            xge_resources_free(dev, xge_free_irq_resource);
            XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
                ENXIO);
        }

        xge_hal_device_private_set(hldev, lldev);

        error = xge_interface_setup(dev);
        if(error != 0) {
            status = error;
            goto attach_out;
        }

        ifnetp         = lldev->ifnetp;
        ifnetp->if_mtu = device_config->mtu;

        xge_media_init(dev);

        /* Associate interrupt handler with the device */
        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
            error = bus_setup_intr(dev, lldev->irq,
                (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
                NULL,
#endif
                xge_isr_msi, lldev, &lldev->irqhandle);
            xge_msi_info_save(lldev);
        }
        else {
            error = bus_setup_intr(dev, lldev->irq,
                (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
                xge_isr_filter,
#endif
                xge_isr_line, lldev, &lldev->irqhandle);
        }
        if(error != 0) {
            xge_resources_free(dev, xge_free_media_interface);
            XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
                attach_out, ENXIO);
        }

        xge_print_info(lldev);

        xge_add_sysctl_handlers(lldev);

        xge_buffer_mode_init(lldev, device_config->mtu);

attach_out:
        /* Reached on success and on all failures after device_config was
         * allocated; the scratch configuration is always released here. */
        xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
attach_out_config:
        return status;
}
1011
1012 /**
1013  * xge_resources_free
1014  * Undo what-all we did during load/attach
1015  *
1016  * @dev Device Handle
1017  * @error Identifies what-all to undo
1018  */
void
xge_resources_free(device_t dev, xge_lables_e error)
{
        xge_lldev_t *lldev;
        xge_pci_info_t *pci_info;
        xge_hal_device_t *hldev;
        int rid, status;

        /* LL Device */
        lldev = (xge_lldev_t *) device_get_softc(dev);
        pci_info = lldev->pdev;

        /* HAL Device */
        hldev = lldev->devh;

        /*
         * Deliberate cascading switch: entering at a given label undoes
         * that step and then falls through every later case, releasing
         * resources in the reverse of their acquisition order in
         * xge_attach().  No case ends with a break.
         */
        switch(error) {
            case xge_free_all:
                /* Teardown interrupt handler - device association */
                bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
                /* FALLTHROUGH */

            case xge_free_media_interface:
                /* Media */
                ifmedia_removeall(&lldev->media);

                /* Detach Ether */
                ether_ifdetach(lldev->ifnetp);
                if_free(lldev->ifnetp);

                xge_hal_device_private_set(hldev, NULL);
                xge_hal_device_disable(hldev);
                /* FALLTHROUGH */

            case xge_free_terminate_hal_device:
                /* HAL Device */
                xge_hal_device_terminate(hldev);
                /* FALLTHROUGH */

            case xge_free_irq_resource:
                /* Release IRQ resource (rid 0 = line interrupt, 1 = MSI) */
                bus_release_resource(dev, SYS_RES_IRQ,
                    ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
                    lldev->irq);

                if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
                    status = pci_release_msi(dev);
                    if(status != 0) {
                        if(bootverbose) {
                            xge_trace(XGE_ERR,
                                "pci_release_msi returned %d", status);
                        }
                    }
                }
                /* FALLTHROUGH */

            case xge_free_bar1_resource:
                /* Restore PCI configuration space */
                xge_pci_space_restore(dev);

                /* Free bar1resource */
                xge_os_free(NULL, pci_info->bar1resource,
                    sizeof(xge_bus_resource_t));
                /* FALLTHROUGH */

            case xge_free_bar1:
                /* Release BAR1 (PCI BAR slot 2) */
                rid = PCIR_BAR(2);
                bus_release_resource(dev, SYS_RES_MEMORY, rid,
                    pci_info->regmap1);
                /* FALLTHROUGH */

            case xge_free_bar0_resource:
                /* Free bar0resource */
                xge_os_free(NULL, pci_info->bar0resource,
                    sizeof(xge_bus_resource_t));
                /* FALLTHROUGH */

            case xge_free_bar0:
                /* Release BAR0 */
                rid = PCIR_BAR(0);
                bus_release_resource(dev, SYS_RES_MEMORY, rid,
                    pci_info->regmap0);
                /* FALLTHROUGH */

            case xge_free_pci_info:
                /* Disable Bus Master */
                pci_disable_busmaster(dev);

                /* Free pci_info_t */
                lldev->pdev = NULL;
                xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
                /* FALLTHROUGH */

            case xge_free_hal_device:
                /* Free device configuration struct and HAL device */
                xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
                /* FALLTHROUGH */

            case xge_free_terminate_hal_driver:
                /* Terminate HAL driver when the last adapter goes away */
                hal_driver_init_count = hal_driver_init_count - 1;
                if(!hal_driver_init_count) {
                    xge_hal_driver_terminate();
                }
                /* FALLTHROUGH */

            case xge_free_mutex:
                xge_mutex_destroy(lldev);
        }
}
1118
1119 /**
1120  * xge_detach
1121  * Detaches driver from the Kernel subsystem
1122  *
1123  * @dev Device Handle
1124  */
1125 int
1126 xge_detach(device_t dev)
1127 {
1128         xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1129
1130         if(lldev->in_detach == 0) {
1131             lldev->in_detach = 1;
1132             xge_stop(lldev);
1133             xge_resources_free(dev, xge_free_all);
1134         }
1135
1136         return 0;
1137 }
1138
1139 /**
1140  * xge_shutdown
1141  * To shutdown device before system shutdown
1142  *
1143  * @dev Device Handle
1144  */
1145 int
1146 xge_shutdown(device_t dev)
1147 {
1148         xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1149         xge_stop(lldev);
1150
1151         return 0;
1152 }
1153
1154 /**
1155  * xge_interface_setup
1156  * Setup interface
1157  *
1158  * @dev Device Handle
1159  *
1160  * Returns 0 on success, ENXIO/ENOMEM on failure
1161  */
int
xge_interface_setup(device_t dev)
{
        u8 mcaddr[ETHER_ADDR_LEN];
        xge_hal_status_e status;
        xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
        struct ifnet *ifnetp;
        xge_hal_device_t *hldev = lldev->devh;

        /* Get the MAC address of the device */
        status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
        if(status != XGE_HAL_OK) {
            xge_resources_free(dev, xge_free_terminate_hal_device);
            /* NOTE(review): XGE_EXIT_ON_ERR appears to log, set the return
             * value, and jump to the label -- confirm its definition */
            XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
        }

        /* Get interface ifnet structure for this Ether device */
        ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
        if(ifnetp == NULL) {
            xge_resources_free(dev, xge_free_terminate_hal_device);
            XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
        }

        /* Initialize interface ifnet structure */
        if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
        ifnetp->if_mtu      = XGE_HAL_DEFAULT_MTU;
        ifnetp->if_baudrate = XGE_BAUDRATE;
        ifnetp->if_init     = xge_init;
        ifnetp->if_softc    = lldev;
        ifnetp->if_flags    = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifnetp->if_ioctl    = xge_ioctl;
        ifnetp->if_start    = xge_send;

        /* TODO: Check and assign optimal value */
        ifnetp->if_snd.ifq_maxlen = ifqmaxlen;

        /* Hardware offload capabilities: VLAN tagging/MTU and checksum
         * always; TSO4 and LRO only if enabled via driver configuration */
        ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
            IFCAP_HWCSUM;
        if(lldev->enabled_tso)
            ifnetp->if_capabilities |= IFCAP_TSO4;
        if(lldev->enabled_lro)
            ifnetp->if_capabilities |= IFCAP_LRO;

        /* All supported capabilities start out enabled */
        ifnetp->if_capenable = ifnetp->if_capabilities;

        /* Attach the interface */
        ether_ifattach(ifnetp, mcaddr);

ifsetup_out:
        /* On success this returns XGE_HAL_OK (0) left over from the last
         * successful HAL call; on failure the macro-set error code */
        return status;
}
1213
1214 /**
1215  * xge_callback_link_up
1216  * Callback for Link-up indication from HAL
1217  *
1218  * @userdata Per-adapter data
1219  */
1220 void
1221 xge_callback_link_up(void *userdata)
1222 {
1223         xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1224         struct ifnet *ifnetp = lldev->ifnetp;
1225
1226         ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1227         if_link_state_change(ifnetp, LINK_STATE_UP);
1228 }
1229
1230 /**
1231  * xge_callback_link_down
1232  * Callback for Link-down indication from HAL
1233  *
1234  * @userdata Per-adapter data
1235  */
1236 void
1237 xge_callback_link_down(void *userdata)
1238 {
1239         xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1240         struct ifnet *ifnetp = lldev->ifnetp;
1241
1242         ifnetp->if_flags  |= IFF_DRV_OACTIVE;
1243         if_link_state_change(ifnetp, LINK_STATE_DOWN);
1244 }
1245
1246 /**
1247  * xge_callback_crit_err
1248  * Callback for Critical error indication from HAL
1249  *
1250  * @userdata Per-adapter data
1251  * @type Event type (Enumerated hardware error)
1252  * @serr_data Hardware status
1253  */
1254 void
1255 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1256 {
1257         xge_trace(XGE_ERR, "Critical Error");
1258         xge_reset(userdata);
1259 }
1260
1261 /**
1262  * xge_callback_event
1263  * Callback from HAL indicating that some event has been queued
1264  *
1265  * @item Queued event item
1266  */
1267 void
1268 xge_callback_event(xge_queue_item_t *item)
1269 {
1270         xge_lldev_t      *lldev  = NULL;
1271         xge_hal_device_t *hldev  = NULL;
1272         struct ifnet     *ifnetp = NULL;
1273
1274         hldev  = item->context;
1275         lldev  = xge_hal_device_private(hldev);
1276         ifnetp = lldev->ifnetp;
1277
1278         switch((int)item->event_type) {
1279             case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1280                 if(lldev->initialized) {
1281                     if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
1282                         ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1283                     }
1284                     else {
1285                         xge_queue_produce_context(
1286                             xge_hal_device_queue(lldev->devh),
1287                             XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1288                     }
1289                 }
1290                 break;
1291
1292             case XGE_LL_EVENT_DEVICE_RESETTING:
1293                 xge_reset(item->context);
1294                 break;
1295
1296             default:
1297                 break;
1298         }
1299 }
1300
1301 /**
1302  * xge_ifmedia_change
1303  * Media change driver callback
1304  *
1305  * @ifnetp Interface Handle
1306  *
1307  * Returns 0 if media is Ether else EINVAL
1308  */
1309 int
1310 xge_ifmedia_change(struct ifnet *ifnetp)
1311 {
1312         xge_lldev_t    *lldev    = ifnetp->if_softc;
1313         struct ifmedia *ifmediap = &lldev->media;
1314
1315         return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ?  EINVAL:0;
1316 }
1317
1318 /**
1319  * xge_ifmedia_status
1320  * Media status driver callback
1321  *
1322  * @ifnetp Interface Handle
1323  * @ifmr Interface Media Settings
1324  */
1325 void
1326 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1327 {
1328         xge_hal_status_e status;
1329         u64              regvalue;
1330         xge_lldev_t      *lldev = ifnetp->if_softc;
1331         xge_hal_device_t *hldev = lldev->devh;
1332
1333         ifmr->ifm_status = IFM_AVALID;
1334         ifmr->ifm_active = IFM_ETHER;
1335
1336         status = xge_hal_mgmt_reg_read(hldev, 0,
1337             xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1338         if(status != XGE_HAL_OK) {
1339             xge_trace(XGE_TRACE, "Getting adapter status failed");
1340             goto _exit;
1341         }
1342
1343         if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1344             XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1345             ifmr->ifm_status |= IFM_ACTIVE;
1346             ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1347             if_link_state_change(ifnetp, LINK_STATE_UP);
1348         }
1349         else {
1350             if_link_state_change(ifnetp, LINK_STATE_DOWN);
1351         }
1352 _exit:
1353         return;
1354 }
1355
1356 /**
1357  * xge_ioctl_stats
1358  * IOCTL to get statistics
1359  *
1360  * @lldev Per-adapter data
1361  * @ifreqp Interface request
1362  */
1363 int
1364 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1365 {
1366         xge_hal_status_e status = XGE_HAL_OK;
1367         char cmd, mode;
1368         void *info = NULL;
1369         int retValue = EINVAL;
1370
1371         cmd = fubyte(ifr_data_get_ptr(ifreqp));
1372         if (cmd == -1)
1373                 return (EFAULT);
1374
1375         switch(cmd) {
1376             case XGE_QUERY_STATS:
1377                 mtx_lock(&lldev->mtx_drv);
1378                 status = xge_hal_stats_hw(lldev->devh,
1379                     (xge_hal_stats_hw_info_t **)&info);
1380                 mtx_unlock(&lldev->mtx_drv);
1381                 if(status == XGE_HAL_OK) {
1382                     if(copyout(info, ifr_data_get_ptr(ifreqp),
1383                         sizeof(xge_hal_stats_hw_info_t)) == 0)
1384                         retValue = 0;
1385                 }
1386                 else {
1387                     xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1388                         status);
1389                 }
1390                 break;
1391
1392             case XGE_QUERY_PCICONF:
1393                 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1394                 if(info != NULL) {
1395                     mtx_lock(&lldev->mtx_drv);
1396                     status = xge_hal_mgmt_pci_config(lldev->devh, info,
1397                         sizeof(xge_hal_pci_config_t));
1398                     mtx_unlock(&lldev->mtx_drv);
1399                     if(status == XGE_HAL_OK) {
1400                         if(copyout(info, ifr_data_get_ptr(ifreqp),
1401                             sizeof(xge_hal_pci_config_t)) == 0)
1402                             retValue = 0;
1403                     }
1404                     else {
1405                         xge_trace(XGE_ERR,
1406                             "Getting PCI configuration failed (%d)", status);
1407                     }
1408                     xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1409                 }
1410                 break;
1411
1412             case XGE_QUERY_DEVSTATS:
1413                 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1414                 if(info != NULL) {
1415                     mtx_lock(&lldev->mtx_drv);
1416                     status =xge_hal_mgmt_device_stats(lldev->devh, info,
1417                         sizeof(xge_hal_stats_device_info_t));
1418                     mtx_unlock(&lldev->mtx_drv);
1419                     if(status == XGE_HAL_OK) {
1420                         if(copyout(info, ifr_data_get_ptr(ifreqp),
1421                             sizeof(xge_hal_stats_device_info_t)) == 0)
1422                             retValue = 0;
1423                     }
1424                     else {
1425                         xge_trace(XGE_ERR, "Getting device info failed (%d)",
1426                             status);
1427                     }
1428                     xge_os_free(NULL, info,
1429                         sizeof(xge_hal_stats_device_info_t));
1430                 }
1431                 break;
1432
1433             case XGE_QUERY_SWSTATS:
1434                 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1435                 if(info != NULL) {
1436                     mtx_lock(&lldev->mtx_drv);
1437                     status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1438                         sizeof(xge_hal_stats_sw_err_t));
1439                     mtx_unlock(&lldev->mtx_drv);
1440                     if(status == XGE_HAL_OK) {
1441                         if(copyout(info, ifr_data_get_ptr(ifreqp),
1442                             sizeof(xge_hal_stats_sw_err_t)) == 0)
1443                             retValue = 0;
1444                     }
1445                     else {
1446                         xge_trace(XGE_ERR,
1447                             "Getting tcode statistics failed (%d)", status);
1448                     }
1449                     xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1450                 }
1451                 break;
1452
1453             case XGE_QUERY_DRIVERSTATS:
1454                 if(copyout(&lldev->driver_stats, ifr_data_get_ptr(ifreqp),
1455                     sizeof(xge_driver_stats_t)) == 0) {
1456                     retValue = 0;
1457                 }
1458                 else {
1459                     xge_trace(XGE_ERR,
1460                         "Copyout of driver statistics failed (%d)", status);
1461                 }
1462                 break;
1463
1464             case XGE_READ_VERSION:
1465                 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1466                 if(info != NULL) {
1467                     strcpy(info, XGE_DRIVER_VERSION);
1468                     if(copyout(info, ifr_data_get_ptr(ifreqp),
1469                         XGE_BUFFER_SIZE) == 0)
1470                         retValue = 0;
1471                     xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1472                 }
1473                 break;
1474
1475             case XGE_QUERY_DEVCONF:
1476                 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1477                 if(info != NULL) {
1478                     mtx_lock(&lldev->mtx_drv);
1479                     status = xge_hal_mgmt_device_config(lldev->devh, info,
1480                         sizeof(xge_hal_device_config_t));
1481                     mtx_unlock(&lldev->mtx_drv);
1482                     if(status == XGE_HAL_OK) {
1483                         if(copyout(info, ifr_data_get_ptr(ifreqp),
1484                             sizeof(xge_hal_device_config_t)) == 0)
1485                             retValue = 0;
1486                     }
1487                     else {
1488                         xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1489                             status);
1490                     }
1491                     xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1492                 }
1493                 break;
1494
1495             case XGE_QUERY_BUFFER_MODE:
1496                 if(copyout(&lldev->buffer_mode, ifr_data_get_ptr(ifreqp),
1497                     sizeof(int)) == 0)
1498                     retValue = 0;
1499                 break;
1500
1501             case XGE_SET_BUFFER_MODE_1:
1502             case XGE_SET_BUFFER_MODE_2:
1503             case XGE_SET_BUFFER_MODE_5:
1504                 mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1505                 if(copyout(&mode, ifr_data_get_ptr(ifreqp), sizeof(mode)) == 0)
1506                     retValue = 0;
1507                 break;
1508             default:
1509                 xge_trace(XGE_TRACE, "Nothing is matching");
1510                 retValue = ENOTTY;
1511                 break;
1512         }
1513         return retValue;
1514 }
1515
1516 /**
1517  * xge_ioctl_registers
1518  * IOCTL to get registers
1519  *
1520  * @lldev Per-adapter data
1521  * @ifreqp Interface request
1522  */
1523 int
1524 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1525 {
1526         xge_register_t tmpdata;
1527         xge_register_t *data;
1528         xge_hal_status_e status = XGE_HAL_OK;
1529         int retValue = EINVAL, offset = 0, index = 0;
1530         int error;
1531         u64 val64 = 0;
1532
1533         error = copyin(ifr_data_get_ptr(ifreqp), &tmpdata, sizeof(tmpdata));
1534         if (error != 0)
1535                 return (error);
1536         data = &tmpdata;
1537
1538         /* Reading a register */
1539         if(strcmp(data->option, "-r") == 0) {
1540             data->value = 0x0000;
1541             mtx_lock(&lldev->mtx_drv);
1542             status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1543                 &data->value);
1544             mtx_unlock(&lldev->mtx_drv);
1545             if(status == XGE_HAL_OK) {
1546                 if(copyout(data, ifr_data_get_ptr(ifreqp),
1547                     sizeof(xge_register_t)) == 0)
1548                     retValue = 0;
1549             }
1550         }
1551         /* Writing to a register */
1552         else if(strcmp(data->option, "-w") == 0) {
1553             mtx_lock(&lldev->mtx_drv);
1554             status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
1555                 data->value);
1556             if(status == XGE_HAL_OK) {
1557                 val64 = 0x0000;
1558                 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1559                     &val64);
1560                 if(status != XGE_HAL_OK) {
1561                     xge_trace(XGE_ERR, "Reading back updated register failed");
1562                 }
1563                 else {
1564                     if(val64 != data->value) {
1565                         xge_trace(XGE_ERR,
1566                             "Read and written register values mismatched");
1567                     }
1568                     else retValue = 0;
1569                 }
1570             }
1571             else {
1572                 xge_trace(XGE_ERR, "Getting register value failed");
1573             }
1574             mtx_unlock(&lldev->mtx_drv);
1575         }
1576         else {
1577             mtx_lock(&lldev->mtx_drv);
1578             for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1579                 index++, offset += 0x0008) {
1580                 val64 = 0;
1581                 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1582                 if(status != XGE_HAL_OK) {
1583                     xge_trace(XGE_ERR, "Getting register value failed");
1584                     break;
1585                 }
1586                 *((u64 *)((u64 *)data + index)) = val64;
1587                 retValue = 0;
1588             }
1589             mtx_unlock(&lldev->mtx_drv);
1590
1591             if(retValue == 0) {
1592                 if(copyout(data, ifr_data_get_ptr(ifreqp),
1593                     sizeof(xge_hal_pci_bar0_t)) != 0) {
1594                     xge_trace(XGE_ERR, "Copyout of register values failed");
1595                     retValue = EINVAL;
1596                 }
1597             }
1598             else {
1599                 xge_trace(XGE_ERR, "Getting register values failed");
1600             }
1601         }
1602         return retValue;
1603 }
1604
/**
 * xge_ioctl
 * Callback to control the device - Interface configuration
 *
 * Dispatches ifnet ioctl commands: address get/set, MTU, interface
 * flags, multicast list updates, media, capability toggling and the
 * two driver-private (SIOCGPRIVATE_*) commands.
 *
 * @ifnetp Interface Handle
 * @command Device control command
 * @data Parameters associated with command (if any)
 *
 * Returns 0 on success or an errno value (EINVAL for unknown commands)
 */
int
xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
{
        struct ifreq   *ifreqp   = (struct ifreq *)data;
        xge_lldev_t    *lldev    = ifnetp->if_softc;
        struct ifmedia *ifmediap = &lldev->media;
        int             retValue = 0, mask = 0;

        /* Silently ignore ioctls once detach has begun */
        if(lldev->in_detach) {
            return retValue;
        }

        switch(command) {
            /* Set/Get ifnet address */
            case SIOCSIFADDR:
            case SIOCGIFADDR:
                ether_ioctl(ifnetp, command, data);
                break;

            /* Set ifnet MTU */
            case SIOCSIFMTU:
                retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
                break;

            /* Set ifnet flags */
            case SIOCSIFFLAGS:
                if(ifnetp->if_flags & IFF_UP) {
                    /* Link status is UP */
                    if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
                        xge_init(lldev);
                    }
                    /* Re-sync promiscuous state with the current if_flags:
                     * disable first, then re-enable if IFF_PROMISC is set */
                    xge_disable_promisc(lldev);
                    xge_enable_promisc(lldev);
                }
                else {
                    /* Link status is DOWN */
                    /* If device is in running, make it down */
                    if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
                        xge_stop(lldev);
                    }
                }
                break;

            /* Add/delete multicast address */
            case SIOCADDMULTI:
            case SIOCDELMULTI:
                if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
                    xge_setmulti(lldev);
                }
                break;

            /* Set/Get net media */
            case SIOCSIFMEDIA:
            case SIOCGIFMEDIA:
                retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
                break;

            /* Set capabilities */
            case SIOCSIFCAP:
                mtx_lock(&lldev->mtx_drv);
                /* mask holds the capability bits being toggled */
                mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
                if(mask & IFCAP_TXCSUM) {
                    if(ifnetp->if_capenable & IFCAP_TXCSUM) {
                        /* Disabling Tx checksum also drops TSO4, since the
                         * TSO path relies on checksum offload */
                        ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                        ifnetp->if_hwassist &=
                            ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
                    }
                    else {
                        ifnetp->if_capenable |= IFCAP_TXCSUM;
                        ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
                    }
                }
                if(mask & IFCAP_TSO4) {
                    if(ifnetp->if_capenable & IFCAP_TSO4) {
                        ifnetp->if_capenable &= ~IFCAP_TSO4;
                        ifnetp->if_hwassist  &= ~CSUM_TSO;

                        xge_os_printf("%s: TSO Disabled",
                            device_get_nameunit(lldev->device));
                    }
                    /* TSO4 may only be turned on while TXCSUM is enabled */
                    else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
                        ifnetp->if_capenable |= IFCAP_TSO4;
                        ifnetp->if_hwassist  |= CSUM_TSO;

                        xge_os_printf("%s: TSO Enabled",
                            device_get_nameunit(lldev->device));
                    }
                }

                mtx_unlock(&lldev->mtx_drv);
                break;

            /* Custom IOCTL 0: driver statistics (see xge_ioctl_stats) */
            case SIOCGPRIVATE_0:
                retValue = xge_ioctl_stats(lldev, ifreqp);
                break;

            /* Custom IOCTL 1: register access (see xge_ioctl_registers) */
            case SIOCGPRIVATE_1:
                retValue = xge_ioctl_registers(lldev, ifreqp);
                break;

            default:
                retValue = EINVAL;
                break;
        }
        return retValue;
}
1721
1722 /**
1723  * xge_init
1724  * Initialize the interface
1725  *
1726  * @plldev Per-adapter Data
1727  */
1728 void
1729 xge_init(void *plldev)
1730 {
1731         xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1732
1733         mtx_lock(&lldev->mtx_drv);
1734         xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1735         xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1736         mtx_unlock(&lldev->mtx_drv);
1737 }
1738
1739 /**
1740  * xge_device_init
1741  * Initialize the interface (called by holding lock)
1742  *
1743  * @pdevin Per-adapter Data
1744  */
1745 void
1746 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1747 {
1748         struct ifnet     *ifnetp = lldev->ifnetp;
1749         xge_hal_device_t *hldev  = lldev->devh;
1750         struct ifaddr      *ifaddrp;
1751         unsigned char      *macaddr;
1752         struct sockaddr_dl *sockaddrp;
1753         int                 status   = XGE_HAL_OK;
1754
1755         mtx_assert((&lldev->mtx_drv), MA_OWNED);
1756
1757         /* If device is in running state, initializing is not required */
1758         if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1759             return;
1760
1761         /* Initializing timer */
1762         callout_init(&lldev->timer, 1);
1763
1764         xge_trace(XGE_TRACE, "Set MTU size");
1765         status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1766         if(status != XGE_HAL_OK) {
1767             xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1768             goto _exit;
1769         }
1770
1771         /* Enable HAL device */
1772         xge_hal_device_enable(hldev);
1773
1774         /* Get MAC address and update in HAL */
1775         ifaddrp             = ifnetp->if_addr;
1776         sockaddrp           = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1777         sockaddrp->sdl_type = IFT_ETHER;
1778         sockaddrp->sdl_alen = ifnetp->if_addrlen;
1779         macaddr             = LLADDR(sockaddrp);
1780         xge_trace(XGE_TRACE,
1781             "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1782             *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1783             *(macaddr + 4), *(macaddr + 5));
1784         status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1785         if(status != XGE_HAL_OK)
1786             xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1787
1788         /* Opening channels */
1789         mtx_unlock(&lldev->mtx_drv);
1790         status = xge_channel_open(lldev, option);
1791         mtx_lock(&lldev->mtx_drv);
1792         if(status != XGE_HAL_OK)
1793             goto _exit;
1794
1795         /* Set appropriate flags */
1796         ifnetp->if_drv_flags  |=  IFF_DRV_RUNNING;
1797         ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1798
1799         /* Checksum capability */
1800         ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1801             (CSUM_TCP | CSUM_UDP) : 0;
1802
1803         if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1804             ifnetp->if_hwassist |= CSUM_TSO;
1805
1806         /* Enable interrupts */
1807         xge_hal_device_intr_enable(hldev);
1808
1809         callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1810
1811         /* Disable promiscuous mode */
1812         xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1813         xge_enable_promisc(lldev);
1814
1815         /* Device is initialized */
1816         lldev->initialized = 1;
1817         xge_os_mdelay(1000);
1818
1819 _exit:
1820         return;
1821 }
1822
1823 /**
1824  * xge_timer
1825  * Timer timeout function to handle link status
1826  *
1827  * @devp Per-adapter Data
1828  */
1829 void
1830 xge_timer(void *devp)
1831 {
1832         xge_lldev_t      *lldev = (xge_lldev_t *)devp;
1833         xge_hal_device_t *hldev = lldev->devh;
1834
1835         /* Poll for changes */
1836         xge_hal_device_poll(hldev);
1837
1838         /* Reset timer */
1839         callout_reset(&lldev->timer, hz, xge_timer, lldev);
1840
1841         return;
1842 }
1843
1844 /**
1845  * xge_stop
1846  * De-activate the interface
1847  *
1848  * @lldev Per-adater Data
1849  */
1850 void
1851 xge_stop(xge_lldev_t *lldev)
1852 {
1853         mtx_lock(&lldev->mtx_drv);
1854         xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1855         mtx_unlock(&lldev->mtx_drv);
1856 }
1857
1858 /**
1859  * xge_isr_filter
1860  * ISR filter function - to filter interrupts from other devices (shared)
1861  *
1862  * @handle Per-adapter Data
1863  *
1864  * Returns
1865  * FILTER_STRAY if interrupt is from other device
1866  * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1867  */
1868 int
1869 xge_isr_filter(void *handle)
1870 {
1871         xge_lldev_t *lldev       = (xge_lldev_t *)handle;
1872         xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1873         u16 retValue = FILTER_STRAY;
1874         u64 val64    = 0;
1875
1876         XGE_DRV_STATS(isr_filter);
1877
1878         val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1879             &bar0->general_int_status);
1880         retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1881
1882         return retValue;
1883 }
1884
1885 /**
1886  * xge_isr_line
1887  * Interrupt service routine for Line interrupts
1888  *
1889  * @plldev Per-adapter Data
1890  */
1891 void
1892 xge_isr_line(void *plldev)
1893 {
1894         xge_hal_status_e status;
1895         xge_lldev_t      *lldev   = (xge_lldev_t *)plldev;
1896         xge_hal_device_t *hldev   = (xge_hal_device_t *)lldev->devh;
1897         struct ifnet     *ifnetp  = lldev->ifnetp;
1898
1899         XGE_DRV_STATS(isr_line);
1900
1901         if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1902             status = xge_hal_device_handle_irq(hldev);
1903             if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1904                 xge_send(ifnetp);
1905         }
1906 }
1907
1908 /*
1909  * xge_isr_msi
1910  * ISR for Message signaled interrupts
1911  */
1912 void
1913 xge_isr_msi(void *plldev)
1914 {
1915         xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1916         XGE_DRV_STATS(isr_msi);
1917         xge_hal_device_continue_irq(lldev->devh);
1918 }
1919
1920 /**
1921  * xge_rx_open
1922  * Initiate and open all Rx channels
1923  *
1924  * @qid Ring Index
1925  * @lldev Per-adapter Data
1926  * @rflag Channel open/close/reopen flag
1927  *
1928  * Returns 0 or Error Number
1929  */
1930 int
1931 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1932 {
1933         u64 adapter_status = 0x0;
1934         xge_hal_status_e status = XGE_HAL_FAIL;
1935
1936         xge_hal_channel_attr_t attr = {
1937             .post_qid      = qid,
1938             .compl_qid     = 0,
1939             .callback      = xge_rx_compl,
1940             .per_dtr_space = sizeof(xge_rx_priv_t),
1941             .flags         = 0,
1942             .type          = XGE_HAL_CHANNEL_TYPE_RING,
1943             .userdata      = lldev,
1944             .dtr_init      = xge_rx_initial_replenish,
1945             .dtr_term      = xge_rx_term
1946         };
1947
1948         /* If device is not ready, return */
1949         status = xge_hal_device_status(lldev->devh, &adapter_status);
1950         if(status != XGE_HAL_OK) {
1951             xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1952             XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1953         }
1954         else {
1955             status = xge_hal_channel_open(lldev->devh, &attr,
1956                 &lldev->ring_channel[qid], rflag);
1957         }
1958
1959 _exit:
1960         return status;
1961 }
1962
1963 /**
1964  * xge_tx_open
1965  * Initialize and open all Tx channels
1966  *
1967  * @lldev Per-adapter Data
1968  * @tflag Channel open/close/reopen flag
1969  *
1970  * Returns 0 or Error Number
1971  */
1972 int
1973 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1974 {
1975         xge_hal_status_e status = XGE_HAL_FAIL;
1976         u64 adapter_status = 0x0;
1977         int qindex, index;
1978
1979         xge_hal_channel_attr_t attr = {
1980             .compl_qid     = 0,
1981             .callback      = xge_tx_compl,
1982             .per_dtr_space = sizeof(xge_tx_priv_t),
1983             .flags         = 0,
1984             .type          = XGE_HAL_CHANNEL_TYPE_FIFO,
1985             .userdata      = lldev,
1986             .dtr_init      = xge_tx_initial_replenish,
1987             .dtr_term      = xge_tx_term
1988         };
1989
1990         /* If device is not ready, return */
1991         status = xge_hal_device_status(lldev->devh, &adapter_status);
1992         if(status != XGE_HAL_OK) {
1993             xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1994             XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1995         }
1996
1997         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1998             attr.post_qid = qindex,
1999             status = xge_hal_channel_open(lldev->devh, &attr,
2000                 &lldev->fifo_channel[qindex], tflag);
2001             if(status != XGE_HAL_OK) {
2002                 for(index = 0; index < qindex; index++)
2003                     xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2004             }
2005         }
2006
2007 _exit:
2008         return status;
2009 }
2010
/**
 * xge_enable_msi
 * Enables MSI
 *
 * Rewrites the MSI data word in PCI config space, turns on a bit in
 * the MSI control word, and programs every free channel's TxMAT/RxMAT
 * entry with the new MSI data value.
 *
 * @lldev Per-adapter Data
 */
void
xge_enable_msi(xge_lldev_t *lldev)
{
        xge_list_t        *item    = NULL;
        xge_hal_device_t  *hldev   = lldev->devh;
        xge_hal_channel_t *channel = NULL;
        u16 offset = 0, val16 = 0;

        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);

        /* Update msi_data.  The data word's config-space offset depends
         * on bit 0x80 of msi_control — presumably the 64-bit address
         * capable bit of the MSI capability; TODO confirm against the
         * PCI MSI capability layout. */
        offset = (val16 & 0x80) ? 0x4c : 0x48;
        xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
        /* Toggle the low bit of msi_data so the written value differs
         * from what was there; the reason is not evident from this
         * file — NOTE(review): confirm intent. */
        if(val16 & 0x1)
            val16 &= 0xfffe;
        else
            val16 |= 0x1;
        xge_os_pci_write16(lldev->pdev, NULL, offset, val16);

        /* Update msi_control (bit 0x10 — meaning not visible here;
         * NOTE(review): verify against the hardware manual) */
        xge_os_pci_read16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
        val16 |= 0x10;
        xge_os_pci_write16(lldev->pdev, NULL,
            xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);

        /* Set TxMAT and RxMAT registers with MSI for each free channel,
         * using the toggled msi_data value */
        xge_list_for_each(item, &hldev->free_channels) {
            channel = xge_container_of(item, xge_hal_channel_t, item);
            xge_hal_channel_msi_set(channel, 1, (u32)val16);
        }
}
2050
/**
 * xge_channel_open
 * Open both Tx and Rx channels
 *
 * Creates DMA tags, opens all Rx ring channels (falling back from
 * 1-buffer to 5-buffer mode if Jumbo-sized contiguous DMA mapping
 * fails), pre-allocates LRO session entries when LRO is enabled, and
 * finally opens the Tx FIFO channels.
 *
 * @lldev Per-adapter Data
 * @option Channel reopen option
 *
 * Returns XGE_HAL_OK or a HAL error code
 */
int
xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
{
        xge_lro_entry_t *lro_session = NULL;
        xge_hal_status_e status   = XGE_HAL_OK;
        int index = 0, index2 = 0;

        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
            xge_msi_info_restore(lldev);
            xge_enable_msi(lldev);
        }

/* Restart point: reached again (via goto below) after switching from
 * 1-buffer to 5-buffer mode, so the DMA tags are re-created for the
 * new buffer geometry. */
_exit2:
        status = xge_create_dma_tags(lldev->device);
        if(status != XGE_HAL_OK)
            XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);

        /* Open ring (Rx) channel */
        for(index = 0; index < XGE_RING_COUNT; index++) {
            status = xge_rx_open(index, lldev, option);
            if(status != XGE_HAL_OK) {
                /*
                 * DMA mapping fails in the unpatched Kernel which can't
                 * allocate contiguous memory for Jumbo frames.
                 * Try using 5 buffer mode.
                 */
                if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
                    (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
                    MJUMPAGESIZE)) {
                    /* Close so far opened channels */
                    for(index2 = 0; index2 < index; index2++) {
                        xge_hal_channel_close(lldev->ring_channel[index2],
                            option);
                    }

                    /* Destroy DMA tags intended to use for 1 buffer mode */
                    if(bus_dmamap_destroy(lldev->dma_tag_rx,
                        lldev->extra_dma_map)) {
                        xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
                    }
                    if(bus_dma_tag_destroy(lldev->dma_tag_rx))
                        xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
                    if(bus_dma_tag_destroy(lldev->dma_tag_tx))
                        xge_trace(XGE_ERR, "Tx DMA tag destroy failed");

                    /* Switch to 5 buffer mode */
                    lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
                    xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);

                    /* Restart init */
                    goto _exit2;
                }
                else {
                    XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
                        status);
                }
            }
        }

        /* Pre-allocate a free list of LRO session entries; on allocation
         * failure, keep however many were allocated so far */
        if(lldev->enabled_lro) {
            SLIST_INIT(&lldev->lro_free);
            SLIST_INIT(&lldev->lro_active);
            lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;

            for(index = 0; index < lldev->lro_num; index++) {
                lro_session = (xge_lro_entry_t *)
                    xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
                if(lro_session == NULL) {
                    lldev->lro_num = index;
                    break;
                }
                SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
            }
        }

        /* Open FIFO (Tx) channel */
        status = xge_tx_open(lldev, option);
        if(status != XGE_HAL_OK)
            XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);

        goto _exit;

_exit1:
        /*
         * Opening Rx channel(s) failed (index is <last ring index - 1>) or
         * Initialization of LRO failed (index is XGE_RING_COUNT)
         * Opening Tx channel failed    (index is XGE_RING_COUNT)
         */
        for(index2 = 0; index2 < index; index2++)
            xge_hal_channel_close(lldev->ring_channel[index2], option);

_exit:
        return status;
}
2152
2153 /**
2154  * xge_channel_close
2155  * Close both Tx and Rx channels
2156  *
2157  * @lldev Per-adapter Data
2158  * @option Channel reopen option
2159  *
2160  */
2161 void
2162 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2163 {
2164         int qindex = 0;
2165
2166         DELAY(1000 * 1000);
2167
2168         /* Close FIFO (Tx) channel */
2169         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2170             xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2171
2172         /* Close Ring (Rx) channels */
2173         for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2174             xge_hal_channel_close(lldev->ring_channel[qindex], option);
2175
2176         if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2177             xge_trace(XGE_ERR, "Rx extra map destroy failed");
2178         if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2179             xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2180         if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2181             xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2182 }
2183
2184 /**
2185  * dmamap_cb
2186  * DMA map callback
2187  *
2188  * @arg Parameter passed from dmamap
2189  * @segs Segments
2190  * @nseg Number of segments
2191  * @error Error
2192  */
2193 void
2194 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2195 {
2196         if(!error) {
2197             *(bus_addr_t *) arg = segs->ds_addr;
2198         }
2199 }
2200
2201 /**
2202  * xge_reset
2203  * Device Reset
2204  *
2205  * @lldev Per-adapter Data
2206  */
2207 void
2208 xge_reset(xge_lldev_t *lldev)
2209 {
2210         xge_trace(XGE_TRACE, "Reseting the chip");
2211
2212         /* If the device is not initialized, return */
2213         if(lldev->initialized) {
2214             mtx_lock(&lldev->mtx_drv);
2215             xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2216             xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2217             mtx_unlock(&lldev->mtx_drv);
2218         }
2219
2220         return;
2221 }
2222
/**
 * xge_setmulti
 * Set an address as a multicast address
 *
 * Toggles the HAL all-multicast state, then (when not in all-multicast
 * mode) rewrites the device MAC address table from the interface's
 * current multicast list.
 *
 * @lldev Per-adapter Data
 */
void
xge_setmulti(xge_lldev_t *lldev)
{
        struct ifmultiaddr *ifma;
        u8                 *lladdr;
        xge_hal_device_t   *hldev        = (xge_hal_device_t *)lldev->devh;
        struct ifnet       *ifnetp       = lldev->ifnetp;
        int                index         = 0;
        /* Slot 0 is skipped (presumably the station's unicast address —
         * TODO confirm); multicast entries start at slot 1 */
        int                offset        = 1;
        int                table_size    = 47;
        xge_hal_status_e   status        = XGE_HAL_OK;
        u8                 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

        /* NOTE(review): both branches test the same IFF_MULTICAST bit, so
         * successive calls with IFF_MULTICAST set alternately enable and
         * disable all-multicast — confirm this toggle is intentional. */
        if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
            status = xge_hal_device_mcast_enable(hldev);
            lldev->all_multicast = 1;
        }
        else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
            status = xge_hal_device_mcast_disable(hldev);
            lldev->all_multicast = 0;
        }

        if(status != XGE_HAL_OK) {
            xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
            goto _exit;
        }

        /* First pass: count the AF_LINK multicast addresses.  The lladdr
         * assignment is unused in this pass (only the count matters). */
        if_maddr_rlock(ifnetp);
        index = 0;
        TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
            if(ifma->ifma_addr->sa_family != AF_LINK) {
                continue;
            }
            lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
            index += 1;
        }
        if_maddr_runlock(ifnetp);

        if((!lldev->all_multicast) && (index)) {
            lldev->macaddr_count = (index + 1);
            /* Give up silently if the list won't fit in the table */
            if(lldev->macaddr_count > table_size) {
                goto _exit;
            }

            /* Clear old addresses by writing the broadcast pattern.
             * NOTE(review): table_size is 47 but this loop clears 48
             * slots (1..48 after the offset) — confirm the intended
             * table bounds. */
            for(index = 0; index < 48; index++) {
                xge_hal_device_macaddr_set(hldev, (offset + index),
                    initial_addr);
            }
        }

        /* Second pass: program each multicast address into consecutive
         * table slots starting at `offset` */
        if_maddr_rlock(ifnetp);
        index = 0;
        TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
            if(ifma->ifma_addr->sa_family != AF_LINK) {
                continue;
            }
            lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
            xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
            index += 1;
        }
        if_maddr_runlock(ifnetp);

_exit:
        return;
}
2297
2298 /**
2299  * xge_enable_promisc
2300  * Enable Promiscuous Mode
2301  *
2302  * @lldev Per-adapter Data
2303  */
2304 void
2305 xge_enable_promisc(xge_lldev_t *lldev)
2306 {
2307         struct ifnet *ifnetp = lldev->ifnetp;
2308         xge_hal_device_t *hldev = lldev->devh;
2309         xge_hal_pci_bar0_t *bar0 = NULL;
2310         u64 val64 = 0;
2311
2312         bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2313
2314         if(ifnetp->if_flags & IFF_PROMISC) {
2315             xge_hal_device_promisc_enable(lldev->devh);
2316
2317             /*
2318              * When operating in promiscuous mode, don't strip the VLAN tag
2319              */
2320             val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2321                 &bar0->rx_pa_cfg);
2322             val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2323             val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2324             xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2325                 &bar0->rx_pa_cfg);
2326
2327             xge_trace(XGE_TRACE, "Promiscuous mode ON");
2328         }
2329 }
2330
2331 /**
2332  * xge_disable_promisc
2333  * Disable Promiscuous Mode
2334  *
2335  * @lldev Per-adapter Data
2336  */
2337 void
2338 xge_disable_promisc(xge_lldev_t *lldev)
2339 {
2340         xge_hal_device_t *hldev = lldev->devh;
2341         xge_hal_pci_bar0_t *bar0 = NULL;
2342         u64 val64 = 0;
2343
2344         bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2345
2346         xge_hal_device_promisc_disable(lldev->devh);
2347
2348         /*
2349          * Strip VLAN tag when operating in non-promiscuous mode
2350          */
2351         val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2352             &bar0->rx_pa_cfg);
2353         val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2354         val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2355         xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2356             &bar0->rx_pa_cfg);
2357
2358         xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2359 }
2360
2361 /**
2362  * xge_change_mtu
2363  * Change interface MTU to a requested valid size
2364  *
2365  * @lldev Per-adapter Data
2366  * @NewMtu Requested MTU
2367  *
2368  * Returns 0 or Error Number
2369  */
2370 int
2371 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2372 {
2373         int status = XGE_HAL_OK;
2374
2375         /* Check requested MTU size for boundary */
2376         if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2377             XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2378         }
2379
2380         lldev->mtu = new_mtu;
2381         xge_confirm_changes(lldev, XGE_SET_MTU);
2382
2383 _exit:
2384         return status;
2385 }
2386
/**
 * xge_device_stop
 *
 * Common code for both stop and part of reset. Disables device, interrupts and
 * closes channels
 *
 * @lldev Per-adapter Data
 * @option Channel normal/reset option
 */
void
xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
{
        xge_hal_device_t *hldev  = lldev->devh;
        struct ifnet     *ifnetp = lldev->ifnetp;
        u64               val64  = 0;

        mtx_assert((&lldev->mtx_drv), MA_OWNED);

        /* If device is not in "Running" state, return */
        if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
            goto _exit;

        /* Mark the interface down first so no new work is started */
        ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* Stop timer */
        callout_stop(&lldev->timer);

        /* Disable interrupts */
        xge_hal_device_intr_disable(hldev);

        /* Drop the driver lock across the queue flush and retake it —
         * presumably because queued event handlers acquire mtx_drv;
         * NOTE(review): confirm against the HAL queue implementation. */
        mtx_unlock(&lldev->mtx_drv);
        xge_queue_flush(xge_hal_device_queue(lldev->devh));
        mtx_lock(&lldev->mtx_drv);

        /* Disable HAL device; on failure log the adapter status but
         * continue tearing down anyway */
        if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
            xge_trace(XGE_ERR, "Disabling HAL device failed");
            xge_hal_device_status(hldev, &val64);
            xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
        }

        /* Close Tx and Rx channels */
        xge_channel_close(lldev, option);

        /* Reset HAL device */
        xge_hal_device_reset(hldev);

        xge_os_mdelay(1000);
        lldev->initialized = 0;

        if_link_state_change(ifnetp, LINK_STATE_DOWN);

_exit:
        return;
}
2443
2444 /**
2445  * xge_set_mbuf_cflags
2446  * set checksum flag for the mbuf
2447  *
2448  * @pkt Packet
2449  */
2450 void
2451 xge_set_mbuf_cflags(mbuf_t pkt)
2452 {
2453         pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2454         pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2455         pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2456         pkt->m_pkthdr.csum_data = htons(0xffff);
2457 }
2458
2459 /**
2460  * xge_lro_flush_sessions
2461  * Flush LRO session and send accumulated LRO packet to upper layer
2462  *
2463  * @lldev Per-adapter Data
2464  */
2465 void
2466 xge_lro_flush_sessions(xge_lldev_t *lldev)
2467 {
2468         xge_lro_entry_t *lro_session = NULL;
2469
2470         while(!SLIST_EMPTY(&lldev->lro_active)) {
2471             lro_session = SLIST_FIRST(&lldev->lro_active);
2472             SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2473             xge_lro_flush(lldev, lro_session);
2474         }
2475 }
2476
/**
 * xge_lro_flush
 * Flush LRO session. Send accumulated LRO packet to upper layer
 *
 * Patches the merged packet's IP/TCP headers to reflect the aggregated
 * length and latest ACK/window/timestamp, hands the mbuf chain to the
 * stack, and returns the session entry to the free list.
 *
 * @lldev Per-adapter Data
 * @lro_session LRO session to be flushed
 */
static void
xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
{
        struct ip *header_ip;
        struct tcphdr *header_tcp;
        u32 *ptr;

        /* Only rewrite headers if segments were actually appended */
        if(lro_session->append_cnt) {
            header_ip = lro_session->lro_header_ip;
            /* Aggregated IP total length (session len minus Ethernet
             * header).  NOTE(review): the IP header checksum is not
             * recomputed here — confirm it is handled (or ignored)
             * further up the stack. */
            header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
            lro_session->m_head->m_pkthdr.len = lro_session->len;
            header_tcp = (struct tcphdr *)(header_ip + 1);
            /* Latest ACK number and window from the last merged segment */
            header_tcp->th_ack = lro_session->ack_seq;
            header_tcp->th_win = lro_session->window;
            if(lro_session->timestamp) {
                /* Timestamp option words follow the TCP header:
                 * ptr[1] = TSval (converted), ptr[2] = TSecr (stored
                 * as-is — presumably already in network order; verify
                 * against the accumulate path). */
                ptr = (u32 *)(header_tcp + 1);
                ptr[1] = htonl(lro_session->tsval);
                ptr[2] = lro_session->tsecr;
            }
        }

        /* Deliver the merged chain to the stack and recycle the session */
        (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
        lro_session->m_head = NULL;
        lro_session->timestamp = 0;
        lro_session->append_cnt = 0;
        SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
}
2511
/**
 * xge_lro_accumulate
 * Accumulate packets to form a large LRO packet based on various conditions
 *
 * @lldev Per-adapter Data
 * @m_head Current Packet
 *
 * Returns XGE_HAL_OK if the packet was merged into (or started) an LRO
 * session (the caller must NOT input it to the stack), or XGE_HAL_FAIL if
 * the packet does not qualify and must be handed up unmodified.
 */
static int
xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
{
        struct ether_header *header_ethernet;
        struct ip *header_ip;
        struct tcphdr *header_tcp;
        u32 seq, *ptr;
        struct mbuf *buffer_next, *buffer_tail;
        xge_lro_entry_t *lro_session;
        xge_hal_status_e status = XGE_HAL_FAIL;
        int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
        int trim;

        /* Get Ethernet header */
        header_ethernet = mtod(m_head, struct ether_header *);

        /* Return if it is not IP packet */
        if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
            goto _exit;

        /*
         * Get IP header: in 1-buffer mode it follows the Ethernet header in
         * the same mbuf; in multi-buffer modes it starts the second mbuf.
         */
        header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
            (struct ip *)(header_ethernet + 1) :
            mtod(m_head->m_next, struct ip *);

        /* Return if it is not TCP packet */
        if(header_ip->ip_p != IPPROTO_TCP)
            goto _exit;

        /* Return if packet has IP options (header longer than base struct) */
        if((header_ip->ip_hl << 2) != sizeof(*header_ip))
            goto _exit;

        /* Return if packet is fragmented */
        if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
            goto _exit;

        /* Get TCP header */
        header_tcp = (struct tcphdr *)(header_ip + 1);

        /* Return if any TCP flag other than ACK or PUSH is set */
        if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
            goto _exit;

        /*
         * Only the timestamp option in its NOP-NOP-TIMESTAMP ("appendix A")
         * layout is handled; any other option disqualifies the packet.
         * tcp_hdr_len is computed here but not used further in this function.
         */
        tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
        tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
        ptr = (u32 *)(header_tcp + 1);
        if(tcp_options != 0) {
            if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
                (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
                TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
                goto _exit;
            }
        }

        /* Total length of packet (IP) */
        ip_len = ntohs(header_ip->ip_len);

        /* TCP data size */
        tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);

        /* If the frame is padded (e.g. minimum Ethernet size), trim it */
        tot_len = m_head->m_pkthdr.len;
        trim = tot_len - (ip_len + ETHER_HDR_LEN);
        if(trim != 0) {
            if(trim < 0)
                goto _exit;
            m_adj(m_head, -trim);
            tot_len = m_head->m_pkthdr.len;
        }

        /* Find the last mbuf of the chain (becomes the session tail) */
        buffer_next = m_head;
        buffer_tail = NULL;
        while(buffer_next != NULL) {
            buffer_tail = buffer_next;
            buffer_next = buffer_tail->m_next;
        }

        /* Total size of only headers */
        hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;

        /* Get sequence number */
        seq = ntohl(header_tcp->th_seq);

        /* Look for an active session matching this TCP/IP 4-tuple */
        SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
            if(lro_session->source_port == header_tcp->th_sport &&
                lro_session->dest_port == header_tcp->th_dport &&
                lro_session->source_ip == header_ip->ip_src.s_addr &&
                lro_session->dest_ip == header_ip->ip_dst.s_addr) {

                /* Unmatched sequence number, flush LRO session */
                if(__predict_false(seq != lro_session->next_seq)) {
                    SLIST_REMOVE(&lldev->lro_active, lro_session,
                        xge_lro_entry_t, next);
                    xge_lro_flush(lldev, lro_session);
                    goto _exit;
                }

                /*
                 * Handle timestamp option: reject packets whose timestamp
                 * went backwards or whose echo reply is zero.  tsecr is kept
                 * in network byte order; only tsval is converted.
                 */
                if(tcp_options) {
                    u32 tsval = ntohl(*(ptr + 1));
                    if(__predict_false(lro_session->tsval > tsval ||
                        *(ptr + 2) == 0)) {
                        goto _exit;
                    }
                    lro_session->tsval = tsval;
                    lro_session->tsecr = *(ptr + 2);
                }

                /* Track latest ACK/window so the flush rewrites the header */
                lro_session->next_seq += tcp_data_len;
                lro_session->ack_seq = header_tcp->th_ack;
                lro_session->window = header_tcp->th_win;

                /* If TCP data/payload is of 0 size, free mbuf */
                if(tcp_data_len == 0) {
                    m_freem(m_head);
                    status = XGE_HAL_OK;
                    goto _exit;
                }

                lro_session->append_cnt++;
                lro_session->len += tcp_data_len;

                /* Adjust mbuf so that m_data points to payload than headers */
                m_adj(m_head, hlen);

                /* Append this packet to LRO accumulated packet */
                lro_session->m_tail->m_next = m_head;
                lro_session->m_tail = buffer_tail;

                /* Flush if LRO packet is exceeding maximum size */
                if(lro_session->len >
                    (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
                    SLIST_REMOVE(&lldev->lro_active, lro_session,
                        xge_lro_entry_t, next);
                    xge_lro_flush(lldev, lro_session);
                }
                status = XGE_HAL_OK;
                goto _exit;
            }
        }

        /* No free session slot available; cannot start a new one */
        if(SLIST_EMPTY(&lldev->lro_free))
            goto _exit;

        /* Start a new LRO session */
        lro_session = SLIST_FIRST(&lldev->lro_free);
        SLIST_REMOVE_HEAD(&lldev->lro_free, next);
        SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
        lro_session->source_port = header_tcp->th_sport;
        lro_session->dest_port = header_tcp->th_dport;
        lro_session->source_ip = header_ip->ip_src.s_addr;
        lro_session->dest_ip = header_ip->ip_dst.s_addr;
        lro_session->next_seq = seq + tcp_data_len;
        lro_session->mss = tcp_data_len;
        lro_session->ack_seq = header_tcp->th_ack;
        lro_session->window = header_tcp->th_win;

        lro_session->lro_header_ip = header_ip;

        /* Handle timestamp option (tsecr stored in network byte order) */
        if(tcp_options) {
            lro_session->timestamp = 1;
            lro_session->tsval = ntohl(*(ptr + 1));
            lro_session->tsecr = *(ptr + 2);
        }

        lro_session->len = tot_len;
        lro_session->m_head = m_head;
        lro_session->m_tail = buffer_tail;
        status = XGE_HAL_OK;

_exit:
        return status;
}
2697
2698 /**
2699  * xge_accumulate_large_rx
2700  * Accumulate packets to form a large LRO packet based on various conditions
2701  *
2702  * @lldev Per-adapter Data
2703  * @pkt Current packet
2704  * @pkt_length Packet Length
2705  * @rxd_priv Rx Descriptor Private Data
2706  */
2707 void
2708 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2709         xge_rx_priv_t *rxd_priv)
2710 {
2711         if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2712             bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2713                 BUS_DMASYNC_POSTREAD);
2714             (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2715         }
2716 }
2717
2718 /**
2719  * xge_rx_compl
2720  * If the interrupt is due to received frame (Rx completion), send it up
2721  *
2722  * @channelh Ring Channel Handle
2723  * @dtr Current Descriptor
2724  * @t_code Transfer Code indicating success or error
2725  * @userdata Per-adapter Data
2726  *
2727  * Returns XGE_HAL_OK or HAL error enums
2728  */
2729 xge_hal_status_e
2730 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2731         void *userdata)
2732 {
2733         struct ifnet       *ifnetp;
2734         xge_rx_priv_t      *rxd_priv = NULL;
2735         mbuf_t              mbuf_up  = NULL;
2736         xge_hal_status_e    status   = XGE_HAL_OK;
2737         xge_hal_dtr_info_t  ext_info;
2738         int                 index;
2739         u16                 vlan_tag;
2740
2741         /*get the user data portion*/
2742         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2743         if(!lldev) {
2744             XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2745         }
2746
2747         XGE_DRV_STATS(rx_completions);
2748
2749         /* get the interface pointer */
2750         ifnetp = lldev->ifnetp;
2751
2752         do {
2753             XGE_DRV_STATS(rx_desc_compl);
2754
2755             if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2756                 status = XGE_HAL_FAIL;
2757                 goto _exit;
2758             }
2759
2760             if(t_code) {
2761                 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2762                 XGE_DRV_STATS(rx_tcode);
2763                 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2764                 xge_hal_ring_dtr_post(channelh,dtr);
2765                 continue;
2766             }
2767
2768             /* Get the private data for this descriptor*/
2769             rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2770                 dtr);
2771             if(!rxd_priv) {
2772                 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2773                     XGE_HAL_FAIL);
2774             }
2775
2776             /*
2777              * Prepare one buffer to send it to upper layer -- since the upper
2778              * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2779              * prepare a new buffer, do mapping, use it in the current
2780              * descriptor and post descriptor back to ring channel
2781              */
2782             mbuf_up = rxd_priv->bufferArray[0];
2783
2784             /* Gets details of mbuf i.e., packet length */
2785             xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2786
2787             status =
2788                 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2789                 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2790                 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2791
2792             if(status != XGE_HAL_OK) {
2793                 xge_trace(XGE_ERR, "No memory");
2794                 XGE_DRV_STATS(rx_no_buf);
2795
2796                 /*
2797                  * Unable to allocate buffer. Instead of discarding, post
2798                  * descriptor back to channel for future processing of same
2799                  * packet.
2800                  */
2801                 xge_hal_ring_dtr_post(channelh, dtr);
2802                 continue;
2803             }
2804
2805             /* Get the extended information */
2806             xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2807
2808             /*
2809              * As we have allocated a new mbuf for this descriptor, post this
2810              * descriptor with new mbuf back to ring channel
2811              */
2812             vlan_tag = ext_info.vlan;
2813             xge_hal_ring_dtr_post(channelh, dtr);
2814             if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2815                 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2816                 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2817                 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2818
2819                 /* set Checksum Flag */
2820                 xge_set_mbuf_cflags(mbuf_up);
2821
2822                 if(lldev->enabled_lro) {
2823                     xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2824                         rxd_priv);
2825                 }
2826                 else {
2827                     /* Post-Read sync for buffers*/
2828                     for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2829                         bus_dmamap_sync(lldev->dma_tag_rx,
2830                             rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2831                     }
2832                     (*ifnetp->if_input)(ifnetp, mbuf_up);
2833                 }
2834             }
2835             else {
2836                 /*
2837                  * Packet with erroneous checksum , let the upper layer deal
2838                  * with it
2839                  */
2840
2841                 /* Post-Read sync for buffers*/
2842                 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2843                     bus_dmamap_sync(lldev->dma_tag_rx,
2844                          rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2845                 }
2846
2847                 if(vlan_tag) {
2848                     mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2849                     mbuf_up->m_flags |= M_VLANTAG;
2850                 }
2851
2852                 if(lldev->enabled_lro)
2853                     xge_lro_flush_sessions(lldev);
2854
2855                 (*ifnetp->if_input)(ifnetp, mbuf_up);
2856             }
2857         } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2858             == XGE_HAL_OK);
2859
2860         if(lldev->enabled_lro)
2861             xge_lro_flush_sessions(lldev);
2862
2863 _exit:
2864         return status;
2865 }
2866
2867 /**
2868  * xge_ring_dtr_get
2869  * Get descriptors
2870  *
2871  * @mbuf_up Packet to send up
2872  * @channelh Ring Channel Handle
2873  * @dtr Descriptor
2874  * @lldev Per-adapter Data
2875  * @rxd_priv Rx Descriptor Private Data
2876  *
2877  * Returns XGE_HAL_OK or HAL error enums
2878  */
2879 int
2880 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2881         xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2882 {
2883         mbuf_t           m;
2884         int              pkt_length[5]={0,0}, pkt_len=0;
2885         dma_addr_t       dma_data[5];
2886         int              index;
2887
2888         m = mbuf_up;
2889         pkt_len = 0;
2890
2891         if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2892             xge_os_memzero(pkt_length, sizeof(pkt_length));
2893
2894             /*
2895              * Retrieve data of interest from the completed descriptor -- This
2896              * returns the packet length
2897              */
2898             if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2899                 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2900             }
2901             else {
2902                 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2903             }
2904
2905             for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2906                 m->m_len  = pkt_length[index];
2907
2908                 if(index < (lldev->rxd_mbuf_cnt-1)) {
2909                     m->m_next = rxd_priv->bufferArray[index + 1];
2910                     m = m->m_next;
2911                 }
2912                 else {
2913                     m->m_next = NULL;
2914                 }
2915                 pkt_len+=pkt_length[index];
2916             }
2917
2918             /*
2919              * Since 2 buffer mode is an exceptional case where data is in 3rd
2920              * buffer but not in 2nd buffer
2921              */
2922             if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2923                 m->m_len = pkt_length[2];
2924                 pkt_len+=pkt_length[2];
2925             }
2926
2927             /*
2928              * Update length of newly created buffer to be sent up with packet
2929              * length
2930              */
2931             mbuf_up->m_pkthdr.len = pkt_len;
2932         }
2933         else {
2934             /*
2935              * Retrieve data of interest from the completed descriptor -- This
2936              * returns the packet length
2937              */
2938             xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2939
2940             /*
2941              * Update length of newly created buffer to be sent up with packet
2942              * length
2943              */
2944             mbuf_up->m_len =  mbuf_up->m_pkthdr.len = pkt_length[0];
2945         }
2946
2947         return XGE_HAL_OK;
2948 }
2949
2950 /**
2951  * xge_flush_txds
2952  * Flush Tx descriptors
2953  *
2954  * @channelh Channel handle
2955  */
2956 static void inline
2957 xge_flush_txds(xge_hal_channel_h channelh)
2958 {
2959         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2960         xge_hal_dtr_h tx_dtr;
2961         xge_tx_priv_t *tx_priv;
2962         u8 t_code;
2963
2964         while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2965             == XGE_HAL_OK) {
2966             XGE_DRV_STATS(tx_desc_compl);
2967             if(t_code) {
2968                 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2969                 XGE_DRV_STATS(tx_tcode);
2970                 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2971             }
2972
2973             tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2974             bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2975             m_freem(tx_priv->buffer);
2976             tx_priv->buffer = NULL;
2977             xge_hal_fifo_dtr_free(channelh, tx_dtr);
2978         }
2979 }
2980
2981 /**
2982  * xge_send
2983  * Transmit function
2984  *
2985  * @ifnetp Interface Handle
2986  */
2987 void
2988 xge_send(struct ifnet *ifnetp)
2989 {
2990         int qindex = 0;
2991         xge_lldev_t *lldev = ifnetp->if_softc;
2992
2993         for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2994             if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2995                 XGE_DRV_STATS(tx_lock_fail);
2996                 break;
2997             }
2998             xge_send_locked(ifnetp, qindex);
2999             mtx_unlock(&lldev->mtx_tx[qindex]);
3000         }
3001 }
3002
3003 static void inline
3004 xge_send_locked(struct ifnet *ifnetp, int qindex)
3005 {
3006         xge_hal_dtr_h            dtr;
3007         static bus_dma_segment_t segs[XGE_MAX_SEGS];
3008         xge_hal_status_e         status;
3009         unsigned int             max_fragments;
3010         xge_lldev_t              *lldev          = ifnetp->if_softc;
3011         xge_hal_channel_h        channelh        = lldev->fifo_channel[qindex];
3012         mbuf_t                   m_head          = NULL;
3013         mbuf_t                   m_buf           = NULL;
3014         xge_tx_priv_t            *ll_tx_priv     = NULL;
3015         register unsigned int    count           = 0;
3016         unsigned int             nsegs           = 0;
3017         u16                      vlan_tag;
3018
3019         max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3020
3021         /* If device is not initialized, return */
3022         if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3023             return;
3024
3025         XGE_DRV_STATS(tx_calls);
3026
3027         /*
3028          * This loop will be executed for each packet in the kernel maintained
3029          * queue -- each packet can be with fragments as an mbuf chain
3030          */
3031         for(;;) {
3032             IF_DEQUEUE(&ifnetp->if_snd, m_head);
3033             if (m_head == NULL) {
3034                 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3035                 return;
3036             }
3037
3038             for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3039                 if(m_buf->m_len) count += 1;
3040             }
3041
3042             if(count >= max_fragments) {
3043                 m_buf = m_defrag(m_head, M_NOWAIT);
3044                 if(m_buf != NULL) m_head = m_buf;
3045                 XGE_DRV_STATS(tx_defrag);
3046             }
3047
3048             /* Reserve descriptors */
3049             status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3050             if(status != XGE_HAL_OK) {
3051                 XGE_DRV_STATS(tx_no_txd);
3052                 xge_flush_txds(channelh);
3053                 break;
3054             }
3055
3056             vlan_tag =
3057                 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3058             xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3059
3060             /* Update Tx private structure for this descriptor */
3061             ll_tx_priv         = xge_hal_fifo_dtr_private(dtr);
3062             ll_tx_priv->buffer = m_head;
3063
3064             /*
3065              * Do mapping -- Required DMA tag has been created in xge_init
3066              * function and DMA maps have already been created in the
3067              * xgell_tx_replenish function.
3068              * Returns number of segments through nsegs
3069              */
3070             if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3071                 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3072                 xge_trace(XGE_TRACE, "DMA map load failed");
3073                 XGE_DRV_STATS(tx_map_fail);
3074                 break;
3075             }
3076
3077             if(lldev->driver_stats.tx_max_frags < nsegs)
3078                 lldev->driver_stats.tx_max_frags = nsegs;
3079
3080             /* Set descriptor buffer for header and each fragment/segment */
3081             count = 0;
3082             do {
3083                 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3084                     (dma_addr_t)htole64(segs[count].ds_addr),
3085                     segs[count].ds_len);
3086                 count++;
3087             } while(count < nsegs);
3088
3089             /* Pre-write Sync of mapping */
3090             bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3091                 BUS_DMASYNC_PREWRITE);
3092
3093             if((lldev->enabled_tso) &&
3094                 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3095                 XGE_DRV_STATS(tx_tso);
3096                 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3097             }
3098
3099             /* Checksum */
3100             if(ifnetp->if_hwassist > 0) {
3101                 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3102                     | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3103             }
3104
3105             /* Post descriptor to FIFO channel */
3106             xge_hal_fifo_dtr_post(channelh, dtr);
3107             XGE_DRV_STATS(tx_posted);
3108
3109             /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3110              * listener so that we can use tools like tcpdump */
3111             ETHER_BPF_MTAP(ifnetp, m_head);
3112         }
3113
3114         /* Prepend the packet back to queue */
3115         IF_PREPEND(&ifnetp->if_snd, m_head);
3116         ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3117
3118         xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3119             XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3120         XGE_DRV_STATS(tx_again);
3121 }
3122
/**
 * xge_get_buf
 * Allocates new mbufs to be placed into descriptors
 *
 * @dtrh Descriptor Handle
 * @rxd_priv Rx Descriptor Private Data
 * @lldev Per-adapter Data
 * @index Buffer Index (if multi-buffer mode)
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
int
xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
        xge_lldev_t *lldev, int index)
{
        register mbuf_t mp            = NULL;
        struct          ifnet *ifnetp = lldev->ifnetp;
        int             status        = XGE_HAL_OK;
        int             buffer_size = 0, cluster_size = 0, count;
        /* This slot's old map; swapped with the adapter's spare map below */
        bus_dmamap_t    map = rxd_priv->dmainfo[index].dma_map;
        bus_dma_segment_t segs[3];

        /* Buffer size: full frame in 1-buffer mode, else per-slot length */
        buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
            ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
            lldev->rxd_mbuf_len[index];

        /* Pick the smallest cluster type that fits the buffer */
        if(buffer_size <= MCLBYTES) {
            cluster_size = MCLBYTES;
            mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        }
        else {
            cluster_size = MJUMPAGESIZE;
            if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
                (buffer_size > MJUMPAGESIZE)) {
                cluster_size = MJUM9BYTES;
            }
            mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
        }
        if(!mp) {
            xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
            status = XGE_HAL_FAIL;
            goto getbuf_out;
        }

        /* Update mbuf's length, packet length and receive interface */
        mp->m_len = mp->m_pkthdr.len = buffer_size;
        mp->m_pkthdr.rcvif = ifnetp;

        /*
         * Load the new mbuf into the adapter's spare DMA map.  On success the
         * spare map becomes this slot's map and the old map becomes the new
         * spare -- recycling maps avoids a create/destroy per packet.
         */
        if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
            mp, segs, &count, BUS_DMA_NOWAIT)) {
            XGE_DRV_STATS(rx_map_fail);
            m_freem(mp);
            XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
        }

        /* Update descriptor private data */
        rxd_priv->bufferArray[index]         = mp;
        rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
        rxd_priv->dmainfo[index].dma_map     = lldev->extra_dma_map;
        lldev->extra_dma_map = map;

        /*
         * Sync the old map before unloading it -- the mbuf it covered is on
         * its way up the stack.  NOTE(review): the original comment said
         * "Pre-Read/Write sync" but the flag is BUS_DMASYNC_POSTREAD;
         * confirm intent.
         */
        bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);

        /* Unload DMA map of mbuf in current descriptor */
        bus_dmamap_unload(lldev->dma_tag_rx, map);

        /*
         * Set descriptor buffer (1-buffer mode only; multi-buffer modes are
         * programmed by xge_get_buf_3b_5b after all slots are filled)
         */
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
            xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
                cluster_size);
        }

getbuf_out:
        return status;
}
3200
3201 /**
3202  * xge_get_buf_3b_5b
3203  * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3204  *
3205  * @dtrh Descriptor Handle
3206  * @rxd_priv Rx Descriptor Private Data
3207  * @lldev Per-adapter Data
3208  *
3209  * Returns XGE_HAL_OK or HAL error enums
3210  */
3211 int
3212 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3213         xge_lldev_t *lldev)
3214 {
3215         bus_addr_t  dma_pointers[5];
3216         int         dma_sizes[5];
3217         int         status = XGE_HAL_OK, index;
3218         int         newindex = 0;
3219
3220         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3221             status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3222             if(status != XGE_HAL_OK) {
3223                 for(newindex = 0; newindex < index; newindex++) {
3224                     m_freem(rxd_priv->bufferArray[newindex]);
3225                 }
3226                 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3227             }
3228         }
3229
3230         for(index = 0; index < lldev->buffer_mode; index++) {
3231             if(lldev->rxd_mbuf_len[index] != 0) {
3232                 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3233                 dma_sizes[index]    = lldev->rxd_mbuf_len[index];
3234             }
3235             else {
3236                 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3237                 dma_sizes[index]    = 1;
3238             }
3239         }
3240
3241         /* Assigning second buffer to third pointer in 2 buffer mode */
3242         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3243             dma_pointers[2] = dma_pointers[1];
3244             dma_sizes[2]    = dma_sizes[1];
3245             dma_sizes[1]    = 1;
3246         }
3247
3248         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3249             xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3250         }
3251         else {
3252             xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3253         }
3254
3255 _exit:
3256         return status;
3257 }
3258
/**
 * xge_tx_compl
 * If the interrupt is due to Tx completion, free the sent buffer
 *
 * @channelh Channel Handle
 * @dtr Descriptor
 * @t_code Transfer Code indicating success or error
 * @userdata Per-adapter Data
 *
 * Returns XGE_HAL_OK or HAL error enum
 */
xge_hal_status_e
xge_tx_compl(xge_hal_channel_h channelh,
        xge_hal_dtr_h dtr, u8 t_code, void *userdata)
{
        xge_tx_priv_t *ll_tx_priv = NULL;
        xge_lldev_t   *lldev  = (xge_lldev_t *)userdata;
        struct ifnet  *ifnetp = lldev->ifnetp;
        mbuf_t         m_buffer = NULL;
        int            qindex   = xge_hal_channel_id(channelh);

        /* Serialize with the transmit path for this FIFO */
        mtx_lock(&lldev->mtx_tx[qindex]);

        XGE_DRV_STATS(tx_completions);

        /*
         * For each completed descriptor: Get private structure, free buffer,
         * do unmapping, and free descriptor.  Note the do-while: the
         * descriptor passed in by the caller is processed first, before
         * polling the channel for further completions.
         */
        do {
            XGE_DRV_STATS(tx_desc_compl);

            if(t_code) {
                XGE_DRV_STATS(tx_tcode);
                xge_trace(XGE_TRACE, "t_code %d", t_code);
                xge_hal_device_handle_tcode(channelh, dtr, t_code);
            }

            ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
            m_buffer   = ll_tx_priv->buffer;
            bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
            m_freem(m_buffer);
            ll_tx_priv->buffer = NULL;
            xge_hal_fifo_dtr_free(channelh, dtr);
        } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
            == XGE_HAL_OK);
        /* Descriptors were reclaimed: try transmitting queued packets */
        xge_send_locked(ifnetp, qindex);
        ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        mtx_unlock(&lldev->mtx_tx[qindex]);

        return XGE_HAL_OK;
}
3312
3313 /**
3314  * xge_tx_initial_replenish
3315  * Initially allocate buffers and set them into descriptors for later use
3316  *
3317  * @channelh Tx Channel Handle
3318  * @dtrh Descriptor Handle
3319  * @index
3320  * @userdata Per-adapter Data
3321  * @reopen Channel open/reopen option
3322  *
3323  * Returns XGE_HAL_OK or HAL error enums
3324  */
3325 xge_hal_status_e
3326 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3327         int index, void *userdata, xge_hal_channel_reopen_e reopen)
3328 {
3329         xge_tx_priv_t *txd_priv = NULL;
3330         int            status   = XGE_HAL_OK;
3331
3332         /* Get the user data portion from channel handle */
3333         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3334         if(lldev == NULL) {
3335             XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3336                 XGE_HAL_FAIL);
3337         }
3338
3339         /* Get the private data */
3340         txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3341         if(txd_priv == NULL) {
3342             XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3343                 XGE_HAL_FAIL);
3344         }
3345
3346         /* Create DMA map for this descriptor */
3347         if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3348             &txd_priv->dma_map)) {
3349             XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3350                 txinit_out, XGE_HAL_FAIL);
3351         }
3352
3353 txinit_out:
3354         return status;
3355 }
3356
3357 /**
3358  * xge_rx_initial_replenish
3359  * Initially allocate buffers and set them into descriptors for later use
3360  *
3361  * @channelh Tx Channel Handle
3362  * @dtrh Descriptor Handle
3363  * @index Ring Index
3364  * @userdata Per-adapter Data
3365  * @reopen Channel open/reopen option
3366  *
3367  * Returns XGE_HAL_OK or HAL error enums
3368  */
3369 xge_hal_status_e
3370 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3371         int index, void *userdata, xge_hal_channel_reopen_e reopen)
3372 {
3373         xge_rx_priv_t  *rxd_priv = NULL;
3374         int             status   = XGE_HAL_OK;
3375         int             index1 = 0, index2 = 0;
3376
3377         /* Get the user data portion from channel handle */
3378         xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3379         if(lldev == NULL) {
3380             XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3381                 XGE_HAL_FAIL);
3382         }
3383
3384         /* Get the private data */
3385         rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3386         if(rxd_priv == NULL) {
3387             XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3388                 XGE_HAL_FAIL);
3389         }
3390
3391         rxd_priv->bufferArray = xge_os_malloc(NULL,
3392                 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3393
3394         if(rxd_priv->bufferArray == NULL) {
3395             XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3396                 XGE_HAL_FAIL);
3397         }
3398
3399         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3400             /* Create DMA map for these descriptors*/
3401             if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3402                 &rxd_priv->dmainfo[0].dma_map)) {
3403                 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3404                     rxinit_err_out, XGE_HAL_FAIL);
3405             }
3406             /* Get a buffer, attach it to this descriptor */
3407             status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3408         }
3409         else {
3410             for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3411                 /* Create DMA map for this descriptor */
3412                 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3413                     &rxd_priv->dmainfo[index1].dma_map)) {
3414                     for(index2 = index1 - 1; index2 >= 0; index2--) {
3415                         bus_dmamap_destroy(lldev->dma_tag_rx,
3416                             rxd_priv->dmainfo[index2].dma_map);
3417                     }
3418                     XGE_EXIT_ON_ERR(
3419                         "Jumbo DMA map creation for Rx descriptor failed",
3420                         rxinit_err_out, XGE_HAL_FAIL);
3421                 }
3422             }
3423             status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3424         }
3425
3426         if(status != XGE_HAL_OK) {
3427             for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3428                 bus_dmamap_destroy(lldev->dma_tag_rx,
3429                     rxd_priv->dmainfo[index1].dma_map);
3430             }
3431             goto rxinit_err_out;
3432         }
3433         else {
3434             goto rxinit_out;
3435         }
3436
3437 rxinit_err_out:
3438         xge_os_free(NULL, rxd_priv->bufferArray,
3439             (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3440 rxinit_out:
3441         return status;
3442 }
3443
3444 /**
3445  * xge_rx_term
3446  * During unload terminate and free all descriptors
3447  *
3448  * @channelh Rx Channel Handle
3449  * @dtrh Rx Descriptor Handle
3450  * @state Descriptor State
3451  * @userdata Per-adapter Data
3452  * @reopen Channel open/reopen option
3453  */
3454 void
3455 xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3456         xge_hal_dtr_state_e state, void *userdata,
3457         xge_hal_channel_reopen_e reopen)
3458 {
3459         xge_rx_priv_t *rxd_priv = NULL;
3460         xge_lldev_t   *lldev    = NULL;
3461         int            index = 0;
3462
3463         /* Descriptor state is not "Posted" */
3464         if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3465
3466         /* Get the user data portion */
3467         lldev = xge_hal_channel_userdata(channelh);
3468
3469         /* Get the private data */
3470         rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3471
3472         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3473             if(rxd_priv->dmainfo[index].dma_map != NULL) {
3474                 bus_dmamap_sync(lldev->dma_tag_rx,
3475                     rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3476                 bus_dmamap_unload(lldev->dma_tag_rx,
3477                     rxd_priv->dmainfo[index].dma_map);
3478                 if(rxd_priv->bufferArray[index] != NULL)
3479                     m_free(rxd_priv->bufferArray[index]);
3480                 bus_dmamap_destroy(lldev->dma_tag_rx,
3481                     rxd_priv->dmainfo[index].dma_map);
3482             }
3483         }
3484         xge_os_free(NULL, rxd_priv->bufferArray,
3485             (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3486
3487         /* Free the descriptor */
3488         xge_hal_ring_dtr_free(channelh, dtrh);
3489
3490 rxterm_out:
3491         return;
3492 }
3493
3494 /**
3495  * xge_tx_term
3496  * During unload terminate and free all descriptors
3497  *
3498  * @channelh Rx Channel Handle
3499  * @dtrh Rx Descriptor Handle
3500  * @state Descriptor State
3501  * @userdata Per-adapter Data
3502  * @reopen Channel open/reopen option
3503  */
3504 void
3505 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3506         xge_hal_dtr_state_e state, void *userdata,
3507         xge_hal_channel_reopen_e reopen)
3508 {
3509         xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3510         xge_lldev_t   *lldev      = (xge_lldev_t *)userdata;
3511
3512         /* Destroy DMA map */
3513         bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3514 }
3515
3516 /**
3517  * xge_methods
3518  *
3519  * FreeBSD device interface entry points
3520  */
3521 static device_method_t xge_methods[] = {
3522         DEVMETHOD(device_probe,     xge_probe),
3523         DEVMETHOD(device_attach,    xge_attach),
3524         DEVMETHOD(device_detach,    xge_detach),
3525         DEVMETHOD(device_shutdown,  xge_shutdown),
3526
3527         DEVMETHOD_END
3528 };
3529
/* newbus driver description: name, method table, and softc size the bus
 * framework allocates per device instance. */
static driver_t xge_driver = {
        "nxge",
        xge_methods,
        sizeof(xge_lldev_t),
};
static devclass_t xge_devclass;
/* Register the driver on the PCI bus; no module event handler or argument. */
DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3537