2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002-2007 Neterion, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <dev/nxge/if_nxge.h>
32 #include <dev/nxge/xge-osdep.h>
33 #include <net/if_arp.h>
34 #include <sys/types.h>
36 #include <net/if_var.h>
37 #include <net/if_vlan_var.h>
/* Set after the copyright banner is printed once (shared across all probes). */
int copyright_print = 0;
/* Count of attachments sharing the one-time HAL driver init/terminate. */
int hal_driver_init_count = 0;
/* Byte size used for integer kenv/sysctl value lookups. */
size_t size = sizeof(int);

/* Forward declaration; reclaims completed Tx descriptors (definition below). */
static void inline xge_flush_txds(xge_hal_channel_h);
47 * Probes for Xframe devices
52 * BUS_PROBE_DEFAULT if device is supported
53 * ENXIO if device is not supported
/*
 * Probe for supported Xframe adapters.  Matches the Neterion vendor ID and
 * the Xena-2/Herc-2 device IDs, prints the copyright banner once per load,
 * and sets the newbus device description.
 * NOTE(review): the declaration and final return of retValue are not visible
 * in this chunk — presumably ENXIO by default, returned at the end; confirm.
 */
xge_probe(device_t dev)
    /* PCI device/vendor IDs of the candidate device. */
    int devid = pci_get_device(dev);
    int vendorid = pci_get_vendor(dev);

    if(vendorid == XGE_PCI_VENDOR_ID) {
        if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
            (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
            /* Banner is printed only for the first device probed. */
            if(!copyright_print) {
                xge_os_printf(XGE_COPYRIGHT);
            device_set_desc_copy(dev,
                "Neterion Xframe 10 Gigabit Ethernet Adapter");
            /* BUS_PROBE_DEFAULT tells newbus this driver can attach. */
            retValue = BUS_PROBE_DEFAULT;
80 * Sets HAL parameter values (from kenv).
82 * @dconfig Device Configuration
/*
 * Populate the HAL device configuration from hard defaults and kenv(2)
 * tunables (hw.xge.*), covering device-wide, MAC, FIFO (Tx) per-queue/TTI
 * and ring (Rx) per-queue/RTI parameters, then sanity-check fifo.max_frags
 * and work around an AMD 8131 PCI bridge erratum.
 *
 * @dconfig Device configuration to fill in
 * @dev     Newbus device handle (soft state holds per-adapter lldev)
 */
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
    int qindex, tindex, revision;
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

    /* Fixed (non-tunable) device-wide defaults. */
    dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
    dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
    dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
    dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
    dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
    dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;

    /* Driver-level feature toggles stored in the per-adapter softc. */
    XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
        XGE_DEFAULT_ENABLED_TSO);
    XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
        XGE_DEFAULT_ENABLED_LRO);
    XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
        XGE_DEFAULT_ENABLED_MSI);

    /* PCI/PCI-X behaviour tunables applied to the HAL device config. */
    XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
        XGE_DEFAULT_LATENCY_TIMER);
    XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
        XGE_DEFAULT_MAX_SPLITS_TRANS);
    XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
        XGE_DEFAULT_MMRB_COUNT);
    XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
        XGE_DEFAULT_SHARED_SPLITS);
    XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
        XGE_DEFAULT_ISR_POLLING_CNT);
    XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
        stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

    /* MAC utilization-period and flow-control (pause) tunables. */
    XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
        XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
        XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
        XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
        XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
        XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
    XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
        mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
    XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
        mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

    /* Tx FIFO-wide tunables. */
    XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
        XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
    XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
        XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
    XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
        XGE_DEFAULT_FIFO_MAX_FRAGS);

    /* Per-Tx-queue tunables, plus per-TTI interrupt-moderation settings. */
    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
            XGE_DEFAULT_FIFO_QUEUE_INTR);
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
            XGE_DEFAULT_FIFO_QUEUE_MAX);
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
            qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

        for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
            dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
            dconfig->fifo.queue[qindex].configured = 1;

            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
                urange_a, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
                urange_b, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
                urange_c, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
                ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
                ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
                ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
                ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);

    /* Rx ring-wide tunables. */
    XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
        XGE_DEFAULT_RING_MEMBLOCK_SIZE);

    XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
        XGE_DEFAULT_RING_STRIP_VLAN_TAG);

    /* Rx buffer mode: only 1 and 2 are supported; fall back to 1. */
    XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
        XGE_DEFAULT_BUFFER_MODE);
    if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
        (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
        xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
        lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;

    /* Per-Rx-queue tunables, plus per-RTI interrupt-moderation settings. */
    for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
        dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
        dconfig->ring.queue[qindex].priority = 0;
        dconfig->ring.queue[qindex].configured = 1;
        /* User-visible mode 2 is implemented via hardware buffer mode 3. */
        dconfig->ring.queue[qindex].buffer_mode =
            (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
            XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
            XGE_DEFAULT_RING_QUEUE_MAX);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
            qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
            dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
            indicate_max_pkts, qindex,
            XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
            backoff_interval_us, qindex,
            XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
            timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
            timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
            urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
            urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
            urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);

    /* Clamp fifo.max_frags to the Tx descriptor-block capacity. */
    if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
        xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
        xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
            (int)(PAGE_SIZE / 32))
        xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
        dconfig->fifo.max_frags = (PAGE_SIZE / 32);

    /* AMD 8131 bridge (rev <= 0x12) workaround: shrink MMRB/split counts. */
    checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
    if(checkdev != NULL) {
        /* Check Revision for 0x12 */
        revision = pci_read_config(checkdev,
            xge_offsetof(xge_hal_pci_config_t, revision), 1);
        if(revision <= 0x12) {
            /* Set mmrb_count to 1k and max splits = 2 */
            dconfig->mmrb_count = 1;
            dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
 * xge_rx_buffer_sizes_set
260 * Set buffer sizes based on Rx buffer mode
262 * @lldev Per-adapter Data
263 * @buffer_mode Rx Buffer Mode
/*
 * Compute the per-descriptor mbuf sizes (lldev->rxd_mbuf_len[]) and count
 * (lldev->rxd_mbuf_cnt) for the given Rx buffer mode and MTU.  For mode 5
 * the frame is split into header / TCP-IP header / MJUMPAGESIZE chunks with
 * the remainder aligned up to 128 bytes.
 *
 * @lldev       Per-adapter data
 * @buffer_mode Rx buffer mode (1..5)
 * @mtu         Current interface MTU
 */
xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
    int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
    int buffer_size = mtu + frame_header;

    /* Start from a clean slate; unused slots stay zero. */
    xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));

    if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
        lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;

    /* Slot 0 carries the whole frame in mode 1, else just the MAC header. */
    lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;

    if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
        lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;

    if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
        /* Split the payload into page-sized jumbo chunks plus a remainder. */
        buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
        while(buffer_size > MJUMPAGESIZE) {
            lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
            buffer_size -= MJUMPAGESIZE;
        XGE_ALIGN_TO(buffer_size, 128);
        lldev->rxd_mbuf_len[index] = buffer_size;
        lldev->rxd_mbuf_cnt = index + 1;

    /* Trace the resulting size table for debugging. */
    for(index = 0; index < buffer_mode; index++)
        xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
            lldev->rxd_mbuf_len[index]);
300 * xge_buffer_mode_init
301 * Init Rx buffer mode
303 * @lldev Per-adapter Data
/*
 * Initialize the Rx buffer mode for the current MTU: toggle IFCAP_LRO on the
 * ifnet per the driver tunable, program the ring buffer/scatter mode (user
 * mode 2 maps to hardware mode 3 with scatter mode B), compute buffer sizes,
 * and report the chosen configuration on the console.
 *
 * @lldev Per-adapter data
 * @mtu   Current interface MTU
 */
xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
    int index = 0, buffer_size = 0;
    xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);

    buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;

    /* Reflect the LRO tunable in the interface capabilities. */
    if(lldev->enabled_lro)
        (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
        (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;

    lldev->rxd_mbuf_cnt = lldev->buffer_mode;
    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
        /* Mode 2 is realized as hardware mode 3 + scatter mode B. */
        XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
        ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
        XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
        ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;

    xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);

    /* Announce the effective TSO/LRO/buffer-mode configuration. */
    xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
        ((lldev->enabled_tso) ? "Enabled":"Disabled"));
    xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
        ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
    xge_os_printf("%s: Rx %d Buffer Mode Enabled",
        device_get_nameunit(lldev->device), lldev->buffer_mode);
339 * xge_driver_initialize
340 * Initializes HAL driver (common for all devices)
343 * XGE_HAL_OK if success
344 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
/*
 * One-time HAL driver initialization shared by all adapters: sets event-queue
 * sizes, registers the link/error/event callbacks, and bumps the init
 * refcount.  Subsequent calls are no-ops while the refcount is non-zero.
 *
 * Returns XGE_HAL_OK on success, or the HAL error code on failure.
 */
xge_driver_initialize(void)
    xge_hal_uld_cbs_t uld_callbacks;
    xge_hal_driver_config_t driver_config;
    xge_hal_status_e status = XGE_HAL_OK;

    /* Initialize HAL driver */
    if(!hal_driver_init_count) {
        xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
        xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));

        /*
         * Initial and maximum size of the queue used to store the events
         * like Link up/down (xge_hal_event_e)
         */
        driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
        driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;

        /* Upper-layer-driver callbacks invoked by the HAL. */
        uld_callbacks.link_up = xge_callback_link_up;
        uld_callbacks.link_down = xge_callback_link_down;
        uld_callbacks.crit_err = xge_callback_crit_err;
        uld_callbacks.event = xge_callback_event;

        status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
        if(status != XGE_HAL_OK) {
            XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",

    /* Refcount matches the decrement in xge_resources_free(). */
    hal_driver_init_count = hal_driver_init_count + 1;

    /* Enable all debug modules/levels for tracing. */
    xge_hal_driver_debug_module_mask_set(0xffffffff);
    xge_hal_driver_debug_level_set(XGE_TRACE);
387 * Initializes, adds and sets media
389 * @devc Device Handle
/*
 * Initialize the ifmedia structure, register the supported media types
 * (1000SX, 10G SR/LR, autoselect) and select autoselect as current media.
 *
 * @devc Newbus device handle
 */
xge_media_init(device_t devc)
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);

    /* Initialize Media */
    ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,

    /* Add supported media */
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);

    /* Default to autoselect. */
    ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
413 * Save PCI configuration space
/*
 * Save the device's PCI configuration space via the bus layer so it can be
 * restored after a reset (see xge_pci_space_restore()).
 *
 * @dev Newbus device handle
 */
xge_pci_space_save(device_t dev)
    struct pci_devinfo *dinfo = NULL;

    dinfo = device_get_ivars(dev);
    xge_trace(XGE_TRACE, "Saving PCI configuration space");
    pci_cfg_save(dev, dinfo, 0);
428 * xge_pci_space_restore
429 * Restore saved PCI configuration space
/*
 * Restore the PCI configuration space previously captured by
 * xge_pci_space_save().
 *
 * @dev Newbus device handle
 */
xge_pci_space_restore(device_t dev)
    struct pci_devinfo *dinfo = NULL;

    dinfo = device_get_ivars(dev);
    xge_trace(XGE_TRACE, "Restoring PCI configuration space");
    pci_cfg_restore(dev, dinfo);
447 * @lldev Per-adapter Data
/*
 * Snapshot the MSI capability registers (control, address, data) from PCI
 * config space into lldev->msi_info so they can be re-applied later by
 * xge_msi_info_restore().
 *
 * @lldev Per-adapter data
 */
xge_msi_info_save(xge_lldev_t * lldev)
    xge_os_pci_read16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_control),
        &lldev->msi_info.msi_control);
    xge_os_pci_read32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
        &lldev->msi_info.msi_lower_address);
    xge_os_pci_read32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
        &lldev->msi_info.msi_higher_address);
    xge_os_pci_read16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_data),
        &lldev->msi_info.msi_data);
467 * xge_msi_info_restore
468 * Restore saved MSI info
/*
 * Re-program the MSI capability registers from the values saved by
 * xge_msi_info_save().
 *
 * @lldev Per-adapter data
 */
xge_msi_info_restore(xge_lldev_t *lldev)
    /*
     * If interface is made down and up, traffic fails. It was observed that
     * MSI information were getting reset on down. Restoring them.
     */
    xge_os_pci_write16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_control),
        lldev->msi_info.msi_control);

    xge_os_pci_write32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
        lldev->msi_info.msi_lower_address);

    xge_os_pci_write32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
        lldev->msi_info.msi_higher_address);

    xge_os_pci_write16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_data),
        lldev->msi_info.msi_data);
498 * Initializes mutexes used in driver
500 * @lldev Per-adapter Data
/*
 * Create the driver-wide mutex plus one Tx mutex per FIFO queue, each named
 * after the device unit.  Paired with xge_mutex_destroy().
 *
 * @lldev Per-adapter data
 */
xge_mutex_init(xge_lldev_t *lldev)
    /* Driver-level lock, flagged as a network lock for WITNESS. */
    sprintf(lldev->mtx_name_drv, "%s_drv",
        device_get_nameunit(lldev->device));
    mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,

    /* One Tx lock per FIFO queue. */
    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
            device_get_nameunit(lldev->device), qindex);
        mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
522 * Destroys mutexes used in driver
524 * @lldev Per-adapter Data
/*
 * Tear down all mutexes created by xge_mutex_init() (per-queue Tx locks
 * first, then the driver lock).
 *
 * @lldev Per-adapter data
 */
xge_mutex_destroy(xge_lldev_t *lldev)
    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
        mtx_destroy(&lldev->mtx_tx[qindex]);
    mtx_destroy(&lldev->mtx_drv);
538 * Print device and driver information
540 * @lldev Per-adapter Data
/*
 * Print adapter identification to the console: model/revision/driver
 * version, serial number, PCI bus width and speed (Herc only, decoded from
 * the BAR0 pci_info register), and the interrupt mode in use.
 *
 * @lldev Per-adapter data
 */
xge_print_info(xge_lldev_t *lldev)
    device_t dev = lldev->device;
    xge_hal_device_t *hldev = lldev->devh;
    xge_hal_status_e status = XGE_HAL_OK;

    /* Decode table for the bus-speed field of the pci_info register. */
    const char *xge_pci_bus_speeds[17] = {
        "PCIX(M1) 66MHz Bus",
        "PCIX(M1) 100MHz Bus",
        "PCIX(M1) 133MHz Bus",
        "PCIX(M2) 133MHz Bus",
        "PCIX(M2) 200MHz Bus",
        "PCIX(M2) 266MHz Bus",
        "PCIX(M1) 66MHz Bus (Not Supported)",
        "PCIX(M1) 100MHz Bus (Not Supported)",
        "PCIX(M1) 133MHz Bus (Not Supported)",

    xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
        device_get_nameunit(dev),
        ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
        hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
    xge_os_printf("%s: Serial Number %s",
        device_get_nameunit(dev), hldev->vpd_data.serial_num);

    /* Bus width/speed are only reported on Herc (Xframe II). */
    if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
        status = xge_hal_mgmt_reg_read(hldev, 0,
            xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
        if(status != XGE_HAL_OK)
            xge_trace(XGE_ERR, "Error for getting bus speed");

        /* Bit 8 distinguishes 32- vs 64-bit; bits 60+ index the table. */
        xge_os_printf("%s: Adapter is on %s bit %s",
            device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
            (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));

    xge_os_printf("%s: Using %s Interrupts",
        device_get_nameunit(dev),
        (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
593 * xge_create_dma_tags
594 * Creates DMA tags for both Tx and Rx
598 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
/*
 * Create the Tx and Rx busdma tags and the spare Rx DMA map.  The Tx tag
 * allows XGE_MAX_SEGS segments of MCLBYTES each; the Rx tag is one segment
 * sized by MTU and buffer mode (cluster, jumbo page, or 9K jumbo).
 * On any failure, previously created tags are destroyed.
 *
 * @dev Newbus device handle
 *
 * Returns XGE_HAL_OK on success, XGE_HAL_FAIL otherwise.
 */
xge_create_dma_tags(device_t dev)
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
    xge_hal_status_e status = XGE_HAL_FAIL;
    int mtu = (lldev->ifnetp)->if_mtu, maxsize;

    /* Tx tag: multi-segment, one cluster per segment. */
    status = bus_dma_tag_create(
        bus_get_dma_tag(dev),                /* Parent */
        PAGE_SIZE,                           /* Alignment */
        BUS_SPACE_MAXADDR,                   /* Low Address */
        BUS_SPACE_MAXADDR,                   /* High Address */
        NULL,                                /* Filter Function */
        NULL,                                /* Filter Function Arguments */
        MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size */
        XGE_MAX_SEGS,                        /* Number of Segments */
        MCLBYTES,                            /* Maximum Segment Size */
        BUS_DMA_ALLOCNOW,                    /* Flags */
        NULL,                                /* Lock Function */
        NULL,                                /* Lock Function Arguments */
        (&lldev->dma_tag_tx));               /* DMA Tag */

    /* Pick the Rx buffer size class for the current MTU / buffer mode. */
    maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
    if(maxsize <= MCLBYTES) {
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
            maxsize = MJUMPAGESIZE;
            maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;

    /* Rx tag: a single segment of the chosen size. */
    status = bus_dma_tag_create(
        bus_get_dma_tag(dev),                /* Parent */
        PAGE_SIZE,                           /* Alignment */
        BUS_SPACE_MAXADDR,                   /* Low Address */
        BUS_SPACE_MAXADDR,                   /* High Address */
        NULL,                                /* Filter Function */
        NULL,                                /* Filter Function Arguments */
        maxsize,                             /* Maximum Size */
        1,                                   /* Number of Segments */
        maxsize,                             /* Maximum Segment Size */
        BUS_DMA_ALLOCNOW,                    /* Flags */
        NULL,                                /* Lock Function */
        NULL,                                /* Lock Function Arguments */
        (&lldev->dma_tag_rx));               /* DMA Tag */

    /* Spare map used when replacing an Rx buffer fails mid-stream. */
    status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
        &lldev->extra_dma_map);

    /* Error path: unwind whatever tags were created. */
    status = bus_dma_tag_destroy(lldev->dma_tag_rx);
        xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
    status = bus_dma_tag_destroy(lldev->dma_tag_tx);
        xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
    status = XGE_HAL_FAIL;
678 * xge_confirm_changes
679 * Disables and Enables interface to apply requested change
681 * @lldev Per-adapter Data
682 * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0)
684 * Returns 0 or Error Number
/*
 * Apply a configuration change (MTU or LRO) by bouncing the interface:
 * stop the device under the driver lock, apply the change, re-init buffer
 * mode, then restart.  If the device was never initialized, only the MTU
 * and buffer sizes are updated.
 *
 * @lldev  Per-adapter data
 * @option XGE_SET_MTU or XGE_CHANGE_LRO
 */
xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
    /* Device not up yet: just record the new settings. */
    if(lldev->initialized == 0) goto _exit1;

    mtx_lock(&lldev->mtx_drv);
    if_down(lldev->ifnetp);
    xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);

    if(option == XGE_SET_MTU)
        (lldev->ifnetp)->if_mtu = lldev->mtu;

    xge_buffer_mode_init(lldev, lldev->mtu);

    xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
    if_up(lldev->ifnetp);
    mtx_unlock(&lldev->mtx_drv);

    /* Request was to change MTU and device not initialized */
    if(option == XGE_SET_MTU) {
        (lldev->ifnetp)->if_mtu = lldev->mtu;
        xge_buffer_mode_init(lldev, lldev->mtu);
716 * xge_change_lro_status
717 * Enable/Disable LRO feature
719 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
721 * Returns 0 or error number.
/*
 * sysctl handler for hw.xge.*.enable_lro: validates the requested value
 * (0 or 1), ignores no-op requests, and applies the change by bouncing the
 * interface via xge_confirm_changes().
 *
 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
 *
 * Returns 0 or error number.
 */
xge_change_lro_status(SYSCTL_HANDLER_ARGS)
    xge_lldev_t *lldev = (xge_lldev_t *)arg1;
    int request = lldev->enabled_lro, status = XGE_HAL_OK;

    /* Read-only access (no newptr) returns the current value. */
    status = sysctl_handle_int(oidp, &request, arg2, req);
    if((status != XGE_HAL_OK) || (!req->newptr))

    /* Only 0 (disable) and 1 (enable) are accepted. */
    if((request < 0) || (request > 1)) {

    /* Return if current and requested states are same */
    if(request == lldev->enabled_lro){
        xge_trace(XGE_ERR, "LRO is already %s",
            ((request) ? "enabled" : "disabled"));

    lldev->enabled_lro = request;
    xge_confirm_changes(lldev, XGE_CHANGE_LRO);
    arg2 = lldev->enabled_lro;
754 * xge_add_sysctl_handlers
755 * Registers sysctl parameter value update handlers
757 * @lldev Per-adapter data
/*
 * Register the per-device sysctl handlers under the device's sysctl tree
 * (currently only enable_lro, dispatched to xge_change_lro_status()).
 *
 * @lldev Per-adapter data
 */
xge_add_sysctl_handlers(xge_lldev_t *lldev)
    struct sysctl_ctx_list *context_list =
        device_get_sysctl_ctx(lldev->device);
    struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);

    SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
        "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
        xge_change_lro_status, "I", "Enable or disable LRO feature");
773 * Connects driver to the system if probe was success
/*
 * Device attach: allocate configuration and HAL device structures, map BAR0
 * (registers) and BAR1 (doorbells), save PCI config space, read tunables,
 * set up MSI or line interrupts, initialize the HAL device, create the
 * ifnet, hook the ISR, and register sysctl handlers.  Every failure path
 * unwinds through xge_resources_free() with the matching label.
 *
 * @dev Newbus device handle
 */
xge_attach(device_t dev)
    xge_hal_device_config_t *device_config;
    xge_hal_device_attr_t attr;
    xge_hal_device_t *hldev;
    xge_pci_info_t *pci_info;
    struct ifnet *ifnetp;
    int rid, rid0, rid1, error;
    int msi_count = 0, status = XGE_HAL_OK;
    int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

    /* HAL device configuration filled in by xge_init_params() below. */
    device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
        XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
            attach_out_config, ENOMEM);

    lldev = (xge_lldev_t *) device_get_softc(dev);
        XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);

    xge_mutex_init(lldev);

    /* Shared (refcounted) HAL driver initialization. */
    error = xge_driver_initialize();
    if(error != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_mutex);
        XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);

        (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
        xge_resources_free(dev, xge_free_terminate_hal_driver);
        XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",

    /* Our private structure */
        (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
        xge_resources_free(dev, xge_free_hal_device);
        XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
    lldev->pdev = pci_info;
    pci_info->device = dev;

    pci_enable_busmaster(dev);

    /* Get virtual address for BAR0 */
    pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
    if(pci_info->regmap0 == NULL) {
        xge_resources_free(dev, xge_free_pci_info);
        XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
    attr.bar0 = (char *)pci_info->regmap0;

    /* Record BAR0 bus tag/handle for the OS-dependence layer. */
    pci_info->bar0resource = (xge_bus_resource_t*)
        xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
    if(pci_info->bar0resource == NULL) {
        xge_resources_free(dev, xge_free_bar0);
        XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
        rman_get_bustag(pci_info->regmap0);
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
        rman_get_bushandle(pci_info->regmap0);
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =

    /* Get virtual address for BAR1 */
    pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
    if(pci_info->regmap1 == NULL) {
        xge_resources_free(dev, xge_free_bar0_resource);
        XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
    attr.bar1 = (char *)pci_info->regmap1;

    /* Record BAR1 bus tag/handle for the OS-dependence layer. */
    pci_info->bar1resource = (xge_bus_resource_t*)
        xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
    if(pci_info->bar1resource == NULL) {
        xge_resources_free(dev, xge_free_bar1);
        XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
        rman_get_bustag(pci_info->regmap1);
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
        rman_get_bushandle(pci_info->regmap1);
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =

    /* Save PCI config space */
    xge_pci_space_save(dev);

    /* HAL attach attributes: register handles and PCI accessors. */
    attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
    attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
    attr.irqh = lldev->irqhandle;
    attr.cfgh = pci_info;
    attr.pdev = pci_info;

    /* Initialize device configuration parameters */
    xge_init_params(device_config, dev);

    /* Try MSI first when enabled by the tunable; fall back to INTx. */
    if(lldev->enabled_msi) {
        /* Number of MSI messages supported by device */
        msi_count = pci_msi_count(dev);

        /* Device supports MSI */
            xge_trace(XGE_ERR, "MSI count: %d", msi_count);
            xge_trace(XGE_ERR, "Now, driver supporting 1 message");
            error = pci_alloc_msi(dev, &msi_count);
                xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
                enable_msi = XGE_HAL_INTR_MODE_MSI;
                xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
    lldev->enabled_msi = enable_msi;

    /* Allocate resource for irq */
    lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        (RF_SHAREABLE | RF_ACTIVE));
    if(lldev->irq == NULL) {
        xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
            ((rid == 0) ? "line interrupt" : "MSI"));
            /* MSI IRQ allocation failed: release and retry with INTx. */
            error = pci_release_msi(dev);
                xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
                xge_trace(XGE_ERR, "Requires reboot to use MSI again");
            xge_trace(XGE_ERR, "Trying line interrupts");
            lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
            lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                (RF_SHAREABLE | RF_ACTIVE));
        if(lldev->irq == NULL) {
            xge_trace(XGE_ERR, "Allocating irq resource failed");
            xge_resources_free(dev, xge_free_bar1_resource);

    device_config->intr_mode = lldev->enabled_msi;
    xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
        lldev->enabled_msi, msi_count);

    /* Initialize HAL device */
    error = xge_hal_device_initialize(hldev, &attr, device_config);
    if(error != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_irq_resource);
        XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,

    /* Link the HAL device back to our per-adapter data. */
    xge_hal_device_private_set(hldev, lldev);

    error = xge_interface_setup(dev);

    ifnetp = lldev->ifnetp;
    ifnetp->if_mtu = device_config->mtu;

    /* Associate interrupt handler with the device */
    if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
        error = bus_setup_intr(dev, lldev->irq,
            (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
            xge_isr_msi, lldev, &lldev->irqhandle);
        /* MSI registers are clobbered on if-down; keep a copy to restore. */
        xge_msi_info_save(lldev);
        error = bus_setup_intr(dev, lldev->irq,
            (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
            xge_isr_line, lldev, &lldev->irqhandle);
        xge_resources_free(dev, xge_free_media_interface);
        XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",

    xge_print_info(lldev);

    xge_add_sysctl_handlers(lldev);

    xge_buffer_mode_init(lldev, device_config->mtu);

    /* The local config copy is no longer needed after HAL init. */
    xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1013 * xge_resources_free
1014 * Undo what-all we did during load/attach
1016 * @dev Device Handle
1017 * @error Identifies what-all to undo
/*
 * Unwind attach-time allocations.  The label identifies how far attach got;
 * the switch relies on intentional fallthrough so that entering at any label
 * releases that resource and everything acquired before it, in reverse
 * order of acquisition.
 *
 * @dev   Device Handle
 * @error Label identifying what-all to undo
 */
xge_resources_free(device_t dev, xge_lables_e error)
    xge_pci_info_t *pci_info;
    xge_hal_device_t *hldev;

    lldev = (xge_lldev_t *) device_get_softc(dev);
    pci_info = lldev->pdev;
    hldev = lldev->devh;

        /* Teardown interrupt handler - device association */
        bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);

    case xge_free_media_interface:
        ifmedia_removeall(&lldev->media);

        ether_ifdetach(lldev->ifnetp);
        if_free(lldev->ifnetp);

        xge_hal_device_private_set(hldev, NULL);
        xge_hal_device_disable(hldev);

    case xge_free_terminate_hal_device:
        xge_hal_device_terminate(hldev);

    case xge_free_irq_resource:
        /* Release IRQ resource */
        /* rid is 0 for line interrupts, 1 for MSI. */
        bus_release_resource(dev, SYS_RES_IRQ,
            ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),

        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
            status = pci_release_msi(dev);
                "pci_release_msi returned %d", status);

    case xge_free_bar1_resource:
        /* Restore PCI configuration space */
        xge_pci_space_restore(dev);

        /* Free bar1resource */
        xge_os_free(NULL, pci_info->bar1resource,
            sizeof(xge_bus_resource_t));

        bus_release_resource(dev, SYS_RES_MEMORY, rid,

    case xge_free_bar0_resource:
        /* Free bar0resource */
        xge_os_free(NULL, pci_info->bar0resource,
            sizeof(xge_bus_resource_t));

        bus_release_resource(dev, SYS_RES_MEMORY, rid,

    case xge_free_pci_info:
        /* Disable Bus Master */
        pci_disable_busmaster(dev);

        /* Free pci_info_t */
        xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));

    case xge_free_hal_device:
        /* Free device configuration struct and HAL device */
        xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));

    case xge_free_terminate_hal_driver:
        /* Terminate HAL driver */
        /* Last adapter out terminates the shared HAL driver. */
        hal_driver_init_count = hal_driver_init_count - 1;
        if(!hal_driver_init_count) {
            xge_hal_driver_terminate();

    case xge_free_mutex:
        xge_mutex_destroy(lldev);
1121 * Detaches driver from the Kernel subsystem
1123 * @dev Device Handle
/*
 * Device detach: guard against re-entry with in_detach, then release all
 * resources via xge_resources_free(xge_free_all).
 *
 * @dev Device Handle
 */
xge_detach(device_t dev)
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

    if(lldev->in_detach == 0) {
        lldev->in_detach = 1;

        xge_resources_free(dev, xge_free_all);
1141 * To shutdown device before system shutdown
1143 * @dev Device Handle
/*
 * Shutdown method: quiesce the device before system shutdown.
 * NOTE(review): the body beyond fetching the softc is not visible in this
 * chunk — presumably it stops the device; confirm against the full file.
 *
 * @dev Device Handle
 */
xge_shutdown(device_t dev)
    xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1155 * xge_interface_setup
1158 * @dev Device Handle
1160 * Returns 0 on success, ENXIO/ENOMEM on failure
/*
 * Create and attach the network interface: read the MAC address from the
 * HAL, allocate the ifnet, set its callbacks/capabilities (VLAN, TSO4 and
 * LRO per tunables), and ether_ifattach() it.
 *
 * @dev Device Handle
 *
 * Returns 0 on success, ENXIO/ENOMEM on failure
 */
xge_interface_setup(device_t dev)
    u8 mcaddr[ETHER_ADDR_LEN];
    xge_hal_status_e status;
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
    struct ifnet *ifnetp;
    xge_hal_device_t *hldev = lldev->devh;

    /* Get the MAC address of the device */
    status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
    if(status != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_terminate_hal_device);
        XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);

    /* Get interface ifnet structure for this Ether device */
    ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
    if(ifnetp == NULL) {
        xge_resources_free(dev, xge_free_terminate_hal_device);
        XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);

    /* Initialize interface ifnet structure */
    if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
    ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
    ifnetp->if_baudrate = XGE_BAUDRATE;
    ifnetp->if_init = xge_init;
    ifnetp->if_softc = lldev;
    ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifnetp->if_ioctl = xge_ioctl;
    ifnetp->if_start = xge_send;

    /* TODO: Check and assign optimal value */
    ifnetp->if_snd.ifq_maxlen = ifqmaxlen;

    /* VLAN always on; TSO4/LRO only when the tunables enable them. */
    ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
    if(lldev->enabled_tso)
        ifnetp->if_capabilities |= IFCAP_TSO4;
    if(lldev->enabled_lro)
        ifnetp->if_capabilities |= IFCAP_LRO;

    ifnetp->if_capenable = ifnetp->if_capabilities;

    /* Attach the interface */
    ether_ifattach(ifnetp, mcaddr);
1215 * xge_callback_link_up
1216 * Callback for Link-up indication from HAL
1218 * @userdata Per-adapter data
1221 xge_callback_link_up(void *userdata)
1223 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1224 struct ifnet *ifnetp = lldev->ifnetp;
1226 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1227 if_link_state_change(ifnetp, LINK_STATE_UP);
1231 * xge_callback_link_down
1232 * Callback for Link-down indication from HAL
1234 * @userdata Per-adapter data
1237 xge_callback_link_down(void *userdata)
1239 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1240 struct ifnet *ifnetp = lldev->ifnetp;
1242 ifnetp->if_flags |= IFF_DRV_OACTIVE;
1243 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1247 * xge_callback_crit_err
1248 * Callback for Critical error indication from HAL
1250 * @userdata Per-adapter data
1251 * @type Event type (Enumerated hardware error)
1252 * @serr_data Hardware status
/*
 * Recovery policy is an unconditional full chip reset; the type and
 * serr_data arguments are not used in the visible body (only a trace
 * message is emitted before resetting).
 */
1255 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1257 xge_trace(XGE_ERR, "Critical Error");
1258 xge_reset(userdata);
1262 * xge_callback_event
1263 * Callback from HAL indicating that some event has been queued
1265 * @item Queued event item
/*
 * Serialized event handler.  TRY_XMIT_AGAIN resumes transmit when
 * descriptors are available again, otherwise it re-queues itself;
 * DEVICE_RESETTING triggers a full reset.
 */
1268 xge_callback_event(xge_queue_item_t *item)
1270 xge_lldev_t *lldev = NULL;
1271 xge_hal_device_t *hldev = NULL;
1272 struct ifnet *ifnetp = NULL;
1274 hldev = item->context;
1275 lldev = xge_hal_device_private(hldev);
1276 ifnetp = lldev->ifnetp;
1278 switch((int)item->event_type) {
1279 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1280 if(lldev->initialized) {
1281 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
/* NOTE(review): IFF_DRV_OACTIVE is an if_drv_flags bit per ifnet(9);
 * clearing it in if_flags here looks wrong — confirm and fix with the
 * link_up/link_down callbacks. */
1282 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
/* No descriptors free yet: push the same event back onto the HAL
 * queue so transmit is retried later. */
1285 xge_queue_produce_context(
1286 xge_hal_device_queue(lldev->devh),
1287 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1292 case XGE_LL_EVENT_DEVICE_RESETTING:
1293 xge_reset(item->context);
1302 * xge_ifmedia_change
1303 * Media change driver callback
1305 * @ifnetp Interface Handle
1307 * Returns 0 if media is Ether else EINVAL
1310 xge_ifmedia_change(struct ifnet *ifnetp)
1312 xge_lldev_t *lldev = ifnetp->if_softc;
1313 struct ifmedia *ifmediap = &lldev->media;
1315 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1319 * xge_ifmedia_status
1320 * Media status driver callback
1322 * @ifnetp Interface Handle
1323 * @ifmr Interface Media Settings
1326 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1328 xge_hal_status_e status;
1330 xge_lldev_t *lldev = ifnetp->if_softc;
1331 xge_hal_device_t *hldev = lldev->devh;
1333 ifmr->ifm_status = IFM_AVALID;
1334 ifmr->ifm_active = IFM_ETHER;
1336 status = xge_hal_mgmt_reg_read(hldev, 0,
1337 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), ®value);
1338 if(status != XGE_HAL_OK) {
1339 xge_trace(XGE_TRACE, "Getting adapter status failed");
1343 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1344 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1345 ifmr->ifm_status |= IFM_ACTIVE;
1346 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1347 if_link_state_change(ifnetp, LINK_STATE_UP);
1350 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1358 * IOCTL to get statistics
1360 * @lldev Per-adapter data
1361 * @ifreqp Interface request
/*
 * Handler for SIOCGPRIVATE_0: dispatches on a one-byte command fetched
 * from user space (fubyte) and copies the requested statistics /
 * configuration structure back out.  HAL queries are serialized with
 * mtx_drv; retValue starts at EINVAL and (per the visible pattern) is
 * presumably set to 0 on successful copyout in the gapped lines.
 */
1364 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1366 xge_hal_status_e status = XGE_HAL_OK;
1369 int retValue = EINVAL;
/* First byte of the user buffer selects the sub-command. */
1371 cmd = fubyte(ifr_data_get_ptr(ifreqp));
/* Hardware statistics: HAL returns a pointer into its own storage,
 * so no allocation/free is needed for this case. */
1376 case XGE_QUERY_STATS:
1377 mtx_lock(&lldev->mtx_drv);
1378 status = xge_hal_stats_hw(lldev->devh,
1379 (xge_hal_stats_hw_info_t **)&info);
1380 mtx_unlock(&lldev->mtx_drv);
1381 if(status == XGE_HAL_OK) {
1382 if(copyout(info, ifr_data_get_ptr(ifreqp),
1383 sizeof(xge_hal_stats_hw_info_t)) == 0)
1387 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
/* PCI config space snapshot: temporary buffer, copied out, freed. */
1392 case XGE_QUERY_PCICONF:
1393 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1395 mtx_lock(&lldev->mtx_drv);
1396 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1397 sizeof(xge_hal_pci_config_t));
1398 mtx_unlock(&lldev->mtx_drv);
1399 if(status == XGE_HAL_OK) {
1400 if(copyout(info, ifr_data_get_ptr(ifreqp),
1401 sizeof(xge_hal_pci_config_t)) == 0)
1406 "Getting PCI configuration failed (%d)", status);
1408 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
/* Device-level statistics. */
1412 case XGE_QUERY_DEVSTATS:
1413 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1415 mtx_lock(&lldev->mtx_drv);
1416 status =xge_hal_mgmt_device_stats(lldev->devh, info,
1417 sizeof(xge_hal_stats_device_info_t));
1418 mtx_unlock(&lldev->mtx_drv);
1419 if(status == XGE_HAL_OK) {
1420 if(copyout(info, ifr_data_get_ptr(ifreqp),
1421 sizeof(xge_hal_stats_device_info_t)) == 0)
1425 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1428 xge_os_free(NULL, info,
1429 sizeof(xge_hal_stats_device_info_t));
/* Software error (tcode) statistics. */
1433 case XGE_QUERY_SWSTATS:
1434 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1436 mtx_lock(&lldev->mtx_drv);
1437 status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1438 sizeof(xge_hal_stats_sw_err_t));
1439 mtx_unlock(&lldev->mtx_drv);
1440 if(status == XGE_HAL_OK) {
1441 if(copyout(info, ifr_data_get_ptr(ifreqp),
1442 sizeof(xge_hal_stats_sw_err_t)) == 0)
1447 "Getting tcode statistics failed (%d)", status);
1449 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
/* Driver-maintained counters live in the softc; copied out directly. */
1453 case XGE_QUERY_DRIVERSTATS:
1454 if(copyout(&lldev->driver_stats, ifr_data_get_ptr(ifreqp),
1455 sizeof(xge_driver_stats_t)) == 0) {
1460 "Copyout of driver statistics failed (%d)", status);
/* Driver version string.  NOTE(review): strcpy into a fixed
 * XGE_BUFFER_SIZE buffer — safe only while XGE_DRIVER_VERSION fits;
 * confirm the constant sizes. */
1464 case XGE_READ_VERSION:
1465 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1467 strcpy(info, XGE_DRIVER_VERSION);
1468 if(copyout(info, ifr_data_get_ptr(ifreqp),
1469 XGE_BUFFER_SIZE) == 0)
1471 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
/* Full device configuration structure. */
1475 case XGE_QUERY_DEVCONF:
1476 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1478 mtx_lock(&lldev->mtx_drv);
1479 status = xge_hal_mgmt_device_config(lldev->devh, info,
1480 sizeof(xge_hal_device_config_t));
1481 mtx_unlock(&lldev->mtx_drv);
1482 if(status == XGE_HAL_OK) {
1483 if(copyout(info, ifr_data_get_ptr(ifreqp),
1484 sizeof(xge_hal_device_config_t)) == 0)
1488 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1491 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
/* Current Rx buffer mode (1 or 5 buffer). */
1495 case XGE_QUERY_BUFFER_MODE:
1496 if(copyout(&lldev->buffer_mode, ifr_data_get_ptr(ifreqp),
/* Buffer-mode change requests: reply 'Y' only for mode 1 — the gapped
 * lines presumably perform the actual mode switch; confirm. */
1501 case XGE_SET_BUFFER_MODE_1:
1502 case XGE_SET_BUFFER_MODE_2:
1503 case XGE_SET_BUFFER_MODE_5:
1504 mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1505 if(copyout(&mode, ifr_data_get_ptr(ifreqp), sizeof(mode)) == 0)
1509 xge_trace(XGE_TRACE, "Nothing is matching");
1517 * xge_ioctl_registers
1518 * IOCTL to get registers
1520 * @lldev Per-adapter data
1521 * @ifreqp Interface request
/*
 * Handler for SIOCGPRIVATE_1: reads or writes a single BAR0 register,
 * or dumps the whole register file, depending on the "option" string
 * copied in from user space ("-r", "-w", or neither => full dump).
 * NOTE(review): the gapped lines presumably point `data` at &tmpdata
 * after the copyin, and check `error` — confirm against full source.
 */
1524 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1526 xge_register_t tmpdata;
1527 xge_register_t *data;
1528 xge_hal_status_e status = XGE_HAL_OK;
1529 int retValue = EINVAL, offset = 0, index = 0;
1533 error = copyin(ifr_data_get_ptr(ifreqp), &tmpdata, sizeof(tmpdata));
1538 /* Reading a register */
1539 if(strcmp(data->option, "-r") == 0) {
1540 data->value = 0x0000;
1541 mtx_lock(&lldev->mtx_drv);
1542 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1544 mtx_unlock(&lldev->mtx_drv);
1545 if(status == XGE_HAL_OK) {
1546 if(copyout(data, ifr_data_get_ptr(ifreqp),
1547 sizeof(xge_register_t)) == 0)
1551 /* Writing to a register */
1552 else if(strcmp(data->option, "-w") == 0) {
1553 mtx_lock(&lldev->mtx_drv);
1554 status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
/* Write is verified by reading the register back and comparing. */
1556 if(status == XGE_HAL_OK) {
1558 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1560 if(status != XGE_HAL_OK) {
1561 xge_trace(XGE_ERR, "Reading back updated register failed");
1564 if(val64 != data->value) {
1566 "Read and written register values mismatched");
1572 xge_trace(XGE_ERR, "Getting register value failed");
1574 mtx_unlock(&lldev->mtx_drv);
/* Neither -r nor -w: dump every 8-byte register up to the last one
 * into the user buffer (laid out as a xge_hal_pci_bar0_t image). */
1577 mtx_lock(&lldev->mtx_drv);
1578 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1579 index++, offset += 0x0008) {
1581 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1582 if(status != XGE_HAL_OK) {
1583 xge_trace(XGE_ERR, "Getting register value failed");
/* NOTE(review): the double (u64 *) cast is redundant — plain
 * ((u64 *)data)[index] = val64 would be equivalent. */
1586 *((u64 *)((u64 *)data + index)) = val64;
1589 mtx_unlock(&lldev->mtx_drv);
1592 if(copyout(data, ifr_data_get_ptr(ifreqp),
1593 sizeof(xge_hal_pci_bar0_t)) != 0) {
1594 xge_trace(XGE_ERR, "Copyout of register values failed");
1599 xge_trace(XGE_ERR, "Getting register values failed");
1607 * Callback to control the device - Interface configuration
1609 * @ifnetp Interface Handle
1610 * @command Device control command
1611 * @data Parameters associated with command (if any)
/*
 * Main ioctl entry point installed as if_ioctl.  Bails out early while
 * the driver is detaching; otherwise dispatches on the visible command
 * cases (address, MTU, flags, multicast, media, capabilities, and the
 * two private statistics/register ioctls).
 */
1614 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1616 struct ifreq *ifreqp = (struct ifreq *)data;
1617 xge_lldev_t *lldev = ifnetp->if_softc;
1618 struct ifmedia *ifmediap = &lldev->media;
1619 int retValue = 0, mask = 0;
/* Refuse ioctls once detach has begun. */
1621 if(lldev->in_detach) {
1626 /* Set/Get ifnet address */
1629 ether_ioctl(ifnetp, command, data);
/* SIOCSIFMTU (per the xge_change_mtu call). */
1634 retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1637 /* Set ifnet flags */
1639 if(ifnetp->if_flags & IFF_UP) {
1640 /* Link status is UP */
1641 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Interface already running: just re-apply promiscuous setting. */
1644 xge_disable_promisc(lldev);
1645 xge_enable_promisc(lldev);
1648 /* Link status is DOWN */
1649 /* If device is in running, make it down */
1650 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1656 /* Add/delete multicast address */
1659 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1660 xge_setmulti(lldev);
1664 /* Set/Get net media */
1667 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1670 /* Set capabilities */
1672 mtx_lock(&lldev->mtx_drv);
/* Toggle only the capability bits that differ from current. */
1673 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1674 if(mask & IFCAP_TXCSUM) {
1675 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
/* Disabling TXCSUM also disables TSO4, which depends on it. */
1676 ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1677 ifnetp->if_hwassist &=
1678 ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1681 ifnetp->if_capenable |= IFCAP_TXCSUM;
1682 ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1685 if(mask & IFCAP_TSO4) {
1686 if(ifnetp->if_capenable & IFCAP_TSO4) {
1687 ifnetp->if_capenable &= ~IFCAP_TSO4;
1688 ifnetp->if_hwassist &= ~CSUM_TSO;
1690 xge_os_printf("%s: TSO Disabled",
1691 device_get_nameunit(lldev->device));
/* TSO4 may only be enabled while TXCSUM is enabled. */
1693 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1694 ifnetp->if_capenable |= IFCAP_TSO4;
1695 ifnetp->if_hwassist |= CSUM_TSO;
1697 xge_os_printf("%s: TSO Enabled",
1698 device_get_nameunit(lldev->device));
1702 mtx_unlock(&lldev->mtx_drv);
1705 /* Custom IOCTL 0 */
1706 case SIOCGPRIVATE_0:
1707 retValue = xge_ioctl_stats(lldev, ifreqp);
1710 /* Custom IOCTL 1 */
1711 case SIOCGPRIVATE_1:
1712 retValue = xge_ioctl_registers(lldev, ifreqp);
1724 * Initialize the interface
1726 * @plldev Per-adapter Data
/*
 * if_init entry point: zeroes the driver statistics and performs the
 * full device bring-up under the driver mutex.
 */
1729 xge_init(void *plldev)
1731 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1733 mtx_lock(&lldev->mtx_drv);
1734 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1735 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1736 mtx_unlock(&lldev->mtx_drv);
1741 * Initialize the interface (called by holding lock)
1743 * @pdevin Per-adapter Data
/*
 * Core bring-up path shared by xge_init and xge_reset.  Caller must
 * hold mtx_drv (asserted below).  Sets MTU, enables the HAL device,
 * programs the MAC address, opens Tx/Rx channels, enables interrupts,
 * starts the poll timer, and marks the device initialized.
 */
1746 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1748 struct ifnet *ifnetp = lldev->ifnetp;
1749 xge_hal_device_t *hldev = lldev->devh;
1750 struct ifaddr *ifaddrp;
1751 unsigned char *macaddr;
1752 struct sockaddr_dl *sockaddrp;
1753 int status = XGE_HAL_OK;
1755 mtx_assert((&lldev->mtx_drv), MA_OWNED);
1757 /* If device is in running state, initializing is not required */
1758 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1761 /* Initializing timer */
1762 callout_init(&lldev->timer, 1);
1764 xge_trace(XGE_TRACE, "Set MTU size");
1765 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1766 if(status != XGE_HAL_OK) {
1767 xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1771 /* Enable HAL device */
1772 xge_hal_device_enable(hldev);
1774 /* Get MAC address and update in HAL */
1775 ifaddrp = ifnetp->if_addr;
1776 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1777 sockaddrp->sdl_type = IFT_ETHER;
1778 sockaddrp->sdl_alen = ifnetp->if_addrlen;
1779 macaddr = LLADDR(sockaddrp);
1780 xge_trace(XGE_TRACE,
1781 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1782 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1783 *(macaddr + 4), *(macaddr + 5));
1784 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1785 if(status != XGE_HAL_OK)
1786 xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
/* Channel open sleeps/allocates, so drop the mutex across the call. */
1788 /* Opening channels */
1789 mtx_unlock(&lldev->mtx_drv);
1790 status = xge_channel_open(lldev, option);
1791 mtx_lock(&lldev->mtx_drv);
1792 if(status != XGE_HAL_OK)
1795 /* Set appropriate flags */
1796 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
/* NOTE(review): IFF_DRV_OACTIVE belongs in if_drv_flags per ifnet(9);
 * clearing it in if_flags here looks like a long-standing bug —
 * confirm with the link state callbacks. */
1797 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1799 /* Checksum capability */
1800 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1801 (CSUM_TCP | CSUM_UDP) : 0;
1803 if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1804 ifnetp->if_hwassist |= CSUM_TSO;
1806 /* Enable interrupts */
1807 xge_hal_device_intr_enable(hldev);
/* First poll fires after 10 seconds; xge_timer then reschedules
 * itself every second. */
1809 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1811 /* Enable promiscuous mode if the interface has IFF_PROMISC set */
1812 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1813 xge_enable_promisc(lldev);
1815 /* Device is initialized */
1816 lldev->initialized = 1;
1817 xge_os_mdelay(1000);
1825 * Timer timeout function to handle link status
1827 * @devp Per-adapter Data
/*
 * Periodic callout: polls the HAL (which drives link-state and error
 * processing) and re-arms itself to fire again in one second.
 */
1830 xge_timer(void *devp)
1832 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1833 xge_hal_device_t *hldev = lldev->devh;
1835 /* Poll for changes */
1836 xge_hal_device_poll(hldev);
1839 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1846 * De-activate the interface
1848 * @lldev Per-adapter Data
/* Locked wrapper around xge_device_stop for the normal (non-reset)
 * stop path. */
1851 xge_stop(xge_lldev_t *lldev)
1853 mtx_lock(&lldev->mtx_drv);
1854 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1855 mtx_unlock(&lldev->mtx_drv);
1860 * ISR filter function - to filter interrupts from other devices (shared)
1862 * @handle Per-adapter Data
1865 * FILTER_STRAY if interrupt is from other device
1866 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
/*
 * Fast interrupt filter for the shared INTx line: reads the BAR0
 * general_int_status register; a zero value means the interrupt was
 * raised by some other device on the line.
 */
1869 xge_isr_filter(void *handle)
1871 xge_lldev_t *lldev = (xge_lldev_t *)handle;
1872 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1873 u16 retValue = FILTER_STRAY;
1876 XGE_DRV_STATS(isr_filter);
1878 val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1879 &bar0->general_int_status);
1880 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1887 * Interrupt service routine for Line interrupts
1889 * @plldev Per-adapter Data
/*
 * Threaded ISR for INTx mode: lets the HAL process the interrupt and,
 * if packets are queued for transmit, presumably restarts the send
 * path in the gapped line following the IFQ check — confirm.
 */
1892 xge_isr_line(void *plldev)
1894 xge_hal_status_e status;
1895 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1896 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1897 struct ifnet *ifnetp = lldev->ifnetp;
1899 XGE_DRV_STATS(isr_line);
1901 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1902 status = xge_hal_device_handle_irq(hldev);
1903 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1910 * ISR for Message signaled interrupts
/* MSI handler: no status-register filtering is needed since the
 * vector is dedicated; just let the HAL continue IRQ processing. */
1913 xge_isr_msi(void *plldev)
1915 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1916 XGE_DRV_STATS(isr_msi);
1917 xge_hal_device_continue_irq(lldev->devh);
1922 * Initiate and open all Rx channels
1925 * @lldev Per-adapter Data
1926 * @rflag Channel open/close/reopen flag
1928 * Returns 0 or Error Number
/*
 * Opens the ring (Rx) channel for queue `qid` with the standard
 * attribute set (completion callback, per-descriptor private area,
 * replenish/terminate hooks).  Refuses to open when the adapter is
 * not in a ready state.
 */
1931 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1933 u64 adapter_status = 0x0;
1934 xge_hal_status_e status = XGE_HAL_FAIL;
1936 xge_hal_channel_attr_t attr = {
1939 .callback = xge_rx_compl,
1940 .per_dtr_space = sizeof(xge_rx_priv_t),
1942 .type = XGE_HAL_CHANNEL_TYPE_RING,
1944 .dtr_init = xge_rx_initial_replenish,
1945 .dtr_term = xge_rx_term
1948 /* If device is not ready, return */
1949 status = xge_hal_device_status(lldev->devh, &adapter_status);
1950 if(status != XGE_HAL_OK) {
1951 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1952 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1955 status = xge_hal_channel_open(lldev->devh, &attr,
1956 &lldev->ring_channel[qid], rflag);
1965 * Initialize and open all Tx channels
1967 * @lldev Per-adapter Data
1968 * @tflag Channel open/close/reopen flag
1970 * Returns 0 or Error Number
/*
 * Opens one FIFO (Tx) channel per queue.  On any failure the channels
 * opened so far are closed again, so the function is all-or-nothing.
 */
1973 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1975 xge_hal_status_e status = XGE_HAL_FAIL;
1976 u64 adapter_status = 0x0;
1979 xge_hal_channel_attr_t attr = {
1981 .callback = xge_tx_compl,
1982 .per_dtr_space = sizeof(xge_tx_priv_t),
1984 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1986 .dtr_init = xge_tx_initial_replenish,
1987 .dtr_term = xge_tx_term
1990 /* If device is not ready, return */
1991 status = xge_hal_device_status(lldev->devh, &adapter_status);
1992 if(status != XGE_HAL_OK) {
1993 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1994 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1997 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
/* NOTE(review): trailing comma makes this a comma expression chained
 * into the next statement's line in the original; a semicolon was
 * almost certainly intended (behavior happens to be the same here). */
1998 attr.post_qid = qindex,
1999 status = xge_hal_channel_open(lldev->devh, &attr,
2000 &lldev->fifo_channel[qindex], tflag);
2001 if(status != XGE_HAL_OK) {
/* Roll back: close every fifo channel opened before the failure. */
2002 for(index = 0; index < qindex; index++)
2003 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2015 * @lldev Per-adapter Data
/*
 * Programs MSI support: picks the msi_data config-space offset based
 * on the 64-bit-address-capable bit of msi_control (0x4c vs 0x48),
 * rewrites msi_data and msi_control, then binds every free HAL channel
 * to the MSI via its TxMAT/RxMAT mapping.
 */
2018 xge_enable_msi(xge_lldev_t *lldev)
2020 xge_list_t *item = NULL;
2021 xge_hal_device_t *hldev = lldev->devh;
2022 xge_hal_channel_t *channel = NULL;
2023 u16 offset = 0, val16 = 0;
2025 xge_os_pci_read16(lldev->pdev, NULL,
2026 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2028 /* Update msi_data */
/* Bit 0x80 of msi_control => 64-bit message address; msi_data then
 * sits at 0x4c instead of 0x48. */
2029 offset = (val16 & 0x80) ? 0x4c : 0x48;
2030 xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
/* (gapped lines presumably modify val16 before the write-back) */
2035 xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2037 /* Update msi_control */
2038 xge_os_pci_read16(lldev->pdev, NULL,
2039 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2041 xge_os_pci_write16(lldev->pdev, NULL,
2042 xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2044 /* Set TxMAT and RxMAT registers with MSI */
2045 xge_list_for_each(item, &hldev->free_channels) {
2046 channel = xge_container_of(item, xge_hal_channel_t, item);
2047 xge_hal_channel_msi_set(channel, 1, (u32)val16);
2053 * Open both Tx and Rx channels
2055 * @lldev Per-adapter Data
2056 * @option Channel reopen option
/*
 * Opens all Rx rings, initializes the LRO session pool (if enabled),
 * then opens all Tx fifos.  If 1-buffer-mode Rx setup fails for jumbo
 * MTUs it falls back to 5-buffer mode and retries.  Error paths close
 * any rings opened so far.  Called without mtx_drv held (see
 * xge_device_init, which drops the lock around this call).
 */
2059 xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2061 xge_lro_entry_t *lro_session = NULL;
2062 xge_hal_status_e status = XGE_HAL_OK;
2063 int index = 0, index2 = 0;
2065 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2066 xge_msi_info_restore(lldev);
2067 xge_enable_msi(lldev);
2071 status = xge_create_dma_tags(lldev->device);
2072 if(status != XGE_HAL_OK)
2073 XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2075 /* Open ring (Rx) channel */
2076 for(index = 0; index < XGE_RING_COUNT; index++) {
2077 status = xge_rx_open(index, lldev, option);
2078 if(status != XGE_HAL_OK) {
2080 * DMA mapping fails in the unpatched Kernel which can't
2081 * allocate contiguous memory for Jumbo frames.
2082 * Try using 5 buffer mode.
2084 if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2085 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2087 /* Close so far opened channels */
2088 for(index2 = 0; index2 < index; index2++) {
2089 xge_hal_channel_close(lldev->ring_channel[index2],
2093 /* Destroy DMA tags intended to use for 1 buffer mode */
2094 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2095 lldev->extra_dma_map)) {
2096 xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2098 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2099 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2100 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2101 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2103 /* Switch to 5 buffer mode */
2104 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2105 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
/* Non-jumbo / non-1-buffer-mode failure: give up. */
2111 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
/* Pre-allocate the free-session pool for LRO; if an allocation fails
 * partway, shrink lro_num to what was actually allocated. */
2117 if(lldev->enabled_lro) {
2118 SLIST_INIT(&lldev->lro_free);
2119 SLIST_INIT(&lldev->lro_active);
2120 lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2122 for(index = 0; index < lldev->lro_num; index++) {
2123 lro_session = (xge_lro_entry_t *)
2124 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2125 if(lro_session == NULL) {
2126 lldev->lro_num = index;
2129 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2133 /* Open FIFO (Tx) channel */
2134 status = xge_tx_open(lldev, option);
2135 if(status != XGE_HAL_OK)
2136 XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2142 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2143 * Initialization of LRO failed (index is XGE_RING_COUNT)
2144 * Opening Tx channel failed (index is XGE_RING_COUNT)
2146 for(index2 = 0; index2 < index; index2++)
2147 xge_hal_channel_close(lldev->ring_channel[index2], option);
2155 * Close both Tx and Rx channels
2157 * @lldev Per-adapter Data
2158 * @option Channel reopen option
/*
 * Closes every Tx fifo and Rx ring channel, then tears down the DMA
 * maps/tags created by xge_create_dma_tags.  Destroy failures are
 * logged but not treated as fatal.
 */
2162 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2168 /* Close FIFO (Tx) channel */
2169 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2170 xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2172 /* Close Ring (Rx) channels */
2173 for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2174 xge_hal_channel_close(lldev->ring_channel[qindex], option);
2176 if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2177 xge_trace(XGE_ERR, "Rx extra map destroy failed");
2178 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2179 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2180 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2181 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2188 * @arg Parameter passed from dmamap
2190 * @nseg Number of segments
/*
 * bus_dmamap_load callback: stores the first segment's bus address
 * into the caller-provided bus_addr_t.  NOTE(review): the surrounding
 * error check (original line 2196, not visible in this listing)
 * presumably guards this store with if(!error) — confirm.
 */
2194 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2197 *(bus_addr_t *) arg = segs->ds_addr;
2205 * @lldev Per-adapter Data
/*
 * Full chip reset: stop then re-init under the driver mutex.  A no-op
 * if the device was never brought up.  (The trace string's "Reseting"
 * typo is a runtime string and is left untouched here.)
 */
2208 xge_reset(xge_lldev_t *lldev)
2210 xge_trace(XGE_TRACE, "Reseting the chip");
2212 /* If the device is not initialized, return */
2213 if(lldev->initialized) {
2214 mtx_lock(&lldev->mtx_drv);
2215 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2216 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2217 mtx_unlock(&lldev->mtx_drv);
2225 * Set an address as a multicast address
2227 * @lldev Per-adapter Data
/*
 * Rebuilds the hardware multicast filter from the ifnet multicast
 * list.  Toggles all-multicast mode, then (when not in all-multicast)
 * clears the 48-entry MAC table and reprograms it with the current
 * AF_LINK multicast addresses.
 */
2230 xge_setmulti(xge_lldev_t *lldev)
2232 struct ifmultiaddr *ifma;
2234 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2235 struct ifnet *ifnetp = lldev->ifnetp;
/* 47 usable filter slots beyond the station address. */
2238 int table_size = 47;
2239 xge_hal_status_e status = XGE_HAL_OK;
2240 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* NOTE(review): both branches test the same IFF_MULTICAST bit and
 * merely alternate mcast enable/disable via all_multicast — this
 * looks like it should test IFF_ALLMULTI in one branch; confirm
 * against upstream before changing. */
2242 if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2243 status = xge_hal_device_mcast_enable(hldev);
2244 lldev->all_multicast = 1;
2246 else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2247 status = xge_hal_device_mcast_disable(hldev);
2248 lldev->all_multicast = 0;
2251 if(status != XGE_HAL_OK) {
2252 xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2256 /* Updating address list */
2257 if_maddr_rlock(ifnetp);
/* First pass: count AF_LINK entries (index is advanced in gapped
 * lines). */
2259 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2260 if(ifma->ifma_addr->sa_family != AF_LINK) {
2263 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2266 if_maddr_runlock(ifnetp);
2268 if((!lldev->all_multicast) && (index)) {
2269 lldev->macaddr_count = (index + 1);
2270 if(lldev->macaddr_count > table_size) {
2274 /* Clear old addresses */
2275 for(index = 0; index < 48; index++) {
2276 xge_hal_device_macaddr_set(hldev, (offset + index),
2281 /* Add new addresses */
2282 if_maddr_rlock(ifnetp);
2284 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2285 if(ifma->ifma_addr->sa_family != AF_LINK) {
2288 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2289 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2292 if_maddr_runlock(ifnetp);
2299 * xge_enable_promisc
2300 * Enable Promiscuous Mode
2302 * @lldev Per-adapter Data
/*
 * If the interface requests IFF_PROMISC, enables HAL promiscuous mode
 * and reprograms rx_pa_cfg so the hardware stops stripping VLAN tags
 * (a sniffer should see the tags).
 */
2305 xge_enable_promisc(xge_lldev_t *lldev)
2307 struct ifnet *ifnetp = lldev->ifnetp;
2308 xge_hal_device_t *hldev = lldev->devh;
2309 xge_hal_pci_bar0_t *bar0 = NULL;
2312 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2314 if(ifnetp->if_flags & IFF_PROMISC) {
2315 xge_hal_device_promisc_enable(lldev->devh);
2318 * When operating in promiscuous mode, don't strip the VLAN tag
/* Read-modify-write of rx_pa_cfg: clear the strip-VLAN mode bit. */
2320 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2322 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2323 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2324 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2327 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2332 * xge_disable_promisc
2333 * Disable Promiscuous Mode
2335 * @lldev Per-adapter Data
/*
 * Disables HAL promiscuous mode and re-enables hardware VLAN tag
 * stripping in rx_pa_cfg.
 */
2338 xge_disable_promisc(xge_lldev_t *lldev)
2340 xge_hal_device_t *hldev = lldev->devh;
2341 xge_hal_pci_bar0_t *bar0 = NULL;
2344 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2346 xge_hal_device_promisc_disable(lldev->devh);
2349 * Strip VLAN tag when operating in non-promiscuous mode
2351 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear-then-set of the same STRIP_VLAN_TAG_MODE(1) bits: the clear
 * is redundant but harmless; the net effect is to set mode 1. */
2353 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2354 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2355 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2358 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2363 * Change interface MTU to a requested valid size
2365 * @lldev Per-adapter Data
2366 * @NewMtu Requested MTU
2368 * Returns 0 or Error Number
/*
 * Validates the requested MTU with the HAL, records it in the softc,
 * and asks xge_confirm_changes to apply it (which presumably restarts
 * the interface as needed — the tail of the body is gapped).
 */
2371 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2373 int status = XGE_HAL_OK;
2375 /* Check requested MTU size for boundary */
2376 if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2377 XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2380 lldev->mtu = new_mtu;
2381 xge_confirm_changes(lldev, XGE_SET_MTU);
2390 * Common code for both stop and part of reset. Disables device, interrupts and
2393 * @dev Device Handle
2394 * @option Channel normal/reset option
/*
 * Core teardown path shared by xge_stop and xge_reset.  Caller must
 * hold mtx_drv (asserted below).  Clears the running flags, stops the
 * poll timer, disables interrupts, drains the HAL event queue (with
 * the mutex dropped, since flush may sleep/dispatch), disables and
 * resets the HAL device, and closes all channels.
 */
2397 xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2399 xge_hal_device_t *hldev = lldev->devh;
2400 struct ifnet *ifnetp = lldev->ifnetp;
2403 mtx_assert((&lldev->mtx_drv), MA_OWNED);
2405 /* If device is not in "Running" state, return */
2406 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2409 /* Set appropriate flags */
2410 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2413 callout_stop(&lldev->timer);
2415 /* Disable interrupts */
2416 xge_hal_device_intr_disable(hldev);
/* Queue flush may invoke callbacks; drop the driver lock around it. */
2418 mtx_unlock(&lldev->mtx_drv);
2419 xge_queue_flush(xge_hal_device_queue(lldev->devh));
2420 mtx_lock(&lldev->mtx_drv);
2422 /* Disable HAL device */
2423 if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2424 xge_trace(XGE_ERR, "Disabling HAL device failed");
2425 xge_hal_device_status(hldev, &val64);
2426 xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2429 /* Close Tx and Rx channels */
2430 xge_channel_close(lldev, option);
2432 /* Reset HAL device */
2433 xge_hal_device_reset(hldev);
/* Give the hardware a full second to settle after reset. */
2435 xge_os_mdelay(1000);
2436 lldev->initialized = 0;
2438 if_link_state_change(ifnetp, LINK_STATE_DOWN);
2445 * xge_set_mbuf_cflags
2446 * set checksum flag for the mbuf
2451 xge_set_mbuf_cflags(mbuf_t pkt)
2453 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2454 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2455 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2456 pkt->m_pkthdr.csum_data = htons(0xffff);
2460 * xge_lro_flush_sessions
2461 * Flush LRO session and send accumulated LRO packet to upper layer
2463 * @lldev Per-adapter Data
2466 xge_lro_flush_sessions(xge_lldev_t *lldev)
2468 xge_lro_entry_t *lro_session = NULL;
2470 while(!SLIST_EMPTY(&lldev->lro_active)) {
2471 lro_session = SLIST_FIRST(&lldev->lro_active);
2472 SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2473 xge_lro_flush(lldev, lro_session);
2479 * Flush LRO session. Send accumulated LRO packet to upper layer
2481 * @lldev Per-adapter Data
2482 * @lro LRO session to be flushed
/*
 * Finalizes an accumulated LRO packet: patches the IP total length,
 * TCP ack/window (and timestamp option, if present) in the stored
 * headers, hands the mbuf chain to if_input, clears the session state,
 * and returns the session to the free pool.
 */
2485 xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2487 struct ip *header_ip;
2488 struct tcphdr *header_tcp;
2491 if(lro_session->append_cnt) {
2492 header_ip = lro_session->lro_header_ip;
/* len accumulated includes the Ethernet header; ip_len must not. */
2493 header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2494 lro_session->m_head->m_pkthdr.len = lro_session->len;
2495 header_tcp = (struct tcphdr *)(header_ip + 1);
2496 header_tcp->th_ack = lro_session->ack_seq;
2497 header_tcp->th_win = lro_session->window;
2498 if(lro_session->timestamp) {
/* Timestamp option words follow the TCP header: [0]=kind/len,
 * [1]=TSval, [2]=TSecr.  tsecr is stored in network order already. */
2499 ptr = (u32 *)(header_tcp + 1);
2500 ptr[1] = htonl(lro_session->tsval);
2501 ptr[2] = lro_session->tsecr;
2505 (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2506 lro_session->m_head = NULL;
2507 lro_session->timestamp = 0;
2508 lro_session->append_cnt = 0;
2509 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2513 * xge_lro_accumulate
2514 * Accumulate packets to form a large LRO packet based on various conditions
2516 * @lldev Per-adapter Data
2517 * @m_head Current Packet
2519 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2522 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
/*
 * Try to merge the received frame into an active LRO session (or open a
 * new one).  XGE_HAL_OK means the frame was consumed here (accumulated
 * or freed); XGE_HAL_FAIL means the caller must pass the frame up the
 * stack itself.
 */
2524 struct ether_header *header_ethernet;
2525 struct ip *header_ip;
2526 struct tcphdr *header_tcp;
2528 struct mbuf *buffer_next, *buffer_tail;
2529 xge_lro_entry_t *lro_session;
2530 xge_hal_status_e status = XGE_HAL_FAIL;
2531 int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2534 /* Get Ethernet header */
2535 header_ethernet = mtod(m_head, struct ether_header *);
2537 /* Return if it is not IP packet */
2538 if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
/* In 1-buffer mode the IP header follows the Ethernet header in the same
 * mbuf; in multi-buffer modes it sits at the start of the second mbuf. */
2542 header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2543 (struct ip *)(header_ethernet + 1) :
2544 mtod(m_head->m_next, struct ip *);
2546 /* Return if it is not TCP packet */
2547 if(header_ip->ip_p != IPPROTO_TCP)
2550 /* Return if packet has options */
2551 if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2554 /* Return if packet is fragmented */
2555 if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2558 /* Get TCP header */
2559 header_tcp = (struct tcphdr *)(header_ip + 1);
2561 /* Return if not ACK or PUSH */
2562 if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2565 /* Only timestamp option is handled */
2566 tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2567 tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2568 ptr = (u32 *)(header_tcp + 1);
2569 if(tcp_options != 0) {
/* Anything other than the well-known NOP,NOP,TIMESTAMP layout
 * (TCPOLEN_TSTAMP_APPA) disqualifies the frame from LRO. */
2570 if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2571 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2572 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2577 /* Total length of packet (IP) */
2578 ip_len = ntohs(header_ip->ip_len);
/* TCP payload size = IP total length - TCP header - IP header */
2581 tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2583 /* If the frame is padded, trim it */
2584 tot_len = m_head->m_pkthdr.len;
2585 trim = tot_len - (ip_len + ETHER_HDR_LEN);
2589 m_adj(m_head, -trim);
2590 tot_len = m_head->m_pkthdr.len;
/* Walk the chain to find the last mbuf (needed for appending later) */
2593 buffer_next = m_head;
2595 while(buffer_next != NULL) {
2596 buffer_tail = buffer_next;
2597 buffer_next = buffer_tail->m_next;
2600 /* Total size of only headers */
2601 hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2603 /* Get sequence number */
2604 seq = ntohl(header_tcp->th_seq);
/* Look for an active session matching this connection 4-tuple */
2606 SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2607 if(lro_session->source_port == header_tcp->th_sport &&
2608 lro_session->dest_port == header_tcp->th_dport &&
2609 lro_session->source_ip == header_ip->ip_src.s_addr &&
2610 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2612 /* Unmatched sequence number, flush LRO session */
2613 if(__predict_false(seq != lro_session->next_seq)) {
2614 SLIST_REMOVE(&lldev->lro_active, lro_session,
2615 xge_lro_entry_t, next);
2616 xge_lro_flush(lldev, lro_session);
2620 /* Handle timestamp option */
2622 u32 tsval = ntohl(*(ptr + 1));
/* Flush when the timestamp goes backwards (PAWS-style check) */
2623 if(__predict_false(lro_session->tsval > tsval ||
2627 lro_session->tsval = tsval;
2628 lro_session->tsecr = *(ptr + 2);
2631 lro_session->next_seq += tcp_data_len;
2632 lro_session->ack_seq = header_tcp->th_ack;
2633 lro_session->window = header_tcp->th_win;
2635 /* If TCP data/payload is of 0 size, free mbuf */
2636 if(tcp_data_len == 0) {
2638 status = XGE_HAL_OK;
2642 lro_session->append_cnt++;
2643 lro_session->len += tcp_data_len;
2645 /* Adjust mbuf so that m_data points to payload than headers */
2646 m_adj(m_head, hlen);
2648 /* Append this packet to LRO accumulated packet */
2649 lro_session->m_tail->m_next = m_head;
2650 lro_session->m_tail = buffer_tail;
2652 /* Flush if LRO packet is exceeding maximum size */
2653 if(lro_session->len >
2654 (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2655 SLIST_REMOVE(&lldev->lro_active, lro_session,
2656 xge_lro_entry_t, next);
2657 xge_lro_flush(lldev, lro_session);
2659 status = XGE_HAL_OK;
/* No free session entries left: cannot start a new session */
2664 if(SLIST_EMPTY(&lldev->lro_free))
2667 /* Start a new LRO session */
2668 lro_session = SLIST_FIRST(&lldev->lro_free);
2669 SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2670 SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2671 lro_session->source_port = header_tcp->th_sport;
2672 lro_session->dest_port = header_tcp->th_dport;
2673 lro_session->source_ip = header_ip->ip_src.s_addr;
2674 lro_session->dest_ip = header_ip->ip_dst.s_addr;
2675 lro_session->next_seq = seq + tcp_data_len;
2676 lro_session->mss = tcp_data_len;
2677 lro_session->ack_seq = header_tcp->th_ack;
2678 lro_session->window = header_tcp->th_win;
2680 lro_session->lro_header_ip = header_ip;
2682 /* Handle timestamp option */
2684 lro_session->timestamp = 1;
2685 lro_session->tsval = ntohl(*(ptr + 1));
2686 lro_session->tsecr = *(ptr + 2);
2689 lro_session->len = tot_len;
2690 lro_session->m_head = m_head;
2691 lro_session->m_tail = buffer_tail;
2692 status = XGE_HAL_OK;
2699 * xge_accumulate_large_rx
2700 * Accumulate packets to form a large LRO packet based on various conditions
2702 * @lldev Per-adapter Data
2703 * @pkt Current packet
2704 * @pkt_length Packet Length
2705 * @rxd_priv Rx Descriptor Private Data
2708 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2709 xge_rx_priv_t *rxd_priv)
/* If the frame could not be merged into an LRO session, sync the DMA
 * buffer and hand the packet to the stack directly via if_input. */
2711 if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2712 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2713 BUS_DMASYNC_POSTREAD);
2714 (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2720 * If the interrupt is due to received frame (Rx completion), send it up
2722 * @channelh Ring Channel Handle
2723 * @dtr Current Descriptor
2724 * @t_code Transfer Code indicating success or error
2725 * @userdata Per-adapter Data
2727 * Returns XGE_HAL_OK or HAL error enums
2730 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
/* Rx completion handler: for each completed descriptor, replace its
 * buffer with a fresh one, repost the descriptor, and push the received
 * mbuf up the stack (through LRO when enabled). */
2733 struct ifnet *ifnetp;
2734 xge_rx_priv_t *rxd_priv = NULL;
2735 mbuf_t mbuf_up = NULL;
2736 xge_hal_status_e status = XGE_HAL_OK;
2737 xge_hal_dtr_info_t ext_info;
2741 /*get the user data portion*/
2742 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2744 XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2747 XGE_DRV_STATS(rx_completions);
2749 /* get the interface pointer */
2750 ifnetp = lldev->ifnetp;
2753 XGE_DRV_STATS(rx_desc_compl);
/* Interface went down: stop processing completions */
2755 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2756 status = XGE_HAL_FAIL;
/* Hardware reported an error t_code: drop the frame and repost */
2761 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2762 XGE_DRV_STATS(rx_tcode);
2763 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2764 xge_hal_ring_dtr_post(channelh,dtr);
2768 /* Get the private data for this descriptor*/
2769 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2772 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2777 * Prepare one buffer to send it to upper layer -- since the upper
2778 * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2779 * prepare a new buffer, do mapping, use it in the current
2780 * descriptor and post descriptor back to ring channel
2782 mbuf_up = rxd_priv->bufferArray[0];
2784 /* Gets details of mbuf i.e., packet length */
2785 xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
/* Allocate and map a replacement buffer for the descriptor */
2788 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2789 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2790 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2792 if(status != XGE_HAL_OK) {
2793 xge_trace(XGE_ERR, "No memory");
2794 XGE_DRV_STATS(rx_no_buf);
2797 * Unable to allocate buffer. Instead of discarding, post
2798 * descriptor back to channel for future processing of same
2801 xge_hal_ring_dtr_post(channelh, dtr);
2805 /* Get the extended information */
2806 xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2809 * As we have allocated a new mbuf for this descriptor, post this
2810 * descriptor with new mbuf back to ring channel
2812 vlan_tag = ext_info.vlan;
2813 xge_hal_ring_dtr_post(channelh, dtr);
/* Checksum trusted only for non-fragmented TCP/UDP-over-IP frames
 * with both L3 and L4 checksums reported OK by hardware */
2814 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2815 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2816 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2817 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2819 /* set Checksum Flag */
2820 xge_set_mbuf_cflags(mbuf_up);
2822 if(lldev->enabled_lro) {
2823 xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2827 /* Post-Read sync for buffers*/
/* NOTE(review): loop iterates rxd_mbuf_cnt times but always syncs
 * dmainfo[0].dma_map, never dmainfo[index] -- confirm intent */
2828 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2829 bus_dmamap_sync(lldev->dma_tag_rx,
2830 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2832 (*ifnetp->if_input)(ifnetp, mbuf_up);
2837 * Packet with erroneous checksum , let the upper layer deal
2841 /* Post-Read sync for buffers*/
/* NOTE(review): same dmainfo[0]-only sync as above -- confirm intent */
2842 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2843 bus_dmamap_sync(lldev->dma_tag_rx,
2844 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
/* Tag the mbuf with the hardware-extracted VLAN id, if any */
2848 mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2849 mbuf_up->m_flags |= M_VLANTAG;
2852 if(lldev->enabled_lro)
2853 xge_lro_flush_sessions(lldev);
2855 (*ifnetp->if_input)(ifnetp, mbuf_up);
/* Loop while further completed descriptors are available */
2857 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
/* Flush any LRO sessions still accumulating before returning */
2860 if(lldev->enabled_lro)
2861 xge_lro_flush_sessions(lldev);
2871 * @mbuf_up Packet to send up
2872 * @channelh Ring Channel Handle
2874 * @lldev Per-adapter Data
2875 * @rxd_priv Rx Descriptor Private Data
2877 * Returns XGE_HAL_OK or HAL error enums
2880 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2881 xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
/* Pull the per-buffer lengths out of the completed descriptor and fix
 * up the mbuf chain's m_len / m_pkthdr.len fields accordingly. */
2884 int pkt_length[5]={0,0}, pkt_len=0;
2885 dma_addr_t dma_data[5];
/* Multi-buffer (2b/3b/5b) modes: lengths come back per buffer */
2891 if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2892 xge_os_memzero(pkt_length, sizeof(pkt_length));
2895 * Retrieve data of interest from the completed descriptor -- This
2896 * returns the packet length
2898 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2899 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2902 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
/* Propagate each buffer's length into its mbuf and link the chain */
2905 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2906 m->m_len = pkt_length[index];
2908 if(index < (lldev->rxd_mbuf_cnt-1)) {
2909 m->m_next = rxd_priv->bufferArray[index + 1];
2915 pkt_len+=pkt_length[index];
2919 * Since 2 buffer mode is an exceptional case where data is in 3rd
2920 * buffer but not in 2nd buffer
2922 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2923 m->m_len = pkt_length[2];
2924 pkt_len+=pkt_length[2];
2928 * Update length of newly created buffer to be sent up with packet
2931 mbuf_up->m_pkthdr.len = pkt_len;
/* 1-buffer mode: a single length covers the whole frame */
2935 * Retrieve data of interest from the completed descriptor -- This
2936 * returns the packet length
2938 xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2941 * Update length of newly created buffer to be sent up with packet
2944 mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2952 * Flush Tx descriptors
2954 * @channelh Channel handle
2957 xge_flush_txds(xge_hal_channel_h channelh)
/* Reclaim all completed Tx descriptors: unmap and free each sent mbuf,
 * then return the descriptor to the fifo's free pool. */
2959 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2960 xge_hal_dtr_h tx_dtr;
2961 xge_tx_priv_t *tx_priv;
2964 while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2966 XGE_DRV_STATS(tx_desc_compl);
/* Error t_code: account it and let the HAL handle the condition */
2968 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2969 XGE_DRV_STATS(tx_tcode);
2970 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
/* Release DMA resources and the transmitted mbuf */
2973 tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2974 bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2975 m_freem(tx_priv->buffer);
2976 tx_priv->buffer = NULL;
2977 xge_hal_fifo_dtr_free(channelh, tx_dtr);
2985 * @ifnetp Interface Handle
2988 xge_send(struct ifnet *ifnetp)
/* Transmit entry point: try each fifo queue in turn; a queue whose lock
 * is contended is skipped (counted in tx_lock_fail) rather than
 * blocked on. */
2991 xge_lldev_t *lldev = ifnetp->if_softc;
2993 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2994 if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2995 XGE_DRV_STATS(tx_lock_fail);
2998 xge_send_locked(ifnetp, qindex);
2999 mtx_unlock(&lldev->mtx_tx[qindex]);
3004 xge_send_locked(struct ifnet *ifnetp, int qindex)
/*
 * Drain the interface send queue onto fifo channel `qindex`.
 * Caller must hold lldev->mtx_tx[qindex].
 */
/* NOTE(review): `segs` is static, so it is shared by all queues and all
 * adapter instances; xge_send() can run xge_send_locked() concurrently
 * for different qindex values under different per-queue locks, which
 * would race on this array -- confirm; likely should be stack-local or
 * per-queue storage. */
3007 static bus_dma_segment_t segs[XGE_MAX_SEGS];
3008 xge_hal_status_e status;
3009 unsigned int max_fragments;
3010 xge_lldev_t *lldev = ifnetp->if_softc;
3011 xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
3012 mbuf_t m_head = NULL;
3013 mbuf_t m_buf = NULL;
3014 xge_tx_priv_t *ll_tx_priv = NULL;
3015 register unsigned int count = 0;
3016 unsigned int nsegs = 0;
/* Hardware limit on scatter/gather entries per descriptor */
3019 max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3021 /* If device is not initialized, return */
3022 if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3025 XGE_DRV_STATS(tx_calls);
3028 * This loop will be executed for each packet in the kernel maintained
3029 * queue -- each packet can be with fragments as an mbuf chain
3032 IF_DEQUEUE(&ifnetp->if_snd, m_head);
3033 if (m_head == NULL) {
3034 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
/* Count non-empty mbufs; defragment if the chain exceeds the S/G limit */
3038 for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3039 if(m_buf->m_len) count += 1;
3042 if(count >= max_fragments) {
3043 m_buf = m_defrag(m_head, M_NOWAIT);
3044 if(m_buf != NULL) m_head = m_buf;
3045 XGE_DRV_STATS(tx_defrag);
3048 /* Reserve descriptors */
3049 status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3050 if(status != XGE_HAL_OK) {
3051 XGE_DRV_STATS(tx_no_txd);
3052 xge_flush_txds(channelh);
/* Propagate the outgoing VLAN tag (0 when untagged) */
3057 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3058 xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3060 /* Update Tx private structure for this descriptor */
3061 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3062 ll_tx_priv->buffer = m_head;
3065 * Do mapping -- Required DMA tag has been created in xge_init
3066 * function and DMA maps have already been created in the
3067 * xgell_tx_replenish function.
3068 * Returns number of segments through nsegs
3070 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3071 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3072 xge_trace(XGE_TRACE, "DMA map load failed");
3073 XGE_DRV_STATS(tx_map_fail);
/* Track the high-water mark of fragments per packet */
3077 if(lldev->driver_stats.tx_max_frags < nsegs)
3078 lldev->driver_stats.tx_max_frags = nsegs;
3080 /* Set descriptor buffer for header and each fragment/segment */
3083 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3084 (dma_addr_t)htole64(segs[count].ds_addr),
3085 segs[count].ds_len);
3087 } while(count < nsegs);
3089 /* Pre-write Sync of mapping */
3090 bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3091 BUS_DMASYNC_PREWRITE);
/* TSO: pass the segment size so hardware can split the payload */
3093 if((lldev->enabled_tso) &&
3094 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3095 XGE_DRV_STATS(tx_tso);
3096 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
/* Offload IP/TCP/UDP checksums when the stack requested any assist */
3100 if(ifnetp->if_hwassist > 0) {
3101 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3102 | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3105 /* Post descriptor to FIFO channel */
3106 xge_hal_fifo_dtr_post(channelh, dtr);
3107 XGE_DRV_STATS(tx_posted);
3109 /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3110 * listener so that we can use tools like tcpdump */
3111 ETHER_BPF_MTAP(ifnetp, m_head);
/* Out of descriptors: requeue the packet and schedule a retry event */
3114 /* Prepend the packet back to queue */
3115 IF_PREPEND(&ifnetp->if_snd, m_head);
3116 ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3118 xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3119 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3120 XGE_DRV_STATS(tx_again);
3125 * Allocates new mbufs to be placed into descriptors
3127 * @dtrh Descriptor Handle
3128 * @rxd_priv Rx Descriptor Private Data
3129 * @lldev Per-adapter Data
3130 * @index Buffer Index (if multi-buffer mode)
3132 * Returns XGE_HAL_OK or HAL error enums
3135 xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3136 xge_lldev_t *lldev, int index)
3138 register mbuf_t mp = NULL;
3139 struct ifnet *ifnetp = lldev->ifnetp;
3140 int status = XGE_HAL_OK;
3141 int buffer_size = 0, cluster_size = 0, count;
3142 bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
3143 bus_dma_segment_t segs[3];
/* 1-buffer mode sizes for MTU + max MAC header; multi-buffer modes use
 * the precomputed per-buffer length table */
3145 buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3146 ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3147 lldev->rxd_mbuf_len[index];
/* Pick the smallest cluster size that fits the buffer */
3149 if(buffer_size <= MCLBYTES) {
3150 cluster_size = MCLBYTES;
3151 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3154 cluster_size = MJUMPAGESIZE;
3155 if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3156 (buffer_size > MJUMPAGESIZE)) {
3157 cluster_size = MJUM9BYTES;
3159 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3162 xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3163 status = XGE_HAL_FAIL;
3167 /* Update mbuf's length, packet length and receive interface */
3168 mp->m_len = mp->m_pkthdr.len = buffer_size;
3169 mp->m_pkthdr.rcvif = ifnetp;
/* Load the new mbuf into the adapter's spare DMA map */
3172 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3173 mp, segs, &count, BUS_DMA_NOWAIT)) {
3174 XGE_DRV_STATS(rx_map_fail);
3176 XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3179 /* Update descriptor private data */
3180 rxd_priv->bufferArray[index] = mp;
3181 rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
/* Swap maps: the descriptor's old map becomes the new spare */
3182 rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
3183 lldev->extra_dma_map = map;
3185 /* NOTE(review): syncs POSTREAD on the old map before unloading it;
 * original comment said "Pre-Read/Write sync" -- confirm intended op */
3186 bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3188 /* Unload DMA map of mbuf in current descriptor */
3189 bus_dmamap_unload(lldev->dma_tag_rx, map);
3191 /* Set descriptor buffer */
3192 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3193 xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3203 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3205 * @dtrh Descriptor Handle
3206 * @rxd_priv Rx Descriptor Private Data
3207 * @lldev Per-adapter Data
3209 * Returns XGE_HAL_OK or HAL error enums
3212 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3215 bus_addr_t dma_pointers[5];
3217 int status = XGE_HAL_OK, index;
/* Allocate one mbuf per buffer; on failure free those already obtained */
3220 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3221 status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3222 if(status != XGE_HAL_OK) {
3223 for(newindex = 0; newindex < index; newindex++) {
3224 m_freem(rxd_priv->bufferArray[newindex]);
3226 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
/* Build per-buffer pointer/size arrays; a zero-length slot reuses the
 * previous buffer's address with a 1-byte placeholder size */
3230 for(index = 0; index < lldev->buffer_mode; index++) {
3231 if(lldev->rxd_mbuf_len[index] != 0) {
3232 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3233 dma_sizes[index] = lldev->rxd_mbuf_len[index];
3236 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3237 dma_sizes[index] = 1;
3241 /* Assigning second buffer to third pointer in 2 buffer mode */
3242 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3243 dma_pointers[2] = dma_pointers[1];
3244 dma_sizes[2] = dma_sizes[1];
/* Program the descriptor with all buffer addresses and sizes */
3248 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3249 xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3252 xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3261 * If the interrupt is due to Tx completion, free the sent buffer
3263 * @channelh Channel Handle
3265 * @t_code Transfer Code indicating success or error
3266 * @userdata Per-adapter Data
3268 * Returns XGE_HAL_OK or HAL error enum
3271 xge_tx_compl(xge_hal_channel_h channelh,
3272 xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3274 xge_tx_priv_t *ll_tx_priv = NULL;
3275 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3276 struct ifnet *ifnetp = lldev->ifnetp;
3277 mbuf_t m_buffer = NULL;
3278 int qindex = xge_hal_channel_id(channelh);
/* Serialize with the transmit path for this queue */
3280 mtx_lock(&lldev->mtx_tx[qindex]);
3282 XGE_DRV_STATS(tx_completions);
3285 * For each completed descriptor: Get private structure, free buffer,
3286 * do unmapping, and free descriptor
3289 XGE_DRV_STATS(tx_desc_compl);
/* Error t_code: account it and let the HAL handle the condition */
3292 XGE_DRV_STATS(tx_tcode);
3293 xge_trace(XGE_TRACE, "t_code %d", t_code);
3294 xge_hal_device_handle_tcode(channelh, dtr, t_code);
/* Unmap and free the transmitted mbuf, then recycle the descriptor */
3297 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3298 m_buffer = ll_tx_priv->buffer;
3299 bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3301 ll_tx_priv->buffer = NULL;
3302 xge_hal_fifo_dtr_free(channelh, dtr);
3303 } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
/* Descriptors were freed: kick the transmit path and clear OACTIVE */
3305 xge_send_locked(ifnetp, qindex);
3306 ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3308 mtx_unlock(&lldev->mtx_tx[qindex]);
3314 * xge_tx_initial_replenish
3315 * Initially allocate buffers and set them into descriptors for later use
3317 * @channelh Tx Channel Handle
3318 * @dtrh Descriptor Handle
3320 * @userdata Per-adapter Data
3321 * @reopen Channel open/reopen option
3323 * Returns XGE_HAL_OK or HAL error enums
3326 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3327 int index, void *userdata, xge_hal_channel_reopen_e reopen)
/* Channel-open callback: create a DMA map for each Tx descriptor */
3329 xge_tx_priv_t *txd_priv = NULL;
3330 int status = XGE_HAL_OK;
3332 /* Get the user data portion from channel handle */
3333 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3335 XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3339 /* Get the private data */
3340 txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3341 if(txd_priv == NULL) {
3342 XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3346 /* Create DMA map for this descriptor */
3347 if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3348 &txd_priv->dma_map)) {
3349 XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3350 txinit_out, XGE_HAL_FAIL);
3358 * xge_rx_initial_replenish
3359 * Initially allocate buffers and set them into descriptors for later use
3361 * @channelh Tx Channel Handle
3362 * @dtrh Descriptor Handle
3364 * @userdata Per-adapter Data
3365 * @reopen Channel open/reopen option
3367 * Returns XGE_HAL_OK or HAL error enums
3370 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3371 int index, void *userdata, xge_hal_channel_reopen_e reopen)
/* Channel-open callback: allocate the per-descriptor mbuf pointer
 * array, create the DMA map(s), and attach initial receive buffer(s). */
3373 xge_rx_priv_t *rxd_priv = NULL;
3374 int status = XGE_HAL_OK;
3375 int index1 = 0, index2 = 0;
3377 /* Get the user data portion from channel handle */
3378 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3380 XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3384 /* Get the private data */
3385 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3386 if(rxd_priv == NULL) {
3387 XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
/* NOTE(review): sizeof(rxd_priv->bufferArray) is the size of the
 * pointer field itself, not of an array element -- correct only if the
 * elements are pointer-sized (mbuf_t); confirm against the struct
 * definition in if_nxge.h */
3391 rxd_priv->bufferArray = xge_os_malloc(NULL,
3392 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3394 if(rxd_priv->bufferArray == NULL) {
3395 XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3399 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3400 /* Create DMA map for these descriptors*/
3401 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3402 &rxd_priv->dmainfo[0].dma_map)) {
3403 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3404 rxinit_err_out, XGE_HAL_FAIL);
3406 /* Get a buffer, attach it to this descriptor */
3407 status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
/* Multi-buffer modes: one DMA map per constituent buffer */
3410 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3411 /* Create DMA map for this descriptor */
3412 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3413 &rxd_priv->dmainfo[index1].dma_map)) {
/* Creation failed part-way: destroy the maps made so far */
3414 for(index2 = index1 - 1; index2 >= 0; index2--) {
3415 bus_dmamap_destroy(lldev->dma_tag_rx,
3416 rxd_priv->dmainfo[index2].dma_map);
3419 "Jumbo DMA map creation for Rx descriptor failed",
3420 rxinit_err_out, XGE_HAL_FAIL);
3423 status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
/* Buffer attach failed: tear down all DMA maps before bailing out */
3426 if(status != XGE_HAL_OK) {
3427 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3428 bus_dmamap_destroy(lldev->dma_tag_rx,
3429 rxd_priv->dmainfo[index1].dma_map);
3431 goto rxinit_err_out;
/* Error path: release the mbuf pointer array */
3438 xge_os_free(NULL, rxd_priv->bufferArray,
3439 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3446 * During unload terminate and free all descriptors
3448 * @channelh Rx Channel Handle
3449 * @dtrh Rx Descriptor Handle
3450 * @state Descriptor State
3451 * @userdata Per-adapter Data
3452 * @reopen Channel open/reopen option
3455 xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3456 xge_hal_dtr_state_e state, void *userdata,
3457 xge_hal_channel_reopen_e reopen)
/* Rx teardown callback: release buffers, DMA maps, and the descriptor */
3459 xge_rx_priv_t *rxd_priv = NULL;
3460 xge_lldev_t *lldev = NULL;
3463 /* Descriptor state is not "Posted" */
3464 if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3466 /* Get the user data portion */
3467 lldev = xge_hal_channel_userdata(channelh);
3469 /* Get the private data */
3470 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
/* For each buffer: sync, unload, free the mbuf, destroy the map */
3472 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3473 if(rxd_priv->dmainfo[index].dma_map != NULL) {
3474 bus_dmamap_sync(lldev->dma_tag_rx,
3475 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3476 bus_dmamap_unload(lldev->dma_tag_rx,
3477 rxd_priv->dmainfo[index].dma_map);
3478 if(rxd_priv->bufferArray[index] != NULL)
3479 m_free(rxd_priv->bufferArray[index]);
3480 bus_dmamap_destroy(lldev->dma_tag_rx,
3481 rxd_priv->dmainfo[index].dma_map);
/* Free the mbuf pointer array allocated at replenish time */
3484 xge_os_free(NULL, rxd_priv->bufferArray,
3485 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3487 /* Free the descriptor */
3488 xge_hal_ring_dtr_free(channelh, dtrh);
3496 * During unload terminate and free all descriptors
3498 * @channelh Rx Channel Handle
3499 * @dtrh Rx Descriptor Handle
3500 * @state Descriptor State
3501 * @userdata Per-adapter Data
3502 * @reopen Channel open/reopen option
3505 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3506 xge_hal_dtr_state_e state, void *userdata,
3507 xge_hal_channel_reopen_e reopen)
/* Tx teardown callback: destroy the per-descriptor DMA map */
3509 xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3510 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3512 /* Destroy DMA map */
3513 bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3519 * FreeBSD device interface entry points
/* newbus method table wiring probe/attach/detach/shutdown handlers */
3521 static device_method_t xge_methods[] = {
3522 DEVMETHOD(device_probe, xge_probe),
3523 DEVMETHOD(device_attach, xge_attach),
3524 DEVMETHOD(device_detach, xge_detach),
3525 DEVMETHOD(device_shutdown, xge_shutdown),
/* Driver descriptor: softc allocated per device is sizeof(xge_lldev_t) */
3530 static driver_t xge_driver = {
3533 sizeof(xge_lldev_t),
3535 static devclass_t xge_devclass;
/* Register the nxge driver on the PCI bus */
3536 DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);