2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * FreeBSD specific initialization & routines
35 #include <dev/nxge/if_nxge.h>
36 #include <dev/nxge/xge-osdep.h>
37 #include <net/if_arp.h>
38 #include <sys/types.h>
40 #include <net/if_vlan_var.h>
/* Non-zero once the one-time banner/description work in xge_probe() has run
 * (probe is invoked once per candidate device). */
42 int copyright_print = 0;
/* Reference count of HAL driver initializations; the HAL is terminated in
 * freeResources() when this drops back to zero. */
43 int hal_driver_init_count = 0;
/* NOTE(review): file-scope global with a very generic name; nothing in this
 * chunk reads it -- confirm it is used elsewhere, otherwise make it static. */
44 size_t size = sizeof(int);
46 /******************************************
48 * Parameters: Device structure
49 * Return: BUS_PROBE_DEFAULT/ENXIO/ENOMEM
50 * Description: Probes for Xframe device
51 ******************************************/
/* Match the PCI vendor/device IDs of Xframe-I (Xena) and Xframe-II (Herc)
 * adapters and claim the device with the default probe priority.
 * NOTE(review): this listing has elided lines (opening brace, the retValue
 * declaration/initialization, closing braces and the return statement) --
 * restore them from the original file before compiling. */
53 xge_probe(device_t dev)
55 int devid = pci_get_device(dev);
56 int vendorid = pci_get_vendor(dev);
61 if(vendorid == XGE_PCI_VENDOR_ID) {
62 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
63 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
/* copyright_print gates one-time banner output across repeated probes. */
64 if(!copyright_print) {
68 device_set_desc_copy(dev,
69 "Neterion Xframe 10 Gigabit Ethernet Adapter");
70 retValue = BUS_PROBE_DEFAULT;
78 /******************************************
80 * Parameters: HAL device configuration
81 * structure, device pointer
83 * Description: Sets parameter values in
84 * xge_hal_device_config_t structure
85 ******************************************/
/* Populate *dconfig with defaults, overridable via hw.xge.* kernel
 * environment tunables (see the GET_PARAM* macros below).
 * NOTE(review): the function's opening brace and local declarations
 * (e.g. `index`) are elided in this listing. */
87 xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
/* SAVE_PARAM: store `value` into member `what` of aggregate `to`. */
94 #define SAVE_PARAM(to, what, value) to.what = value;
/*
 * GET_PARAM(str_kenv, to, param, hardcode):
 * Read integer tunable `str_kenv` from the kernel environment when it is
 * set (testenv), otherwise fall back to `hardcode`; store the result into
 * `to.param` via SAVE_PARAM.
 *
 * Fix: the address-of expression was mojibake-corrupted -- "&para" had been
 * rendered as the pilcrow sign, leaving "¶m##__LINE__".  getenv_int()
 * requires a pointer, so restore `&param##__LINE__`.
 *
 * NOTE(review): `param##__LINE__` token-pastes the literal identifier
 * "__LINE__" (## suppresses macro expansion), so every expansion for a
 * given `param` shares one static variable -- confirm this is intended.
 * NOTE(review): the else-branch braces of this macro are elided in this
 * listing.
 */
96 #define GET_PARAM(str_kenv, to, param, hardcode) { \
97 static int param##__LINE__; \
98 if(testenv(str_kenv) == 1) { \
99 getenv_int(str_kenv, &param##__LINE__); \
102 param##__LINE__ = hardcode; \
104 SAVE_PARAM(to, param, param##__LINE__); \
107 #define GET_PARAM_MAC(str_kenv, param, hardcode) \
108 GET_PARAM(str_kenv, ((*dconfig).mac), param, hardcode);
/* Convenience wrappers binding GET_PARAM to specific config sub-structures. */
110 #define GET_PARAM_FIFO(str_kenv, param, hardcode) \
111 GET_PARAM(str_kenv, ((*dconfig).fifo), param, hardcode);
/* Per-fifo-queue variant; qindex selects the queue. */
113 #define GET_PARAM_FIFO_QUEUE(str_kenv, param, qindex, hardcode) \
114 GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex]), param, hardcode);
/* Per-queue, per-TTI (Tx interrupt moderation) variant. */
116 #define GET_PARAM_FIFO_QUEUE_TTI(str_kenv, param, qindex, tindex, hardcode) \
117 GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex].tti[tindex]), \
120 #define GET_PARAM_RING(str_kenv, param, hardcode) \
121 GET_PARAM(str_kenv, ((*dconfig).ring), param, hardcode);
/* Per-ring-queue variant. */
123 #define GET_PARAM_RING_QUEUE(str_kenv, param, qindex, hardcode) \
124 GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex]), param, hardcode);
/* Per-queue RTI (Rx interrupt moderation) variant. */
126 #define GET_PARAM_RING_QUEUE_RTI(str_kenv, param, qindex, hardcode) \
127 GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex].rti), param, \
/* --- Hard defaults not exposed as tunables --- */
130 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
131 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
132 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
133 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
134 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
135 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
/* --- PCI/bus-level tunables (hw.xge.*) --- */
137 GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
138 XGE_DEFAULT_LATENCY_TIMER);
139 GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
140 XGE_DEFAULT_MAX_SPLITS_TRANS);
141 GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
142 XGE_DEFAULT_MMRB_COUNT);
143 GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
144 XGE_DEFAULT_SHARED_SPLITS);
145 GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
146 XGE_DEFAULT_ISR_POLLING_CNT);
147 GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
148 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
/* --- MAC tunables --- */
150 GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
151 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
152 GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
153 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
154 GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
155 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
156 GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
157 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
158 GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
159 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
160 GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
161 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
162 GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
163 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
/* --- Tx FIFO tunables --- */
165 GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
166 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
167 GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
168 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
169 GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
170 XGE_DEFAULT_FIFO_MAX_FRAGS);
/* Only fifo queue 0 is configured by this driver. */
172 GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, 0,
173 XGE_DEFAULT_FIFO_QUEUE_INTR);
174 GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, 0,
175 XGE_DEFAULT_FIFO_QUEUE_MAX);
176 GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial, 0,
177 XGE_DEFAULT_FIFO_QUEUE_INITIAL);
/* --- Per-TTI (Tx interrupt moderation) tunables for queue 0 --- */
179 for (index = 0; index < XGE_HAL_MAX_FIFO_TTI_NUM; index++) {
180 dconfig->fifo.queue[0].tti[index].enabled = 1;
/* NOTE(review): setting queue[0].configured inside the loop is redundant
 * (loop-invariant) but harmless. */
181 dconfig->fifo.queue[0].configured = 1;
183 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
184 urange_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
185 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
186 urange_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
187 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
188 urange_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
189 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
190 ufc_a, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
191 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
192 ufc_b, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
193 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
194 ufc_c, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
195 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
196 ufc_d, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
197 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ci_en",
198 timer_ci_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
199 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_ac_en",
200 timer_ac_en, 0, index, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
201 GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_timer_val_us",
202 timer_val_us, 0, index,
203 XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
/* --- Rx ring tunables --- */
206 GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
207 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
209 GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
210 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
/* Configure each ring queue: MTU-sized frames, priority 0, 1-buffer mode
 * by default (xge_attach may switch buffer mode later). */
212 for (index = 0; index < XGE_HAL_MIN_RING_NUM; index++) {
213 dconfig->ring.queue[index].max_frm_len = XGE_HAL_RING_USE_MTU;
214 dconfig->ring.queue[index].priority = 0;
215 dconfig->ring.queue[index].configured = 1;
216 dconfig->ring.queue[index].buffer_mode =
217 XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
219 GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, index,
220 XGE_DEFAULT_RING_QUEUE_MAX);
221 GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial, index,
222 XGE_DEFAULT_RING_QUEUE_INITIAL);
223 GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb", dram_size_mb,
224 index, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
225 GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
226 indicate_max_pkts, index,
227 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
228 GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
229 backoff_interval_us, index,
230 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
/* --- Per-queue RTI (Rx interrupt moderation) tunables --- */
232 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
233 index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
234 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
235 index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
236 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
237 index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
238 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
239 index, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
240 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
241 timer_ac_en, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
242 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
243 timer_val_us, index, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
244 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a", urange_a,
245 index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
246 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b", urange_b,
247 index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
248 GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c", urange_c,
249 index, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
/* Clamp fifo_max_frags to the HAL limit of PAGE_SIZE/32 descriptors. */
252 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
253 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags);
254 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
255 (int)(PAGE_SIZE / 32));
256 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32));
257 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
/* Errata workaround: AMD-8131 PCI-X bridge, revision <= 0x12. */
260 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
261 if(checkdev != NULL) {
262 /* Check Revision for 0x12 */
263 revision = pci_read_config(checkdev,
264 xge_offsetof(xge_hal_pci_config_t, revision), 1);
265 if(revision <= 0x12) {
266 /* Set mmrb_count to 1k; NOTE(review): the code selects
 * XGE_HAL_THREE_SPLIT_TRANSACTION although the original comment
 * claimed "max splits = 2" -- confirm the intended split count. */
267 dconfig->mmrb_count = 1;
268 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
272 #ifdef XGE_FEATURE_LRO
273 /* updating the LRO frame's sg size and frame len size. */
274 dconfig->lro_sg_size = 20;
275 dconfig->lro_frm_len = 65536;
281 /******************************************
282 * xge_driver_initialize
285 * Description: Defines HAL-ULD callbacks
286 * and initializes the HAL driver
287 ******************************************/
/* One-time (reference-counted) HAL initialization: registers the
 * link-up/link-down/critical-error/event callbacks defined in this file.
 * NOTE(review): only uld_callbacks is memzero'd in the visible lines;
 * confirm driver_config is fully initialized in the elided lines, otherwise
 * its remaining fields are stack garbage. */
289 xge_driver_initialize(void)
291 xge_hal_uld_cbs_t uld_callbacks;
292 xge_hal_driver_config_t driver_config;
293 xge_hal_status_e status = XGE_HAL_OK;
297 /* Initialize HAL driver */
298 if(!hal_driver_init_count) {
299 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
302 * Initial and maximum size of the queue used to store the events
303 * like Link up/down (xge_hal_event_e)
305 driver_config.queue_size_initial = 1;
306 driver_config.queue_size_max = 4;
/* Wire up ULD (upper-layer driver) callbacks invoked by the HAL. */
308 uld_callbacks.link_up = xgell_callback_link_up;
309 uld_callbacks.link_down = xgell_callback_link_down;
310 uld_callbacks.crit_err = xgell_callback_crit_err;
311 uld_callbacks.event = xgell_callback_event;
313 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
314 if(status != XGE_HAL_OK) {
315 xge_os_printf("xgeX: Initialization failed (Status: %d)",
/* Bump the refcount; freeResources() decrements and terminates at zero. */
320 hal_driver_init_count = hal_driver_init_count + 1;
/* Enable all HAL debug modules at XGE_TRACE level. */
322 xge_hal_driver_debug_module_mask_set(0xffffffff);
323 xge_hal_driver_debug_level_set(XGE_TRACE);
330 /******************************************
331 * Function: xge_media_init
332 * Parameters: Device pointer
334 * Description: Initializes, adds and sets
336 ******************************************/
/* Register the ifmedia types this adapter supports and default to AUTO. */
338 xge_media_init(device_t devc)
340 xgelldev_t *lldev = (xgelldev_t *)device_get_softc(devc);
344 /* Initialize Media */
345 ifmedia_init(&lldev->xge_media, IFM_IMASK, xge_ifmedia_change,
348 /* Add supported media */
349 ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
351 ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
352 ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_AUTO, 0, NULL);
353 ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
354 ifmedia_add(&lldev->xge_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
/* Default selection: autoselect. */
357 ifmedia_set(&lldev->xge_media, IFM_ETHER | IFM_AUTO);
364 * Save PCI configuration space
365 * @dev Device structure
/* Snapshot the device's PCI config space (restored by
 * xge_pci_space_restore after resets/power events). */
368 xge_pci_space_save(device_t dev)
372 struct pci_devinfo *dinfo = NULL;
374 dinfo = device_get_ivars(dev);
375 xge_trace(XGE_TRACE, "Saving PCI configuration space");
376 pci_cfg_save(dev, dinfo, 0);
382 * xge_pci_space_restore
383 * Restore saved PCI configuration space
384 * @dev Device structure
/* Counterpart of xge_pci_space_save: write the saved PCI config space
 * back to the device. */
387 xge_pci_space_restore(device_t dev)
391 struct pci_devinfo *dinfo = NULL;
393 dinfo = device_get_ivars(dev);
394 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
395 pci_cfg_restore(dev, dinfo);
400 /******************************************
402 * Parameters: Per adapter xgelldev_t
405 * Description: Connects the driver to the
406 * system if the probe routine returned success
407 ******************************************/
/* Device attach: allocates config + HAL device + pci_info, maps BAR0/BAR1,
 * initializes the HAL, sets up the ifnet interface, IRQ and DMA tags, and
 * chooses the Rx buffer mode.  On any failure it unwinds via
 * freeResources(dev, stage) where `stage` encodes how far attach got.
 * NOTE(review): many lines (braces, NULL checks, error returns) are elided
 * in this listing. */
409 xge_attach(device_t dev)
411 xge_hal_device_config_t *device_config;
412 xge_hal_ring_config_t *pRingConfig;
413 xge_hal_device_attr_t attr;
415 xge_hal_device_t *hldev;
416 pci_info_t *pci_info;
417 struct ifnet *ifnetp;
427 int buffer_index, buffer_length, index;
/* Heap-allocate the (large) HAL device config; freed at the end of attach. */
431 device_config = xge_malloc(sizeof(xge_hal_device_config_t));
433 xge_ctrace(XGE_ERR, "Malloc of device config failed");
435 goto attach_out_config;
438 lldev = (xgelldev_t *) device_get_softc(dev);
440 xge_ctrace(XGE_ERR, "Adapter softc structure allocation failed");
446 /* Initialize mutex */
447 if(mtx_initialized(&lldev->xge_lock) == 0) {
448 mtx_init((&lldev->xge_lock), "xge", MTX_NETWORK_LOCK, MTX_DEF);
451 error = xge_driver_initialize();
452 if(error != XGE_HAL_OK) {
453 xge_ctrace(XGE_ERR, "Initializing driver failed");
454 freeResources(dev, 1);
460 hldev = (xge_hal_device_t *)xge_malloc(sizeof(xge_hal_device_t));
462 xge_trace(XGE_ERR, "Allocating memory for xge_hal_device_t failed");
463 freeResources(dev, 2);
469 /* Our private structure */
470 pci_info = (pci_info_t*) xge_malloc(sizeof(pci_info_t));
472 xge_trace(XGE_ERR, "Allocating memory for pci_info_t failed");
473 freeResources(dev, 3);
477 lldev->pdev = pci_info;
478 pci_info->device = dev;
/* Enable bus mastering before any DMA setup. */
481 pci_enable_busmaster(dev);
483 /* Get virtual address for BAR0 */
485 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
487 if(pci_info->regmap0 == NULL) {
488 xge_trace(XGE_ERR, "NULL handler for BAR0");
489 freeResources(dev, 4);
493 attr.bar0 = (char *)pci_info->regmap0;
495 pci_info->bar0resource =
496 (busresource_t*) xge_malloc(sizeof(busresource_t));
497 if(pci_info->bar0resource == NULL) {
498 xge_trace(XGE_ERR, "Allocating memory for bar0resources failed");
499 freeResources(dev, 5);
/* Record bus tag/handle for BAR0 so the HAL can do register accesses. */
503 ((struct busresources *)(pci_info->bar0resource))->bus_tag =
504 rman_get_bustag(pci_info->regmap0);
505 ((struct busresources *)(pci_info->bar0resource))->bus_handle =
506 rman_get_bushandle(pci_info->regmap0);
507 ((struct busresources *)(pci_info->bar0resource))->bar_start_addr =
510 /* Get virtual address for BAR1 */
512 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
514 if(pci_info->regmap1 == NULL) {
515 xge_trace(XGE_ERR, "NULL handler for BAR1");
516 freeResources(dev, 6);
520 attr.bar1 = (char *)pci_info->regmap1;
522 pci_info->bar1resource =
523 (busresource_t*) xge_malloc(sizeof(busresource_t));
524 if(pci_info->bar1resource == NULL) {
/* NOTE(review): copy-pasted message -- this is the bar1resource path but
 * the trace string says "bar0resources". */
525 xge_trace(XGE_ERR, "Allocating memory for bar0resources failed");
526 freeResources(dev, 7);
530 ((struct busresources *)(pci_info->bar1resource))->bus_tag =
531 rman_get_bustag(pci_info->regmap1);
532 ((struct busresources *)(pci_info->bar1resource))->bus_handle =
533 rman_get_bushandle(pci_info->regmap1);
534 ((struct busresources *)(pci_info->bar1resource))->bar_start_addr =
537 /* Save PCI config space */
538 xge_pci_space_save(dev);
/* Fill in the HAL attribute structure with the mapped resources. */
540 attr.regh0 = (busresource_t *) pci_info->bar0resource;
541 attr.regh1 = (busresource_t *) pci_info->bar1resource;
542 attr.irqh = lldev->irqhandle;
543 attr.cfgh = pci_info;
544 attr.pdev = pci_info;
546 /* Initialize device configuration parameters */
547 xge_init_params(device_config, dev);
549 /* Initialize HAL device */
550 error = xge_hal_device_initialize(hldev, &attr, device_config);
551 if(error != XGE_HAL_OK) {
/* Decode the HAL status into a specific trace message, then unwind. */
553 case XGE_HAL_ERR_DRIVER_NOT_INITIALIZED:
554 xge_trace(XGE_ERR, "XGE_HAL_ERR_DRIVER_NOT_INITIALIZED");
557 case XGE_HAL_ERR_OUT_OF_MEMORY:
558 xge_trace(XGE_ERR, "XGE_HAL_ERR_OUT_OF_MEMORY");
561 case XGE_HAL_ERR_BAD_SUBSYSTEM_ID:
562 xge_trace(XGE_ERR, "XGE_HAL_ERR_BAD_SUBSYSTEM_ID");
565 case XGE_HAL_ERR_INVALID_MAC_ADDRESS:
566 xge_trace(XGE_ERR, "XGE_HAL_ERR_INVALID_MAC_ADDRESS");
569 case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING:
570 xge_trace(XGE_ERR, "XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING");
573 case XGE_HAL_ERR_SWAPPER_CTRL:
574 xge_trace(XGE_ERR, "XGE_HAL_ERR_SWAPPER_CTRL");
577 case XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT:
578 xge_trace(XGE_ERR, "XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT");
581 xge_trace(XGE_ERR, "Initializing HAL device failed (error: %d)\n",
583 freeResources(dev, 8);
/* Build a one-shot description banner from the VPD data.
 * NOTE(review): sprintf into a fixed 100-byte buffer -- confirm the VPD
 * product name + serial number cannot overflow it; prefer snprintf. */
588 desc = (char *) malloc(100, M_DEVBUF, M_NOWAIT);
593 sprintf(desc, "%s (Rev %d) Driver v%s \n%s: Serial Number: %s ",
594 hldev->vpd_data.product_name, hldev->revision, DRIVER_VERSION,
595 device_get_nameunit(dev), hldev->vpd_data.serial_num);
596 printf("%s: Xframe%s %s\n", device_get_nameunit(dev),
597 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I": "II"),
599 free(desc, M_DEVBUF);
/* Xframe-II only: report the PCI/PCI-X bus width and mode. */
603 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
604 error = xge_hal_mgmt_reg_read(hldev, 0,
605 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
606 if(error != XGE_HAL_OK) {
607 xge_trace(XGE_ERR, "Error for getting bus speed");
/* NOTE(review): 20 bytes is too small for "%s: Device is on %s bit" with
 * the device name unit -- potential sprintf overflow; use snprintf and a
 * larger buffer. */
609 mesg = (char *) xge_malloc(20);
611 freeResources(dev, 8);
616 sprintf(mesg, "%s: Device is on %s bit", device_get_nameunit(dev),
617 (val64 & BIT(8)) ? "32":"64");
619 mode = (u8)((val64 & vBIT(0xF, 0, 4)) >> 60);
621 case 0x00: xge_os_printf("%s PCI 33MHz bus", mesg); break;
622 case 0x01: xge_os_printf("%s PCI 66MHz bus", mesg); break;
623 case 0x02: xge_os_printf("%s PCIX(M1) 66MHz bus", mesg); break;
624 case 0x03: xge_os_printf("%s PCIX(M1) 100MHz bus", mesg); break;
625 case 0x04: xge_os_printf("%s PCIX(M1) 133MHz bus", mesg); break;
626 case 0x05: xge_os_printf("%s PCIX(M2) 133MHz bus", mesg); break;
627 case 0x06: xge_os_printf("%s PCIX(M2) 200MHz bus", mesg); break;
628 case 0x07: xge_os_printf("%s PCIX(M2) 266MHz bus", mesg); break;
630 free(mesg, M_DEVBUF);
/* Link the HAL device back to our per-adapter softc. */
633 xge_hal_device_private_set(hldev, lldev);
635 error = xge_interface_setup(dev);
641 ifnetp = lldev->ifnetp;
642 ifnetp->if_mtu = device_config->mtu;
/* Allocate a shareable IRQ and hook the interrupt handler. */
648 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
649 RF_SHAREABLE | RF_ACTIVE);
650 if(lldev->irq == NULL) {
651 xge_trace(XGE_ERR, "NULL handler for IRQ");
652 freeResources(dev, 10);
657 /* Associate interrupt handler with the device */
658 error = bus_setup_intr(dev, lldev->irq, INTR_TYPE_NET | INTR_MPSAFE,
659 #if __FreeBSD_version > 700030
662 (void *)xge_intr, lldev, &lldev->irqhandle);
665 "Associating interrupt handler with device failed");
666 freeResources(dev, 11);
671 /* Create DMA tags */
672 error = bus_dma_tag_create(
673 bus_get_dma_tag(dev), /* Parent */
674 PAGE_SIZE, /* Alignment */
676 BUS_SPACE_MAXADDR, /* Low Address */
677 BUS_SPACE_MAXADDR, /* High Address */
678 NULL, /* Filter Function */
679 NULL, /* Filter Function Arguments */
680 MCLBYTES * MAX_SEGS, /* Maximum Size */
681 MAX_SEGS, /* Number of Segments */
682 MCLBYTES, /* Maximum Segment Size */
683 BUS_DMA_ALLOCNOW, /* Flags */
684 NULL, /* Lock Function */
685 NULL, /* Lock Function Arguments */
686 (&lldev->dma_tag_tx)); /* DMA Tag */
688 xge_trace(XGE_ERR, "Tx DMA tag creation failed");
689 freeResources(dev, 12);
/* Rx tag: single jumbo-page segment per mapping. */
694 error = bus_dma_tag_create(
695 bus_get_dma_tag(dev), /* Parent */
696 PAGE_SIZE, /* Alignment */
698 BUS_SPACE_MAXADDR, /* Low Address */
699 BUS_SPACE_MAXADDR, /* High Address */
700 NULL, /* Filter Function */
701 NULL, /* Filter Function Arguments */
702 MJUMPAGESIZE, /* Maximum Size */
703 1, /* Number of Segments */
704 MJUMPAGESIZE, /* Maximum Segment Size */
705 BUS_DMA_ALLOCNOW, /* Flags */
706 NULL, /* Lock Function */
707 NULL, /* Lock Function Arguments */
708 (&lldev->dma_tag_rx)); /* DMA Tag */
711 xge_trace(XGE_ERR, "Rx DMA tag creation failed");
712 freeResources(dev, 13);
717 /*Updating lldev->buffer_mode parameter*/
718 pRingConfig = &(hldev->config.ring);
/* Choose Rx buffer mode: when MTU+MAC header fits in one page, use the
 * compile-time selected 3/2/1 buffer mode; otherwise (jumbo) 5-buffer. */
720 if((device_config->mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= PAGE_SIZE) {
721 #if defined(XGE_FEATURE_BUFFER_MODE_3)
722 xge_os_printf("%s: 3 Buffer Mode Enabled",
723 device_get_nameunit(dev));
724 for(index = 0; index < XGE_RING_COUNT; index++) {
725 pRingConfig->queue[index].buffer_mode =
726 XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
728 pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
729 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
730 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
731 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
732 lldev->rxd_mbuf_len[2] = device_config->mtu;
733 lldev->rxd_mbuf_cnt = 3;
735 #if defined(XGE_FEATURE_BUFFER_MODE_2)
736 xge_os_printf("%s: 2 Buffer Mode Enabled",
737 device_get_nameunit(dev));
738 for(index = 0; index < XGE_RING_COUNT; index++) {
/* NOTE(review): 2-buffer branch programs queue buffer_mode with
 * BUFFER_MODE_3 while lldev->buffer_mode is set to BUFFER_MODE_2 --
 * confirm this asymmetry is intentional (HAL-side vs driver-side mode). */
739 pRingConfig->queue[index].buffer_mode =
740 XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
742 pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
743 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2;
744 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
745 lldev->rxd_mbuf_len[1] = device_config->mtu;
746 lldev->rxd_mbuf_cnt = 2;
748 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
749 lldev->rxd_mbuf_len[0] = device_config->mtu;
750 lldev->rxd_mbuf_cnt = 1;
755 xge_os_printf("%s: 5 Buffer Mode Enabled",
756 device_get_nameunit(dev));
757 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
758 for(index = 0; index < XGE_RING_COUNT; index++) {
759 pRingConfig->queue[index].buffer_mode =
760 XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
762 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
763 buffer_length = device_config->mtu;
765 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
766 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
/* Split the (jumbo) MTU into PAGE_SIZE chunks across remaining buffers. */
768 while(buffer_length > PAGE_SIZE) {
769 buffer_length -= PAGE_SIZE;
770 lldev->rxd_mbuf_len[buffer_index] = PAGE_SIZE;
774 BUFALIGN(buffer_length);
776 lldev->rxd_mbuf_len[buffer_index] = buffer_length;
777 lldev->rxd_mbuf_cnt = buffer_index;
780 #ifdef XGE_FEATURE_LRO
781 xge_os_printf("%s: LRO (Large Receive Offload) Enabled",
782 device_get_nameunit(dev));
785 #ifdef XGE_FEATURE_TSO
786 xge_os_printf("%s: TSO (TCP Segmentation Offload) enabled",
787 device_get_nameunit(dev));
/* Config struct was only needed during initialization. */
791 free(device_config, M_DEVBUF);
797 /******************************************
799 * Parameters: Device structure, error (used
802 * Description: Frees allocated resources
803 ******************************************/
/* Staged teardown shared by attach-failure unwinding and detach.
 * `error` encodes how far attach progressed (0 = full detach).
 * NOTE(review): the stage-dispatch structure (switch/labels) is elided in
 * this listing; the visible statements are the individual teardown steps
 * in reverse order of acquisition. */
805 freeResources(device_t dev, int error)
808 pci_info_t *pci_info;
809 xge_hal_device_t *hldev;
815 lldev = (xgelldev_t *) device_get_softc(dev);
816 pci_info = lldev->pdev;
/* Destroy DMA tags (Rx then Tx). */
823 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
825 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
829 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
831 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
835 /* Teardown interrupt handler - device association */
836 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
840 bus_release_resource(dev, SYS_RES_IRQ, 0, lldev->irq);
/* Detach the network interface and release ifmedia state. */
844 ifmedia_removeall(&lldev->xge_media);
847 ether_ifdetach(lldev->ifnetp);
848 if_free(lldev->ifnetp);
850 xge_hal_device_private_set(hldev, NULL);
851 xge_hal_device_disable(hldev);
855 xge_hal_device_terminate(hldev);
858 /* Restore PCI configuration space */
859 xge_pci_space_restore(dev);
861 /* Free bar1resource */
862 free(pci_info->bar1resource, M_DEVBUF);
867 bus_release_resource(dev, SYS_RES_MEMORY, rid,
871 /* Free bar0resource */
872 free(pci_info->bar0resource, M_DEVBUF);
877 bus_release_resource(dev, SYS_RES_MEMORY, rid,
881 /* Disable Bus Master */
882 pci_disable_busmaster(dev);
884 /* Free pci_info_t */
886 free(pci_info, M_DEVBUF);
889 /* Free device configuration struct and HAL device */
890 free(hldev, M_DEVBUF);
893 /* Terminate HAL driver */
/* Drop the refcount taken in xge_driver_initialize(); terminate the HAL
 * when the last adapter goes away. */
894 hal_driver_init_count = hal_driver_init_count - 1;
895 if(!hal_driver_init_count) {
896 xge_hal_driver_terminate();
900 if(mtx_initialized(&lldev->xge_lock) != 0) {
901 mtx_destroy(&lldev->xge_lock);
908 /******************************************
910 * Parameters: Device structure
912 * Description: Detaches the driver from the
914 ******************************************/
/* Detach entry point: flag in_detach under the lock so concurrent ioctls
 * bail out, then run the full teardown (stage 0 = everything). */
916 xge_detach(device_t dev)
918 xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
922 mtx_lock(&lldev->xge_lock);
923 lldev->in_detach = 1;
925 mtx_unlock(&lldev->xge_lock);
927 freeResources(dev, 0);
934 /******************************************
936 * Parameters: Per adapter xgelldev_t
939 * Description: Gets called when the system
940 * is about to be shutdown.
941 ******************************************/
/* System-shutdown hook; quiesces the device under the adapter lock.
 * NOTE(review): the stop call between lock/unlock is elided in this
 * listing. */
943 xge_shutdown(device_t dev)
945 xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev);
948 mtx_lock(&lldev->xge_lock);
950 mtx_unlock(&lldev->xge_lock);
955 /******************************************
956 * Function: xge_interface_setup
957 * Parameters: Device pointer
958 * Return: 0/ENXIO/ENOMEM
959 * Description: Sets up the interface
960 * through ifnet pointer
961 ******************************************/
/* Read the MAC address from the adapter, allocate and populate the ifnet,
 * and attach it to the network stack. Failures unwind via
 * freeResources(dev, 9). */
963 xge_interface_setup(device_t dev)
965 u8 mcaddr[ETHER_ADDR_LEN];
966 xge_hal_status_e status_code;
967 xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
968 struct ifnet *ifnetp;
969 xge_hal_device_t *hldev = lldev->devh;
974 /* Get the MAC address of the device */
975 status_code = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
976 if(status_code != XGE_HAL_OK) {
977 switch(status_code) {
978 case XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING:
980 "Failed to retrieve MAC address (timeout)");
983 case XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES:
984 xge_trace(XGE_ERR, "Invalid MAC address index");
988 xge_trace(XGE_TRACE, "Default Case");
991 freeResources(dev, 9);
996 /* Get interface ifnet structure for this Ether device */
997 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
999 xge_trace(XGE_ERR, "Allocating/getting ifnet structure failed");
1000 freeResources(dev, 9);
1005 /* Initialize interface ifnet structure */
1006 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1007 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1010 * TODO: Can't set more than 2Gbps. -- Higher value results in overflow.
1011 * But there is no effect in performance even if you set this to 10 Mbps
1013 ifnetp->if_baudrate = IF_Gbps(2);
/* Wire the standard ifnet entry points to this driver's handlers. */
1014 ifnetp->if_init = xge_init;
1015 ifnetp->if_softc = lldev;
1016 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1017 ifnetp->if_ioctl = xge_ioctl;
1018 ifnetp->if_start = xge_send;
1020 /* TODO: Check and assign optimal value */
1021 ifnetp->if_snd.ifq_maxlen = IFQ_MAXLEN;
/* Advertise VLAN tagging/MTU (plus checksum bits in the elided line). */
1023 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1026 ifnetp->if_capenable = ifnetp->if_capabilities;
1028 #ifdef XGE_FEATURE_TSO
1029 ifnetp->if_capabilities |= IFCAP_TSO4;
1030 ifnetp->if_capenable |= IFCAP_TSO4;
1033 /* Attach the interface */
1034 ether_ifattach(ifnetp, mcaddr);
1042 /******************************************
1043 * xgell_callback_link_up
1044 * Parameters: Per adapter xgelldev_t
1045 * structure pointer as void *
1047 * Description: Called by HAL to notify
1048 * hardware link up state change
1049 ******************************************/
/* HAL link-up callback: clear the OACTIVE flag so transmit resumes, and
 * report link-up to the stack. */
1051 xgell_callback_link_up(void *userdata)
1053 xgelldev_t *lldev = (xgelldev_t *)userdata;
1054 struct ifnet *ifnetp = lldev->ifnetp;
1058 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1059 if_link_state_change(ifnetp, LINK_STATE_UP);
1064 /******************************************
1065 * xgell_callback_link_down
1066 * Parameters: Per adapter xgelldev_t
1067 * structure pointer as void *
1069 * Description: Called by HAL to notify
1070 * hardware link up state change
1071 ******************************************/
/* HAL link-down callback: set OACTIVE to pause transmit, and report
 * link-down to the stack. (Mirror image of xgell_callback_link_up.) */
1073 xgell_callback_link_down(void *userdata)
1075 xgelldev_t *lldev = (xgelldev_t *)userdata;
1076 struct ifnet *ifnetp = lldev->ifnetp;
1080 ifnetp->if_flags |= IFF_DRV_OACTIVE;
1081 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1086 /******************************************
1087 * xgell_callback_crit_err
1088 * Parameters: Per adapter xgelldev_t
1089 * structure pointer as void *, event,
1092 * Description: Called by HAL on serious
1094 ******************************************/
/* HAL critical-error callback: log and reset the adapter.
 * NOTE(review): `type` and `serr_data` are unused in the visible lines. */
1096 xgell_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1100 xge_trace(XGE_ERR, "Critical Error");
1101 xgell_reset(userdata);
1106 /******************************************
1107 * xgell_callback_event
1108 * Parameters: Queue item
1110 * Description: Called by HAL in case of
1111 * some unknown to HAL events.
1112 ******************************************/
/* HAL event-queue callback. For TRY_XMIT_AGAIN: if Tx descriptors are now
 * available, clear OACTIVE; otherwise re-enqueue the event to retry later.
 * For DEVICE_RESETTING: reset the adapter. */
1114 xgell_callback_event(xge_queue_item_t *item)
1116 xgelldev_t *lldev = NULL;
1117 xge_hal_device_t *hldev = NULL;
1118 struct ifnet *ifnetp = NULL;
1122 hldev = item->context;
1123 lldev = xge_hal_device_private(hldev);
1124 ifnetp = lldev->ifnetp;
1126 if(item->event_type == XGE_LL_EVENT_TRY_XMIT_AGAIN) {
1127 if(lldev->initialized) {
1128 if(xge_hal_channel_dtr_count(lldev->fifo_channel_0) > 0) {
1129 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
/* No free descriptors yet: requeue the same event so we try again. */
1133 xge_queue_produce_context(
1134 xge_hal_device_queue(lldev->devh),
1135 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1139 else if(item->event_type == XGE_LL_EVENT_DEVICE_RESETTING) {
1140 xgell_reset(item->context);
1146 /******************************************
1147 * Function: xge_ifmedia_change
1148 * Parameters: Pointer to ifnet structure
1149 * Return: 0 for success, EINVAL if media
1150 * type is not IFM_ETHER.
1151 ******************************************/
/* ifmedia change callback: the hardware has no selectable media, so just
 * validate that the requested media type is Ethernet. */
1154 xge_ifmedia_change(struct ifnet *ifnetp)
1156 xgelldev_t *lldev = ifnetp->if_softc;
1157 struct ifmedia *ifmediap = &lldev->xge_media;
1162 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1165 /******************************************
1166 * Function: xge_ifmedia_status
1167 * Parameters: Pointer to ifnet structure
1168 * ifmediareq structure pointer
1169 * through which status of media
1172 * Description: Media status driver callback
1173 ******************************************/
/* Read the adapter status register and report link state: link is UP when
 * neither the RMAC remote-fault nor local-fault bit is set.
 * Fix: the address-of expression was mojibake-corrupted -- "&reg" had been
 * rendered as the registered-trademark sign, leaving "®value"; restore
 * `&regvalue` (regvalue is the u64 read below; its declaration is in a
 * line elided from this listing). */
1175 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1177 xge_hal_status_e status;
1179 xgelldev_t *lldev = ifnetp->if_softc;
1180 xge_hal_device_t *hldev = lldev->devh;
1184 ifmr->ifm_status = IFM_AVALID;
1185 ifmr->ifm_active = IFM_ETHER;
1187 status = xge_hal_mgmt_reg_read(hldev, 0,
1188 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1189 if(status != XGE_HAL_OK) {
1190 xge_trace(XGE_ERR, "Getting adapter status failed");
1194 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1195 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1196 ifmr->ifm_status |= IFM_ACTIVE;
1197 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1198 if_link_state_change(ifnetp, LINK_STATE_UP);
1201 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1207 /******************************************
1208 * Function: xge_ioctl
1209 * Parameters: Pointer to ifnet structure,
1210 * command -> indicates requests,
1211 * data -> passed values (if any)
1213 * Description: IOCTL entry point. Called
1214 * when the user wants to
1215 * configure the interface
1216 ******************************************/
1218 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1220 struct ifmedia *ifmediap;
1221 xge_hal_stats_hw_info_t *hw_stats;
1222 xge_hal_pci_config_t *pci_conf;
1223 xge_hal_device_config_t *device_conf;
1224 xge_hal_stats_sw_err_t *tcode;
1225 xge_hal_stats_device_info_t *intr;
1227 xge_hal_status_e status_code;
1228 xge_hal_device_t *hldev;
1234 int retValue = 0, index = 0, buffer_mode = 0;
1235 struct ifreq *ifreqp = (struct ifreq *) data;
1236 xgelldev_t *lldev = ifnetp->if_softc;
1238 ifmediap = &lldev->xge_media;
1239 hldev = lldev->devh;
/* Refuse further configuration once detach has started. */
1241 if(lldev->in_detach) {
1246 /* Set/Get ifnet address */
1249 ether_ioctl(ifnetp, command, data);
1254 retValue = changeMtu(lldev, ifreqp->ifr_mtu);
1257 /* Set ifnet flags */
1259 mtx_lock(&lldev->xge_lock);
1260 if(ifnetp->if_flags & IFF_UP) {
1261 /* Link status is UP */
1262 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1263 xge_init_locked(lldev);
1265 xge_disable_promisc(lldev);
1266 xge_enable_promisc(lldev);
1269 /* Link status is DOWN */
1270 /* If device is in running, make it down */
1271 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1275 mtx_unlock(&lldev->xge_lock);
1278 /* Add/delete multicast address */
1281 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1282 xge_setmulti(lldev);
1286 /* Set/Get net media */
1289 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1292 /* Set capabilities */
1294 mtx_lock(&lldev->xge_lock);
1296 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1297 #if defined(__FreeBSD_version) && (__FreeBSD_version >= 700026)
1298 if(mask & IFCAP_TSO4) {
1299 if(ifnetp->if_capenable & IFCAP_TSO4) {
1300 ifnetp->if_capenable &= ~IFCAP_TSO4;
1301 ifnetp->if_hwassist &= ~CSUM_TSO;
1304 /*enable tso only if txcsum is enabled*/
1305 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1306 ifnetp->if_capenable |= IFCAP_TSO4;
1307 ifnetp->if_hwassist |= CSUM_TSO;
1311 mtx_unlock(&lldev->xge_lock);
1315 * Used to get Statistics & PCI configuration through application */
/* NOTE(review): every handler below performs copyout() while holding
 * xge_lock. copyout() may fault/sleep, which is not permitted under a
 * non-sleepable mutex — verify the lock class or move copyout() outside
 * the locked region. copyout() return values are also mostly ignored. */
1316 case SIOCGPRIVATE_0:
1317 pAccess = (char*) ifreqp->ifr_data;
1318 if(*pAccess == XGE_QUERY_STATS) {
1319 mtx_lock(&lldev->xge_lock);
1320 status_code = xge_hal_stats_hw(hldev, &hw_stats);
1321 if(status_code != XGE_HAL_OK) {
1323 "Getting statistics failed (Status: %d)",
1325 mtx_unlock(&lldev->xge_lock);
1328 copyout(hw_stats, ifreqp->ifr_data,
1329 sizeof(xge_hal_stats_hw_info_t));
1330 mtx_unlock(&lldev->xge_lock);
1332 else if(*pAccess == XGE_QUERY_PCICONF) {
1333 pci_conf = xge_malloc(sizeof(xge_hal_pci_config_t));
1334 if(pci_conf == NULL) {
1337 mtx_lock(&lldev->xge_lock);
1338 status_code = xge_hal_mgmt_pci_config(hldev, pci_conf,
1339 sizeof(xge_hal_pci_config_t));
1340 if(status_code != XGE_HAL_OK) {
1342 "Getting PCIconfiguration failed (Status: %d)",
1344 mtx_unlock(&lldev->xge_lock);
1347 copyout(pci_conf, ifreqp->ifr_data,
1348 sizeof(xge_hal_pci_config_t));
1349 mtx_unlock(&lldev->xge_lock);
1350 free(pci_conf, M_DEVBUF);
1352 else if(*pAccess ==XGE_QUERY_INTRSTATS) {
1353 intr = xge_malloc(sizeof(xge_hal_stats_device_info_t));
1357 mtx_lock(&lldev->xge_lock);
1358 status_code =xge_hal_mgmt_device_stats(hldev, intr,
1359 sizeof(xge_hal_stats_device_info_t));
1360 if(status_code != XGE_HAL_OK) {
1362 "Getting intr statistics failed (Status: %d)",
1364 mtx_unlock(&lldev->xge_lock);
1367 copyout(intr, ifreqp->ifr_data,
1368 sizeof(xge_hal_stats_device_info_t));
1369 mtx_unlock(&lldev->xge_lock);
1370 free(intr, M_DEVBUF);
1372 else if(*pAccess == XGE_QUERY_TCODE) {
1373 tcode = xge_malloc(sizeof(xge_hal_stats_sw_err_t));
1377 mtx_lock(&lldev->xge_lock);
1378 status_code =xge_hal_mgmt_sw_stats(hldev, tcode,
1379 sizeof(xge_hal_stats_sw_err_t));
1380 if(status_code != XGE_HAL_OK) {
1382 "Getting tcode statistics failed (Status: %d)",
1384 mtx_unlock(&lldev->xge_lock);
1387 copyout(tcode, ifreqp->ifr_data,
1388 sizeof(xge_hal_stats_sw_err_t));
1389 mtx_unlock(&lldev->xge_lock);
1390 free(tcode, M_DEVBUF);
1392 else if(*pAccess ==XGE_READ_VERSION) {
1393 version = xge_malloc(BUFFER_SIZE);
1394 if(version == NULL) {
1397 mtx_lock(&lldev->xge_lock);
1398 strcpy(version,DRIVER_VERSION)
1399 copyout(version, ifreqp->ifr_data, BUFFER_SIZE);
1400 mtx_unlock(&lldev->xge_lock);
1401 free(version, M_DEVBUF);
1403 else if(*pAccess == XGE_QUERY_DEVCONF) {
1404 device_conf = xge_malloc(sizeof(xge_hal_device_config_t));
1405 if(device_conf == NULL) {
1408 mtx_lock(&lldev->xge_lock);
1409 status_code = xge_hal_mgmt_device_config(hldev, device_conf,
1410 sizeof(xge_hal_device_config_t));
1411 if(status_code != XGE_HAL_OK) {
1413 "Getting devconfig failed (Status: %d)",
1415 mtx_unlock(&lldev->xge_lock);
1418 if(copyout(device_conf, ifreqp->ifr_data,
1419 sizeof(xge_hal_device_config_t)) != 0) {
/* NOTE(review): message text below has a typo ("erro") — left untouched
 * in this documentation-only pass. */
1420 xge_trace(XGE_ERR, "Device configuration copyout erro");
1422 mtx_unlock(&lldev->xge_lock);
1423 free(device_conf, M_DEVBUF);
1425 else if(*pAccess == XGE_QUERY_BUFFER_MODE) {
1426 buffer_mode = lldev->buffer_mode;
1427 if(copyout(&buffer_mode, ifreqp->ifr_data,
1428 sizeof(int)) != 0) {
1429 xge_trace(XGE_ERR, "Error with copyout of buffermode");
1433 else if((*pAccess == XGE_SET_BUFFER_MODE_1) ||
1434 (*pAccess == XGE_SET_BUFFER_MODE_2) ||
1435 (*pAccess == XGE_SET_BUFFER_MODE_3) ||
1436 (*pAccess == XGE_SET_BUFFER_MODE_5)) {
1438 case XGE_SET_BUFFER_MODE_1: *pAccess = 'Y'; break;
1439 case XGE_SET_BUFFER_MODE_2:
1440 case XGE_SET_BUFFER_MODE_3:
1441 case XGE_SET_BUFFER_MODE_5: *pAccess = 'N'; break;
/* NOTE(review): sizeof(pAccess) is the size of a pointer (char *), not of
 * the result byte being returned — presumably sizeof(*pAccess) or 1 was
 * intended; confirm against the userland consumer. */
1443 if(copyout(pAccess, ifreqp->ifr_data,
1444 sizeof(pAccess)) != 0) {
1446 "Copyout of chgbufmode result failed");
1450 xge_trace(XGE_TRACE, "Nothing is matching");
1456 * Used to get BAR0 register values through application program
1458 case SIOCGPRIVATE_1:
1459 reg = (bar0reg_t *) ifreqp->ifr_data;
/* "-r": read one BAR0 register at the requested offset. */
1460 if(strcmp(reg->option,"-r") == 0) {
1461 offset = reg->offset;
1463 mtx_lock(&lldev->xge_lock);
1464 status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
1466 if(status_code == XGE_HAL_OK) {
1470 xge_trace(XGE_ERR, "Getting register value failed");
1471 mtx_unlock(&lldev->xge_lock);
1475 copyout(reg, ifreqp->ifr_data, sizeof(bar0reg_t));
1476 mtx_unlock(&lldev->xge_lock);
/* "-w": write one BAR0 register, then read it back to verify. */
1478 else if(strcmp(reg->option,"-w") == 0) {
1479 offset = reg->offset;
1481 mtx_lock(&lldev->xge_lock);
1482 status_code = xge_hal_mgmt_reg_write(hldev, 0, offset,
1484 if(status_code != XGE_HAL_OK) {
1485 xge_trace(XGE_ERR, "Getting register value failed");
1486 mtx_unlock(&lldev->xge_lock);
1491 status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
1493 if(status_code != XGE_HAL_OK) {
1494 xge_trace(XGE_ERR, "Getting register value failed");
1495 mtx_unlock(&lldev->xge_lock);
/* Read-back mismatch means the write did not take effect. */
1499 if(reg->value != value) {
1500 mtx_unlock(&lldev->xge_lock);
1504 mtx_unlock(&lldev->xge_lock);
/* Bulk dump: walk BAR0 in 8-byte strides up to the last register. */
1510 regInfo = (void *)ifreqp->ifr_data;
1512 mtx_lock(&lldev->xge_lock);
1513 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1514 index++, offset += 0x0008) {
1515 status_code = xge_hal_mgmt_reg_read(hldev, 0, offset,
1517 if(status_code == XGE_HAL_OK) {
1518 *( ( u64 *)( ( u64 * )regInfo + index ) ) = value;
1521 xge_trace(XGE_ERR, "Getting register value failed");
1522 mtx_unlock(&lldev->xge_lock);
1528 copyout(regInfo, ifreqp->ifr_data,
1529 sizeof(xge_hal_pci_bar0_t));
1530 mtx_unlock(&lldev->xge_lock);
1541 /******************************************
1542 * Function: xge_init
1543 * Parameters: Pointer to per-device
1544 * xgelldev_t structure as void*.
1546 * Description: Init entry point.
1547 ******************************************/
1549 xge_init(void *plldev)
1553 xgelldev_t *lldev = (xgelldev_t *)plldev;
/* Thin locking wrapper: all real work happens in xge_init_locked(). */
1555 mtx_lock(&lldev->xge_lock);
1556 xge_init_locked(lldev);
1557 mtx_unlock(&lldev->xge_lock);
/* Locked-path initialization; caller must hold xge_lock (asserted below). */
1563 xge_init_locked(void *pdevin)
1567 xgelldev_t *lldev = (xgelldev_t *)pdevin;
1568 struct ifnet *ifnetp = lldev->ifnetp;
1569 device_t dev = lldev->device;
1571 mtx_assert((&lldev->xge_lock), MA_OWNED);
1573 /* If device is in running state, initializing is not required */
1574 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1578 /* Initializing timer */
1579 callout_init(&lldev->timer, CALLOUT_MPSAFE);
1581 xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL);
1586 /******************************************
1587 * Function: xge_timer
1588 * Parameters: Pointer to per-device
1589 * xgelldev_t structure as void*.
1591 * Description: Polls the changes.
1592 ******************************************/
1594 xge_timer(void *devp)
1596 xgelldev_t *lldev = (xgelldev_t *)devp;
1597 xge_hal_device_t *hldev = lldev->devh;
1599 /* Poll for changes */
1600 xge_hal_device_poll(hldev);
/* Re-arm ourselves: this callout fires once per second (hz ticks). */
1603 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1608 /******************************************
1609 * Function: xge_stop
1610 * Parameters: Per adapter xgelldev_t
1613 * Description: Deactivates the interface
1614 * (Called on "ifconfig down"
1615 ******************************************/
1617 xge_stop(xgelldev_t *lldev)
1619 struct ifnet *ifnetp = lldev->ifnetp;
1620 device_t dev = lldev->device;
/* Caller must hold xge_lock; teardown is delegated to xge_terminate(). */
1624 mtx_assert((&lldev->xge_lock), MA_OWNED);
1626 /* If device is not in "Running" state, return */
1627 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1631 xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL);
1641 * ISR filter function
1642 * @handle softc/lldev per device structure
/* Fast interrupt filter: reads general_int_status from BAR0 and schedules
 * the ithread only when at least one interrupt cause bit is set. */
1645 xge_intr_filter(void *handle)
1647 xgelldev_t *lldev = NULL;
1648 xge_hal_device_t *hldev = NULL;
1649 xge_hal_pci_bar0_t *bar0 = NULL;
1650 device_t dev = NULL;
1651 u16 retValue = FILTER_STRAY;
1654 lldev = (xgelldev_t *)handle;
1655 hldev = lldev->devh;
1656 dev = lldev->device;
1657 bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
1659 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
1660 &bar0->general_int_status);
/* Zero status => interrupt was not ours (shared line) => FILTER_STRAY. */
1661 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1666 /******************************************
1668 * Parameters: Per adapter xgelldev_t
1671 * Description: Interrupt service routine
1672 ******************************************/
1674 xge_intr(void *plldev)
1676 xge_hal_status_e status;
1677 xgelldev_t *lldev = (xgelldev_t *)plldev;
1678 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1679 struct ifnet *ifnetp = lldev->ifnetp;
1681 mtx_lock(&lldev->xge_lock);
1682 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
/* Let the HAL dispatch the interrupt causes, then drain any packets
 * queued for transmit while the hardware was busy. */
1683 status = xge_hal_device_handle_irq(hldev);
1685 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd))) {
1686 xge_send_locked(ifnetp);
1689 mtx_unlock(&lldev->xge_lock);
1693 /********************************************
1694 * Function : xgell_rx_open
1695 * Parameters: Queue index, channel
1696 * open/close/reopen flag
1697 * Return: 0 or ENODEV
1698 * Description: Initialize and open all Rx
1700 ******************************************/
1702 xgell_rx_open(int qid, xgelldev_t *lldev, xge_hal_channel_reopen_e rflag)
1704 u64 adapter_status = 0x0;
1706 xge_hal_status_e status_code;
/* RING-channel attributes: per-descriptor private area holds the driver's
 * xgell_rx_priv_t; HAL invokes the dtr_init/dtr_term/callback hooks. */
1710 xge_hal_channel_attr_t attr = {
1713 .callback = xgell_rx_compl,
1714 .per_dtr_space = sizeof(xgell_rx_priv_t),
1716 .type = XGE_HAL_CHANNEL_TYPE_RING,
1718 .dtr_init = xgell_rx_initial_replenish,
1719 .dtr_term = xgell_rx_term
1722 /* If device is not ready, return */
1723 if(xge_hal_device_status(lldev->devh, &adapter_status)) {
1724 xge_trace(XGE_ERR, "Device is not ready. Adapter status: 0x%llx",
1725 (unsigned long long) adapter_status);
1730 /* Open ring channel */
1731 status_code = xge_hal_channel_open(lldev->devh, &attr,
1732 &lldev->ring_channel[qid], rflag);
1733 if(status_code != XGE_HAL_OK) {
1734 xge_trace(XGE_ERR, "Can not open Rx RING channel, Status: %d\n",
1746 /******************************************
1747 * Function: xgell_tx_open
1748 * Parameters: Channel
1749 * open/close/reopen flag
1750 * Return: 0 or ENODEV
1751 * Description: Initialize and open all Tx
1753 ******************************************/
1755 xgell_tx_open(xgelldev_t *lldev, xge_hal_channel_reopen_e tflag)
1757 xge_hal_status_e status_code;
1758 u64 adapter_status = 0x0;
/* FIFO-channel attributes, mirroring xgell_rx_open() for the Tx side. */
1763 xge_hal_channel_attr_t attr = {
1766 .callback = xgell_tx_compl,
1767 .per_dtr_space = sizeof(xgell_tx_priv_t),
1769 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1771 .dtr_init = xgell_tx_initial_replenish,
1772 .dtr_term = xgell_tx_term
1775 /* If device is not ready, return */
1776 if(xge_hal_device_status(lldev->devh, &adapter_status)) {
1777 xge_trace(XGE_ERR, "Device is not ready. Adapter status: 0x%llx\n",
1778 (unsigned long long) adapter_status);
1783 /* Open FIFO channel */
1784 status_code = xge_hal_channel_open(lldev->devh, &attr,
1785 &lldev->fifo_channel_0, tflag);
1786 if(status_code != XGE_HAL_OK) {
1787 xge_trace(XGE_ERR, "Can not open Tx FIFO channel, Status: %d\n",
1799 /******************************************
1800 * Function: xgell_channel_open
1801 * Parameters: Per adapter xgelldev_t
1804 * Description: Opens both Rx and Tx channels.
1805 ******************************************/
1807 xgell_channel_open(xgelldev_t *lldev, xge_hal_channel_reopen_e option)
1809 int status = XGE_HAL_OK;
1815 /* Open ring (Rx) channel */
1816 for(index = 0; index < XGE_RING_COUNT; index++) {
1817 if((status = xgell_rx_open(index, lldev, option))) {
1818 xge_trace(XGE_ERR, "Opening Rx channel failed (Status: %d)\n",
/* Roll back: close every ring opened before the failing one. */
1820 for(index2 = 0; index2 < index; index2++) {
1821 xge_hal_channel_close(lldev->ring_channel[index2], option);
1826 #ifdef XGE_FEATURE_LRO
1827 status = xge_hal_lro_init(1, lldev->devh);
1828 if (status != XGE_HAL_OK) {
1829 xge_trace(XGE_ERR, "cannot init Rx LRO got status code %d", status);
1834 /* Open FIFO (Tx) channel */
1835 if((status = xgell_tx_open(lldev, option))) {
1836 xge_trace(XGE_ERR, "Opening Tx channel failed (Status: %d)\n",
/* Tx open failed: unwind all Rx rings opened above. */
1838 for(index = 0; index < XGE_RING_COUNT; index++) {
1839 xge_hal_channel_close(lldev->ring_channel[index], option);
1847 /******************************************
1848 * Function: xgell_channel_close
1849 * Parameters: Per adapter xgelldev_t
1851 * Return: 0 for success, non-zero for
1853 * Description: Closes both Tx and Rx channels
1854 ******************************************/
1856 xgell_channel_close(xgelldev_t *lldev, xge_hal_channel_reopen_e option)
1864 /* Close FIFO (Tx) channel */
1865 xge_hal_channel_close(lldev->fifo_channel_0, option);
1867 /* Close Ring (Rx) channel */
1868 for(index = 0; index < XGE_RING_COUNT; index++) {
1869 xge_hal_channel_close(lldev->ring_channel[index], option);
1878 /******************************************
1879 * Function: dmamap_cb
1880 * Parameters: Parameter passed from dmamap
1881 * function, Segment, Number of
1882 * segments, error (if any)
1884 * Description: Callback function used for
1886 ******************************************/
1888 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
/* Stores only the first segment's bus address into *arg.
 * NOTE(review): assumes the mapping produced a single segment (nseg == 1)
 * and ignores 'error' — confirm the tag is created with nsegments = 1. */
1891 *(bus_addr_t *) arg = segs->ds_addr;
1895 /******************************************
1896 * Function: xgell_reset
1897 * Parameters: Per adapter xgelldev_t
1899 * Return: HAL status code/EPERM
1900 * Description: Resets the device
1901 ******************************************/
1903 xgell_reset(xgelldev_t *lldev)
1905 device_t dev = lldev->device;
1909 xge_trace(XGE_TRACE, "Reseting the chip");
1911 mtx_lock(&lldev->xge_lock);
1913 /* If the device is not initialized, return */
1914 if(!lldev->initialized) {
/* Reset = full terminate followed by re-initialize under xge_lock. */
1918 xge_terminate(dev, XGE_HAL_CHANNEL_OC_NORMAL);
1920 xge_initialize(dev, XGE_HAL_CHANNEL_OC_NORMAL);
1924 mtx_unlock(&lldev->xge_lock);
1929 /******************************************
1930 * Function: xge_setmulti
1931 * Parameters: Per adapter xgelldev_t
1934 * Description: Set an address as a multicast
1936 ******************************************/
1938 xge_setmulti(xgelldev_t *lldev)
1941 struct ifmultiaddr *ifma;
1943 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1944 struct ifnet *ifnetp = lldev->ifnetp;
/* NOTE(review): table_size is 47 but the clear loop below iterates 48
 * slots — confirm the intended hardware MAC-address table size. */
1947 int table_size = 47;
1948 xge_hal_status_e status = XGE_HAL_OK;
1949 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* NOTE(review): both branches test IFF_MULTICAST (toggled by the cached
 * all_multicast flag) while the error message keys off IFF_ALLMULTI —
 * presumably IFF_ALLMULTI was intended in the conditions; verify. */
1951 if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
1952 status = xge_hal_device_mcast_enable(hldev);
1953 lldev->all_multicast = 1;
1955 else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
1956 status = xge_hal_device_mcast_disable(hldev);
1957 lldev->all_multicast = 0;
1960 if(status != XGE_HAL_OK) {
1961 printf("Failed to %s multicast (status: %d)\n",
1962 (ifnetp->if_flags & IFF_ALLMULTI ? "enable" : "disable"),
1966 /* Updating address list */
1967 IF_ADDR_LOCK(ifnetp);
1969 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
1970 if(ifma->ifma_addr->sa_family != AF_LINK) {
1973 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1976 IF_ADDR_UNLOCK(ifnetp);
1978 if((!lldev->all_multicast) && (index)) {
1979 lldev->macaddr_count = (index + 1);
1980 if(lldev->macaddr_count > table_size) {
1984 /* Clear old addresses */
1985 for(index = 0; index < 48; index++) {
1986 xge_hal_device_macaddr_set(hldev, (offset + index),
1991 /* Add new addresses */
1992 IF_ADDR_LOCK(ifnetp);
1994 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
1995 if(ifma->ifma_addr->sa_family != AF_LINK) {
1998 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1999 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2002 IF_ADDR_UNLOCK(ifnetp);
2007 /******************************************
2008 * Function: xge_enable_promisc
2009 * Parameters: Adapter structure
2011 * Description: Enables promiscuous mode
2012 ******************************************/
2014 xge_enable_promisc(xgelldev_t *lldev)
2016 struct ifnet *ifnetp = lldev->ifnetp;
2017 xge_hal_device_t *hldev = lldev->devh;
2018 xge_hal_pci_bar0_t *bar0 = NULL;
2023 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
/* Only act when the administrator has requested promiscuous mode. */
2025 if(ifnetp->if_flags & IFF_PROMISC) {
2026 xge_hal_device_promisc_enable(lldev->devh);
2029 * When operating in promiscuous mode, don't strip the VLAN tag
/* Read-modify-write of the RX_PA_CFG register: clear the strip bit. */
2031 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2033 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2034 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2035 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2038 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2044 /******************************************
2045 * Function: xge_disable_promisc
2046 * Parameters: Adapter structure
2048 * Description: Disables promiscuous mode
2049 ******************************************/
2051 xge_disable_promisc(xgelldev_t *lldev)
2053 xge_hal_device_t *hldev = lldev->devh;
2054 xge_hal_pci_bar0_t *bar0 = NULL;
2059 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2061 xge_hal_device_promisc_disable(lldev->devh);
2064 * Strip VLAN tag when operating in non-promiscuous mode
/* Read-modify-write: the clear immediately followed by set of the same
 * STRIP_VLAN_TAG_MODE(1) bit nets out to enabling VLAN-tag stripping;
 * the redundant clear mirrors xge_enable_promisc() for symmetry. */
2066 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2068 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2069 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2070 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2073 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2078 /******************************************
2079 * Function: changeMtu
2080 * Parameters: Pointer to per-device
2081 * xgelldev_t structure, New
2084 * Description: Changes MTU size to requested
2085 ******************************************/
2087 changeMtu(xgelldev_t *lldev, int NewMtu)
2089 struct ifnet *ifnetp = lldev->ifnetp;
2090 xge_hal_device_t *hldev = lldev->devh;
2096 /* Check requested MTU size for boundary */
2097 if(xge_hal_device_mtu_check(hldev, NewMtu) != XGE_HAL_OK) {
2098 xge_trace(XGE_ERR, "Invalid MTU");
/* If the interface is already up, reconfigure buffer mode and
 * reinitialize under the lock so the new MTU takes effect immediately. */
2103 if(lldev->initialized != 0) {
2104 mtx_lock(&lldev->xge_lock);
2107 ifnetp->if_mtu = NewMtu;
2108 changeBufmode(lldev, NewMtu);
2109 xge_init_locked((void *)lldev);
2111 mtx_unlock(&lldev->xge_lock);
/* Not initialized yet: just record the MTU and buffer-mode choice. */
2114 ifnetp->if_mtu = NewMtu;
2115 changeBufmode(lldev, NewMtu);
2123 /******************************************
2124 * Function: changeBufmode
2125 * Parameters: Pointer to per-device
2126 * xgelldev_t structure, New
2129 * Description: Updates RingConfiguration structure
2130 * depending the NewMtu size.
2131 ******************************************/
2133 changeBufmode (xgelldev_t *lldev, int NewMtu)
2135 xge_hal_ring_config_t * pRingConfig;
2136 xge_hal_device_t *hldev = lldev->devh;
2137 device_t dev = lldev->device;
2138 int buffer_length = 0, buffer_index = 0, index;
2140 pRingConfig = &(hldev->config.ring);
2141 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
/* Frames that fit in one jumbo page use 1/2/3-buffer mode depending on
 * which compile-time feature is enabled; larger frames fall through to
 * 5-buffer mode below. */
2143 if((NewMtu + XGE_HAL_MAC_HEADER_MAX_SIZE) <= MJUMPAGESIZE) {
2144 #if defined(XGE_FEATURE_BUFFER_MODE_3)
2145 xge_os_printf("%s: 3 Buffer Mode Enabled",
2146 device_get_nameunit(dev));
2147 for(index = 0; index < XGE_RING_COUNT; index++) {
2148 pRingConfig->queue[index].buffer_mode =
2149 XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
2151 pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
2152 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
2153 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
2154 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
2155 lldev->rxd_mbuf_len[2] = NewMtu;
2156 lldev->rxd_mbuf_cnt = 3;
2158 #if defined(XGE_FEATURE_BUFFER_MODE_2)
2159 xge_os_printf("%s: 2 Buffer Mode Enabled",
2160 device_get_nameunit(dev));
2161 for(index = 0; index < XGE_RING_COUNT; index++) {
/* NOTE(review): the 2-buffer path programs BUFFER_MODE_3 into the queue
 * config while lldev->buffer_mode is set to MODE_2 two lines below —
 * looks like a copy/paste slip; confirm against the HAL's expectations. */
2162 pRingConfig->queue[index].buffer_mode =
2163 XGE_HAL_RING_QUEUE_BUFFER_MODE_3;
2165 pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
2166 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_2;
2167 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
2168 lldev->rxd_mbuf_len[1] = NewMtu;
2169 lldev->rxd_mbuf_cnt = 2;
2171 for(index = 0; index < XGE_RING_COUNT; index++) {
2172 pRingConfig->queue[index].buffer_mode =
2173 XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
2175 pRingConfig->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
2176 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
2177 lldev->rxd_mbuf_len[0] = NewMtu;
2178 lldev->rxd_mbuf_cnt = 1;
2183 #if defined(XGE_FEATURE_BUFFER_MODE_3) || defined (XGE_FEATURE_BUFFER_MODE_2)
2184 xge_os_printf("2 or 3 Buffer mode is not supported for given MTU");
2185 xge_os_printf("So changing buffer mode to 5 buffer mode\n");
2187 xge_os_printf("%s: 5 Buffer Mode Enabled",
2188 device_get_nameunit(dev));
2189 for(index = 0; index < XGE_RING_COUNT; index++) {
2190 pRingConfig->queue[index].buffer_mode =
2191 XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2193 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2194 buffer_length = NewMtu;
2196 lldev->rxd_mbuf_len[0] = XGE_HAL_MAC_HEADER_MAX_SIZE;
2197 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
/* Split the payload across as many MJUMPAGESIZE buffers as needed;
 * the remainder goes into the last slot, aligned via BUFALIGN. */
2199 while(buffer_length > MJUMPAGESIZE) {
2200 buffer_length -= MJUMPAGESIZE;
2201 lldev->rxd_mbuf_len[buffer_index] = MJUMPAGESIZE;
2205 BUFALIGN(buffer_length);
2207 lldev->rxd_mbuf_len[buffer_index] = buffer_length;
2208 lldev->rxd_mbuf_cnt = buffer_index+1;
2214 /*************************************************************
2217 * @dev: Device structure
2218 * @option: Normal/Reset option for channels
2220 * Called by both init and reset functions to enable device, interrupts, and to
2223 **************************************************************/
2224 void xge_initialize(device_t dev, xge_hal_channel_reopen_e option)
2228 struct ifaddr *ifaddrp;
2229 struct sockaddr_dl *sockaddrp;
2230 unsigned char *macaddr;
2231 xgelldev_t *lldev = (xgelldev_t *) device_get_softc(dev);
2232 xge_hal_device_t *hldev = lldev->devh;
2233 struct ifnet *ifnetp = lldev->ifnetp;
2234 int status = XGE_HAL_OK;
2236 xge_trace(XGE_TRACE, "Set MTU size");
2237 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
2238 if(status != XGE_HAL_OK) {
2239 xge_trace(XGE_ERR, "Setting HAL device MTU failed (Status: %d)",
2245 /* Enable HAL device */
2246 xge_hal_device_enable(hldev);
2248 /* Get MAC address and update in HAL */
2249 ifaddrp = ifnetp->if_addr;
2250 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
2251 sockaddrp->sdl_type = IFT_ETHER;
2252 sockaddrp->sdl_alen = ifnetp->if_addrlen;
2253 macaddr = LLADDR(sockaddrp);
2254 xge_trace(XGE_TRACE,
2255 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
2256 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
2257 *(macaddr + 4), *(macaddr + 5));
2258 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
2259 if(status != XGE_HAL_OK) {
2261 "Setting MAC address failed (Status: %d)\n", status);
2264 /* Opening channels */
/* Drop xge_lock around channel open — channel setup presumably sleeps
 * or allocates; caller is expected to hold the lock on entry. */
2265 mtx_unlock(&lldev->xge_lock);
2266 status = xgell_channel_open(lldev, option);
2267 mtx_lock(&lldev->xge_lock);
2272 /* Set appropriate flags */
2273 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
/* NOTE(review): IFF_DRV_OACTIVE is an if_drv_flags bit; clearing it in
 * if_flags (below) looks wrong — compare xge_terminate(), which clears
 * it in if_drv_flags. Confirm and fix in a code pass. */
2274 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
2276 /* Checksum capability */
2277 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
2278 (CSUM_TCP | CSUM_UDP) : 0;
2280 #ifdef XGE_FEATURE_TSO
2281 if(ifnetp->if_capenable & IFCAP_TSO4)
2282 ifnetp->if_hwassist |= CSUM_TSO;
2285 /* Enable interrupts */
2286 xge_hal_device_intr_enable(hldev);
2288 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
2290 /* Disable promiscuous mode */
2291 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
2292 xge_enable_promisc(lldev);
2294 /* Device is initialized */
2295 lldev->initialized = 1;
2296 xge_os_mdelay(1000);
2303 /*******************************************************
2306 * @dev: Device structure
2307 * @option: Normal/Reset option for channels
2309 * Called by both stop and reset functions to disable device, interrupts, and to
2311 ******************************************************/
2312 void xge_terminate(device_t dev, xge_hal_channel_reopen_e option)
2316 xgelldev_t *lldev = (xgelldev_t *)device_get_softc(dev);
2317 xge_hal_device_t *hldev = lldev->devh;
2318 struct ifnet *ifnetp = lldev->ifnetp;
2320 /* Set appropriate flags */
2321 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2324 callout_stop(&lldev->timer);
2326 /* Disable interrupts */
2327 xge_hal_device_intr_disable(hldev);
/* Drop xge_lock while draining the HAL event queue; caller is expected
 * to hold the lock on entry (see xgell_reset / xge_stop). */
2329 mtx_unlock(&lldev->xge_lock);
2330 xge_queue_flush(xge_hal_device_queue(lldev->devh));
2331 mtx_lock(&lldev->xge_lock);
2333 /* Disable HAL device */
2334 if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2335 xge_trace(XGE_ERR, "Disabling HAL device failed");
2338 /* Close Tx and Rx channels */
2339 xgell_channel_close(lldev, option);
2341 /* Reset HAL device */
2342 xge_hal_device_reset(hldev);
2344 xge_os_mdelay(1000);
2345 lldev->initialized = 0;
2347 if_link_state_change(ifnetp, LINK_STATE_DOWN);
2352 /******************************************
2353 * Function: xgell_set_mbuf_cflags
2354 * Parameters: mbuf structure pointer
2356 * Description: This fuction will set the csum_flag of the mbuf
2357 ******************************************/
2358 void xgell_set_mbuf_cflags(mbuf_t pkt)
/* Mark the packet as fully checksum-verified (IP header and pseudo-header
 * data) so the stack skips software checksum validation; 0xffff is the
 * "valid" csum_data sentinel expected with CSUM_PSEUDO_HDR. */
2360 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2361 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2362 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2363 pkt->m_pkthdr.csum_data = htons(0xffff);
2366 #ifdef XGE_FEATURE_LRO
2367 /******************************************
2368 * Function: xgell_lro_flush_sessions
2369 * Parameters: Per adapter xgelldev_t
2371 * Description: This function will flush the LRO session and send the
2372 * accumulated LRO packet to Upper layer.
2373 ******************************************/
2374 void xgell_lro_flush_sessions(xgelldev_t *lldev)
2377 struct ifnet *ifnetp = lldev->ifnetp;
2378 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
/* Walk every open LRO session: stamp checksum flags on the aggregate
 * mbuf, hand it to the stack with xge_lock dropped (if_input may
 * re-enter the driver), then close the session. */
2380 while (NULL != (lro = xge_hal_lro_get_next_session(hldev))) {
2381 xgell_set_mbuf_cflags(lro->os_buf);
2384 mtx_unlock(&lldev->xge_lock);
2385 (*ifnetp->if_input)(ifnetp, lro->os_buf);
2386 mtx_lock(&lldev->xge_lock);
2388 xge_hal_lro_close_session(lro);
2392 /******************************************
2393 * Function: xgell_accumulate_large_rx
2394 * Parameters: Descriptor info structure, current mbuf structure,
2395 * packet length, Per adapter structure, Rx Desc private structure
2397 * Description: This function will accumulate packets to form the LRO
2398 * packets based on various condition.
2399 ******************************************/
2400 void xgell_accumulate_large_rx(xge_hal_dtr_info_t *ext_info,mbuf_t pkt,
2401 int pkt_length, xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv)
2404 lro_t *lro, *lro_end3;
2405 xge_hal_status_e status;
2406 unsigned char * temp;
2407 struct ifnet *ifnetp = lldev->ifnetp;
/* Let the HAL classify this frame against open LRO sessions; the verdict
 * in 'status' drives the dispatch chain below. */
2409 status = xge_hal_accumulate_large_rx(pkt->m_data, &tcp, &pkt_length,
2410 &lro, ext_info, lldev->devh, &lro_end3);
2412 temp = (unsigned char *)tcp;
/* New session: this mbuf becomes both head and tail of the LRO chain. */
2414 if(status == XGE_HAL_INF_LRO_BEGIN) {
2415 pkt->m_flags |= M_PKTHDR;
2416 pkt->m_pkthdr.rcvif = ifnetp;
2417 lro->os_buf = lro->os_buf_end = pkt;
2419 else if(status == XGE_HAL_INF_LRO_CONT) {
2421 * Current mbuf will be combine to form LRO frame,
2422 * So mask the pkthdr of the flag variable for current mbuf
/* NOTE(review): 0xFFFD hard-codes ~M_PKTHDR's historical value; prefer
 * pkt->m_flags &= ~M_PKTHDR to survive flag renumbering. Same pattern
 * recurs in the END_1 branch below. */
2424 pkt->m_flags = pkt->m_flags & 0xFFFD; //Mask pkthdr
2425 pkt->m_data = (u8 *)tcp;
2426 pkt->m_len = pkt_length;
2429 * Combine the current mbuf to the LRO frame and update
2430 * the LRO's pkthdr len accordingly
2432 lro->os_buf_end->m_next = pkt;
2433 lro->os_buf_end = pkt;
2434 lro->os_buf->m_pkthdr.len += pkt_length;
/* END_2: session must be flushed and the current packet sent separately. */
2436 else if(status == XGE_HAL_INF_LRO_END_2) {
2437 lro->os_buf->m_flags |= M_EOR;
2439 /* Update the Checksum flags of the LRO frames */
2440 xgell_set_mbuf_cflags(lro->os_buf);
2442 /* Post-Read sync */
2443 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2444 BUS_DMASYNC_POSTREAD);
2447 * Current packet can not be combined with LRO frame.
2448 * Flush the previous LRO frames and send the current packet
/* if_input may re-enter the driver, so drop xge_lock around it. */
2451 mtx_unlock(&lldev->xge_lock);
2452 (*ifnetp->if_input)(ifnetp, lro->os_buf);
2453 (*ifnetp->if_input)(ifnetp, pkt);
2454 mtx_lock(&lldev->xge_lock);
2455 xge_hal_lro_close_session(lro);
/* END_1: current packet completes the session — append, then flush. */
2457 else if(status == XGE_HAL_INF_LRO_END_1) {
2458 pkt->m_flags = pkt->m_flags & 0xFFFD;
2459 pkt->m_data = (u8 *)tcp;
2460 pkt->m_len = pkt_length;
2461 lro->os_buf_end->m_next = pkt;
2462 lro->os_buf->m_pkthdr.len += pkt_length;
2463 xgell_set_mbuf_cflags(lro->os_buf);
2464 lro->os_buf->m_flags |= M_EOR;
2466 /* Post-Read sync */
2467 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2468 BUS_DMASYNC_POSTREAD);
2471 mtx_unlock(&lldev->xge_lock);
2472 (*ifnetp->if_input)(ifnetp, lro->os_buf);
2473 mtx_lock(&lldev->xge_lock);
2475 xge_hal_lro_close_session(lro);
/* END_3: old session flushes; current packet seeds a new session. */
2477 else if(status == XGE_HAL_INF_LRO_END_3) {
2478 pkt->m_flags |= M_PKTHDR;
2479 pkt->m_len = pkt_length;
2480 pkt->m_pkthdr.len = pkt_length;
2481 lro_end3->os_buf = lro_end3->os_buf_end = pkt;
2482 lro->os_buf->m_flags |= M_EOR;
2483 xgell_set_mbuf_cflags(lro->os_buf);
2485 /* Post-Read sync */
2486 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2487 BUS_DMASYNC_POSTREAD);
2490 mtx_unlock(&lldev->xge_lock);
2491 (*ifnetp->if_input)(ifnetp, lro->os_buf);
2492 mtx_lock(&lldev->xge_lock);
2493 xge_hal_lro_close_session(lro);
/* Frame is not LRO-eligible (or session table full): pass it up as-is. */
2495 else if((status == XGE_HAL_INF_LRO_UNCAPABLE) ||
2496 (status == XGE_HAL_INF_LRO_SESSIONS_XCDED)) {
2497 pkt->m_flags |= M_PKTHDR;
2498 pkt->m_len = pkt_length;
2499 pkt->m_pkthdr.len = pkt_length;
2501 /* Post-Read sync */
2502 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2503 BUS_DMASYNC_POSTREAD);
2506 mtx_unlock(&lldev->xge_lock);
2507 (*ifnetp->if_input)(ifnetp, pkt);
2508 mtx_lock(&lldev->xge_lock);
2513 /******************************************
2514 * Function: xgell_rx_compl
2515 * Parameters: Channel handle, descriptor,
2516 * transfer code, userdata
2518 * Return: HAL status code
2519 * Description: If the interrupt is because
2520 * of a received frame or if
2521 * the receive ring contains
2522 * fresh as yet un-processed
2523 * frames, this function is
2525 ******************************************/
/*
 * xgell_rx_compl -- HAL Rx completion callback.
 *
 * For each completed receive descriptor this routine: validates the channel
 * userdata and the transfer code, recovers the descriptor's private data,
 * determines the received packet length (xge_ring_dtr_get), replenishes the
 * descriptor with fresh mbuf(s) (xgell_get_buf / xgell_get_buf_3b_5b),
 * re-posts the descriptor to the ring, and hands the filled mbuf to the
 * stack via if_input() -- or into the LRO accumulation path when
 * XGE_FEATURE_LRO is compiled in.
 *
 * NOTE(review): this is a sparse listing -- the embedded line numbers jump
 * (2527 -> 2530, 2594 -> 2597, ...), so braces, 'else' arms, NULL checks
 * and the 'do {' that pairs with the trailing 'while' are on elided lines.
 * Comments below describe only the visible lines.
 */
2527 xgell_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2530 xge_hal_dtr_info_t ext_info;
2531 xge_hal_status_e status_code;
2532 struct ifnet *ifnetp;
2535 mbuf_t mbuf_up = NULL;
2536 xgell_rx_priv_t *rxd_priv = NULL, old_rxd_priv;
2542 /*get the user data portion*/
2543 xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
/* NOTE(review): the NULL-check guarding this failure path is on an
 * elided line (2544). */
2545 xge_ctrace(XGE_TRACE, "xgeX: %s: Failed to get user data",
2547 return XGE_HAL_FAIL;
2549 dev = lldev->device;
/* Caller must hold the per-adapter lock for the whole completion run. */
2551 mtx_assert((&lldev->xge_lock), MA_OWNED);
2553 /* get the interface pointer */
2554 ifnetp = lldev->ifnetp;
/* Bail out if the interface has been brought down. */
2557 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2558 return XGE_HAL_FAIL;
/* Bad t_code: let the HAL account for it and recycle the descriptor
 * with its current buffer; the packet is dropped. */
2562 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2563 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2564 xge_hal_ring_dtr_post(channelh,dtr);
2568 /* Get the private data for this descriptor*/
2569 rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2572 xge_trace(XGE_ERR, "Failed to get descriptor private data");
2573 return XGE_HAL_FAIL;
2576 /* Taking backup of rxd_priv structure details of current packet */
2577 xge_os_memcpy(&old_rxd_priv, rxd_priv, sizeof(xgell_rx_priv_t));
2579 /* Prepare one buffer to send it to upper layer -- since the upper
2580 * layer frees the buffer do not use rxd_priv->buffer
2581 * Meanwhile prepare a new buffer, do mapping, use it in the
2582 * current descriptor and post descriptor back to ring channel */
2583 mbuf_up = rxd_priv->bufferArray[0];
2585 /* Gets details of mbuf i.e., packet length */
2586 xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
/* Replenish: 1-buffer mode refills a single buffer; 3/5-buffer modes
 * refill the whole set.  (The 'status_code =' left-hand side is on an
 * elided line, 2588.) */
2589 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2590 xgell_get_buf(dtr, rxd_priv, lldev, 0) :
2591 xgell_get_buf_3b_5b(dtr, rxd_priv, lldev);
2593 if(status_code != XGE_HAL_OK) {
2594 xge_trace(XGE_ERR, "No memory");
2597 * Do not deliver the received buffer to the stack. Instead,
2598 * Re-post the descriptor with the same buffer
2601 /* Get back previous rxd_priv structure before posting */
2602 xge_os_memcpy(rxd_priv, &old_rxd_priv, sizeof(xgell_rx_priv_t));
2604 xge_hal_ring_dtr_post(channelh, dtr);
2608 /* Get the extended information */
2609 xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2611 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2613 * As we have allocated a new mbuf for this descriptor, post
2614 * this descriptor with new mbuf back to ring channel
/* Save the HW-extracted VLAN tag before re-posting the descriptor; it
 * is attached to the mbuf (ether_vtag/M_VLANTAG) further below. */
2616 vlan_tag = ext_info.vlan;
2617 xge_hal_ring_dtr_post(channelh, dtr);
/* Deliver-with-cksum-offload path: not IP-fragmented, is TCP/UDP, and
 * both L3 and L4 hardware checksums verified OK. */
2618 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2619 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2620 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2621 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2622 /* set Checksum Flag */
2623 xgell_set_mbuf_cflags(mbuf_up);
2624 #ifdef XGE_FEATURE_LRO
2625 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2626 xgell_accumulate_large_rx(&ext_info, mbuf_up,
2627 mbuf_up->m_len, lldev, rxd_priv);
2630 /* Post-Read sync for buffers*/
2631 bus_dmamap_sync(lldev->dma_tag_rx,
2632 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
/* Drop the adapter lock across if_input() so the stack may re-enter
 * the driver (e.g. to transmit) without recursing on xge_lock. */
2635 mtx_unlock(&lldev->xge_lock);
2636 (*ifnetp->if_input)(ifnetp, mbuf_up);
2637 mtx_lock(&lldev->xge_lock);
2642 * Packet with erroneous checksum, let the upper layer
2646 /* Post-Read sync for buffers*/
2647 bus_dmamap_sync(lldev->dma_tag_rx,
2648 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2650 #ifdef XGE_FEATURE_LRO
2651 xgell_lro_flush_sessions(lldev);
/* Tag the mbuf with the VLAN id saved above (guard 'if' elided). */
2655 mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2656 mbuf_up->m_flags |= M_VLANTAG;
2659 mtx_unlock(&lldev->xge_lock);
2660 (*ifnetp->if_input)(ifnetp, mbuf_up);
2661 mtx_lock(&lldev->xge_lock);
/* Multi-buffer (3b/5b) modes follow the same pattern, syncing every
 * per-buffer DMA map instead of just dmainfo[0]. */
2666 * As we have allocated a new mbuf for this descriptor, post
2667 * this descriptor with new mbuf back to ring channel
2669 xge_hal_ring_dtr_post(channelh, dtr);
2670 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2671 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2672 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2673 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2674 /* set Checksum Flag */
2675 xgell_set_mbuf_cflags(mbuf_up);
2676 #ifdef XGE_FEATURE_LRO
2677 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2678 xgell_accumulate_large_rx(&ext_info, mbuf_up,
2679 mbuf_up->m_len, lldev, rxd_priv);
2682 /* Post-Read sync for buffers*/
2683 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2684 /* Post-Read sync */
2685 bus_dmamap_sync(lldev->dma_tag_rx,
2686 rxd_priv->dmainfo[index].dma_map,
2687 BUS_DMASYNC_POSTREAD);
2691 mtx_unlock(&lldev->xge_lock);
2692 (*ifnetp->if_input)(ifnetp, mbuf_up);
2693 mtx_lock(&lldev->xge_lock);
2698 * Packet with erroneous checksum, let the upper layer
2701 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2702 /* Post-Read sync */
2703 bus_dmamap_sync(lldev->dma_tag_rx,
2704 rxd_priv->dmainfo[index].dma_map,
2705 BUS_DMASYNC_POSTREAD);
2708 #ifdef XGE_FEATURE_LRO
2709 xgell_lro_flush_sessions(lldev);
2712 mtx_unlock(&lldev->xge_lock);
2713 (*ifnetp->if_input)(ifnetp, mbuf_up);
2714 mtx_lock(&lldev->xge_lock);
/* Iterate over every completed descriptor on the ring channel (the
 * matching 'do {' is on an elided line). */
2717 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2719 #ifdef XGE_FEATURE_LRO
/* Flush any LRO sessions accumulated during this completion run. */
2720 xgell_lro_flush_sessions(lldev);
2728 /******************************************
2729 * Function: xge_ring_dtr_get
2730 * Parameters: mbuf pointer, channel handler
2731 * descriptor, Per adapter xgelldev_t
2732 * structure pointer,
2733 * Rx private structure
2734 * Return: HAL status code
2735 * Description: Updates the mbuf lengths
2736 * depending on packet lengths.
2737 ******************************************/
/*
 * xge_ring_dtr_get -- retrieve packet length(s) from a completed Rx
 * descriptor and propagate them onto the mbuf (chain).
 *
 * 1-buffer mode: a single 1b_get sets both m_len and m_pkthdr.len on
 * mbuf_up.  3/5-buffer modes: per-buffer lengths are fetched, each mbuf
 * in rxd_priv->bufferArray gets its m_len, the mbufs are linked via
 * m_next, and the total goes into mbuf_up->m_pkthdr.len.
 *
 * NOTE(review): sparse listing -- loop body braces and the 'm' cursor
 * assignments are on elided lines.
 */
2739 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2740 xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv)
/* {0,0} zero-initializes all five elements (C aggregate init); the
 * explicit memzero below is belt-and-braces for multi-buffer modes. */
2743 int pkt_length[5]={0,0}, pkt_len=0;
2744 dma_addr_t dma_data[5];
2750 if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2751 xge_os_memzero(pkt_length, sizeof(pkt_length));
2754 * Retrieve data of interest from the completed descriptor -- This
2755 * returns the packet length
2757 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2758 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2761 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
/* Assign per-buffer lengths and chain the mbufs together. */
2764 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2765 m->m_len = pkt_length[index];
2767 if(index < (lldev->rxd_mbuf_cnt-1)) {
2768 m->m_next = rxd_priv->bufferArray[index + 1];
2774 pkt_len+=pkt_length[index];
2778 * Since 2 buffer mode is an exceptional case where data is in 3rd
2779 * buffer but not in 2nd buffer
2781 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2782 m->m_len = pkt_length[2];
2783 pkt_len+=pkt_length[2];
2787 * Update length of newly created buffer to be sent up with packet
2790 mbuf_up->m_pkthdr.len = pkt_len;
/* 1-buffer mode ('else' arm elided): single length fetch. */
2794 * Retrieve data of interest from the completed descriptor -- This
2795 * returns the packet length
2797 xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2800 * Update length of newly created buffer to be sent up with packet
2803 mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2810 /******************************************
2811 * Function: xge_send
2812 * Parameters: Pointer to ifnet structure
2814 * Description: Transmit entry point
2815 ******************************************/
/*
 * xge_send -- if_start transmit entry point.  Serializes transmission
 * by taking the per-adapter lock around xge_send_locked().
 */
2817 xge_send(struct ifnet *ifnetp)
2819 xgelldev_t *lldev = ifnetp->if_softc;
2821 mtx_lock(&lldev->xge_lock);
2822 xge_send_locked(ifnetp);
2823 mtx_unlock(&lldev->xge_lock);
/*
 * xge_send_locked -- drain the interface send queue into the Tx FIFO.
 *
 * For each queued packet: defragments mbuf chains that exceed the FIFO's
 * max_frags, reserves a Tx descriptor, loads a DMA map to obtain the
 * segment list, programs one descriptor buffer per segment, sets VLAN /
 * checksum-offload / TSO bits as applicable, and posts the descriptor.
 * Stops early (setting IFF_DRV_OACTIVE) when free descriptors drop to
 * the low-water mark, scheduling a retry through the HAL event queue.
 *
 * Caller must hold lldev->xge_lock (asserted below).
 *
 * NOTE(review): sparse listing -- error-path braces, 'continue'/'break'
 * statements and some closing braces are on elided lines.
 */
2827 xge_send_locked(struct ifnet *ifnetp)
/* 'static' is safe only because all callers hold xge_lock (asserted
 * below); this array is shared across invocations, not reentrant. */
2830 static bus_dma_segment_t segs[MAX_SEGS];
2831 xge_hal_status_e status_code;
2832 unsigned int max_fragments;
2833 xgelldev_t *lldev = ifnetp->if_softc;
2834 xge_hal_channel_h channelh = lldev->fifo_channel_0;
2835 mbuf_t m_head = NULL;
2836 mbuf_t m_buf = NULL;
2837 xgell_tx_priv_t *ll_tx_priv = NULL;
2838 register unsigned int count = 0;
2839 unsigned int nsegs = 0;
2842 max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
2844 mtx_assert((&lldev->xge_lock), MA_OWNED);
2846 /* If device is not initialized, return */
2847 if((!lldev->initialized) ||
2848 (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))) {
2849 xge_trace(XGE_ERR, "Device is not initialized");
2854 * Get the number of free descriptors in the FIFO channel and return if
2855 * the count is less than the XGELL_TX_LEVEL_LOW -- the low threshold
2857 count = xge_hal_channel_dtr_count(channelh);
2858 if(count <= XGELL_TX_LEVEL_LOW) {
2859 ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
2860 xge_trace(XGE_TRACE, "Free descriptor count %d/%d at low threshold",
2861 count, XGELL_TX_LEVEL_LOW);
2863 /* Serialized -- through queue */
2864 xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
2865 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev);
2869 /* This loop will be executed for each packet in the kernel maintained
2870 * queue -- each packet can be with fragments as an mbuf chain */
2871 while((ifnetp->if_snd.ifq_head) &&
2872 (xge_hal_channel_dtr_count(channelh) > XGELL_TX_LEVEL_LOW)) {
2873 IF_DEQUEUE(&ifnetp->if_snd, m_head);
/* Count the fragments in the chain; defragment if over the limit. */
2875 for(count = 0, m_buf = m_head; m_buf != NULL;
2876 m_buf = m_buf->m_next) {
2882 if(count >= max_fragments) {
2883 m_buf = m_defrag(m_head, M_DONTWAIT);
2889 /* Reserve descriptors */
2890 status_code = xge_hal_fifo_dtr_reserve(channelh, &dtr);
2892 switch(status_code) {
2893 case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
2894 xge_trace(XGE_ERR, "Channel is not ready");
2897 case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
2898 xge_trace(XGE_ERR, "Out of descriptors");
2903 "Reserving (Tx) descriptors failed. Status %d",
/* Propagate the packet's VLAN tag (0 when untagged) to the TxD. */
2910 vlan_tag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
2911 xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
2913 /* Update Tx private structure for this descriptor */
2914 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
/* Stash the mbuf so xgell_tx_compl can free it after DMA completes. */
2915 ll_tx_priv->buffer = m_head;
2918 * Do mapping -- Required DMA tag has been created in xge_init
2919 * function and DMA maps have already been created in the
2920 * xgell_tx_replenish function.
2921 * Returns number of segments through nsegs
2923 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
2924 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
2925 xge_trace(XGE_ERR, "DMA map load with segments failed");
2929 /* Set descriptor buffer for header and each fragment/segment */
2932 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
2933 (dma_addr_t)htole64(segs[count].ds_addr),
2934 segs[count].ds_len);
2936 } while(count < nsegs);
2938 /* Pre-write Sync of mapping */
2939 bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
2940 BUS_DMASYNC_PREWRITE);
2942 #ifdef XGE_FEATURE_TSO
2943 if((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2944 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
/* Enable IPv4/TCP/UDP checksum offload when the stack requests any
 * hardware assist. */
2948 if(ifnetp->if_hwassist > 0) {
2949 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
2950 | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
2953 /* Post descriptor to FIFO channel */
2954 xge_hal_fifo_dtr_post(channelh, dtr);
2956 /* Send the same copy of mbuf packet to BPF (Berkeley Packet Filter)
2957 * listener so that we can use tools like tcpdump */
2958 ETHER_BPF_MTAP(ifnetp, m_head);
2962 /* Prepend the packet back to queue */
2963 IF_PREPEND(&ifnetp->if_snd, m_head);
/* Arm the watchdog; cleared again in xgell_tx_compl. */
2965 ifnetp->if_timer = 15;
2968 /******************************************
2969 * Function: xgell_get_buf
2970 * Parameters: Per adapter xgelldev_t
2971 * structure pointer, descriptor,
2972 * Rx private structure, rxd_priv buffer
2973 * buffer index for mapping
2974 * Return: HAL status code
2975 * Description: Gets buffer from system mbuf
2977 ******************************************/
/*
 * xgell_get_buf -- allocate an mbuf+cluster for one Rx buffer slot,
 * DMA-map it, and record it in the descriptor's private data.
 *
 * 'index' selects which slot of rxd_priv->bufferArray/dmainfo to refill
 * (always 0 in 1-buffer mode).  Cluster size is MJUMPAGESIZE in 1-buffer
 * mode; otherwise it is chosen from lldev->rxd_mbuf_len[index].
 * Returns XGE_HAL_OK, or XGE_HAL_FAIL on allocation/mapping failure.
 *
 * NOTE(review): sparse listing -- some 'else' lines, NULL checks and the
 * closing brace/return are elided.
 */
2979 xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
2980 xgelldev_t *lldev, int index)
2982 register mbuf_t mp = NULL;
2983 struct ifnet *ifnetp = lldev->ifnetp;
2984 int retValue = XGE_HAL_OK;
2986 int BUFLEN = 0, CLUSTLEN = 0;
2988 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2989 CLUSTLEN = MJUMPAGESIZE;
2990 BUFLEN = MJUMPAGESIZE;
/* Multi-buffer modes: pick the smallest cluster that fits this slot. */
2993 BUFLEN = lldev->rxd_mbuf_len[index];
2994 if(BUFLEN < MCLBYTES) {
2995 CLUSTLEN = MCLBYTES;
2998 CLUSTLEN = MJUMPAGESIZE;
3002 /* Get mbuf with attached cluster */
3003 mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, CLUSTLEN);
3005 xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3006 retValue = XGE_HAL_FAIL;
3010 /* Update mbuf's length, packet length and receive interface */
3011 mp->m_len = mp->m_pkthdr.len = BUFLEN;
3012 mp->m_pkthdr.rcvif = ifnetp;
3014 /* Unload DMA map of mbuf in current descriptor */
3015 bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map);
/* Map the new cluster; dmamap_cb returns the bus address via paddr. */
3018 if(bus_dmamap_load(lldev->dma_tag_rx , rxd_priv->dmainfo[index].dma_map,
3019 mtod(mp, void*), mp->m_len, dmamap_cb , &paddr , 0)) {
3020 xge_trace(XGE_ERR, "Loading DMA map failed");
3022 retValue = XGE_HAL_FAIL;
3026 /* Update descriptor private data */
3027 rxd_priv->bufferArray[index] = mp;
3028 rxd_priv->dmainfo[index].dma_phyaddr = htole64(paddr);
3030 /* Pre-Read/Write sync */
3031 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map,
3032 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3034 /* Set descriptor buffer */
3035 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3036 xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3044 /******************************************
3045 * Function: xgell_get_buf_3b_5b
3046 * Parameters: Per adapter xgelldev_t
3047 * structure pointer, descriptor,
3048 * Rx private structure
3049 * Return: HAL status code
3050 * Description: Gets buffers from system mbuf
3052 ******************************************/
/*
 * xgell_get_buf_3b_5b -- refill every buffer slot of a multi-buffer Rx
 * descriptor (2/3/5-buffer modes) and program the descriptor's buffer
 * pointers/sizes via the 3b/5b HAL setters.
 *
 * On a per-slot allocation failure, frees the slots filled so far and
 * returns the failing status (cleanup/return lines partly elided).
 */
3054 xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
3057 bus_addr_t dma_pointers[5];
3059 int retValue = XGE_HAL_OK, index;
3062 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3063 retValue = xgell_get_buf(dtrh, rxd_priv, lldev, index);
3064 if(retValue != XGE_HAL_OK) {
/* Unwind: free the mbufs already attached for earlier slots. */
3065 for(newindex = 0; newindex < index; newindex++) {
3066 m_freem(rxd_priv->bufferArray[newindex]);
/* Build the pointer/size arrays; a zero-length slot reuses the previous
 * slot's address with a 1-byte dummy size. */
3072 for(index = 0; index < lldev->buffer_mode; index++) {
3073 if(lldev->rxd_mbuf_len[index] != 0) {
3074 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3075 dma_sizes[index] = lldev->rxd_mbuf_len[index];
3078 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3079 dma_sizes[index] = 1;
3083 /* Assigning second buffer to third pointer in 2 buffer mode */
3084 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3085 dma_pointers[2] = dma_pointers[1];
3086 dma_sizes[2] = dma_sizes[1];
3090 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3091 xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3094 xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3100 /******************************************
3101 * Function: xgell_tx_compl
3102 * Parameters: Channel handle, descriptor,
3104 * userdata -> per adapter
3105 * xgelldev_t structure as void *
3106 * Return: HAL status code
3107 * Description: If an interrupt was raised
3108 * to indicate DMA complete of
3109 * the Tx packet, this function
3110 * is called. It identifies the
3111 * last TxD whose buffer was
3112 * freed and frees all skbs
3113 * whose data have already DMA'ed
3114 * into the NICs internal memory.
3115 ******************************************/
/*
 * xgell_tx_compl -- HAL Tx completion callback.
 *
 * For every completed Tx descriptor: handles a bad t_code via the HAL,
 * unloads the DMA map, frees the transmitted mbuf chain, and returns the
 * descriptor to the FIFO free pool.  Clears the watchdog timer on entry
 * and IFF_DRV_OACTIVE on exit so queued transmits can resume.
 *
 * NOTE(review): sparse listing -- the 'do {' opener, the m_freem() of
 * m_buffer, and closing braces are on elided lines.
 */
3117 xgell_tx_compl(xge_hal_channel_h channelh,
3118 xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3120 xgell_tx_priv_t *ll_tx_priv;
3122 xgelldev_t *lldev = (xgelldev_t *)userdata;
3123 struct ifnet *ifnetp = lldev->ifnetp;
/* Disarm the watchdog armed by xge_send_locked. */
3125 ifnetp->if_timer = 0;
3127 /* For each completed descriptor: Get private structure, free buffer,
3128 * do unmapping, and free descriptor */
3131 xge_trace(XGE_TRACE, "t_code %d", t_code);
3132 xge_hal_device_handle_tcode(channelh, dtr, t_code);
3135 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3136 m_buffer = ll_tx_priv->buffer;
3137 bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3139 ll_tx_priv->buffer = NULL;
3140 xge_hal_fifo_dtr_free(channelh, dtr);
3141 } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
/* Descriptors freed: allow the stack to queue more transmits. */
3143 ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3148 /******************************************
3149 * Function: xgell_tx_initial_replenish
3150 * Parameters: Channel handle, descriptor,
3151 * index (not used), userdata
3152 * (not used), channel
3153 * open/close/reopen option.
3154 * Return: HAL status code
3155 * Description: Creates DMA maps to be used
3157 ******************************************/
/*
 * xgell_tx_initial_replenish -- per-descriptor init callback invoked by
 * the HAL when the Tx FIFO channel is opened.  Creates the DMA map that
 * xge_send_locked later loads for each transmitted mbuf chain.
 * Returns XGE_HAL_OK, or XGE_HAL_FAIL on any failure.
 */
3159 xgell_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3160 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3162 xgell_tx_priv_t *txd_priv = NULL;
3163 int retValue = XGE_HAL_OK;
3164 device_t dev = NULL;
3166 /* Get the user data portion from channel handle */
3167 xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
/* NOTE(review): the NULL check guarding this path is elided (3168). */
3169 xge_trace(XGE_ERR, "Failed to get user data");
3170 retValue = XGE_HAL_FAIL;
3173 dev = lldev->device;
3175 /* Get the private data */
3176 txd_priv = (xgell_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3177 if(txd_priv == NULL) {
3178 xge_trace(XGE_ERR, "Failed to get descriptor private data");
3179 retValue = XGE_HAL_FAIL;
3183 /* Create DMA map for this descriptor */
3184 if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3185 &txd_priv->dma_map)) {
3186 xge_trace(XGE_ERR, "DMA map creation for Tx descriptor failed");
3187 retValue = XGE_HAL_FAIL;
3195 /******************************************
3196 * Function: xgell_rx_initial_replenish
3197 * Parameters: Channel handle, descriptor,
3198 * ring index, userdata
3199 * (not used), channel
3200 * open/close/reopen option.
3201 * Return: HAL status code
3202 * Description: Replenish descriptor with
3203 * rx_buffer in Rx buffer pool.
3204 ******************************************/
/*
 * xgell_rx_initial_replenish -- per-descriptor init callback invoked by
 * the HAL when a ring channel is opened.  Allocates the bufferArray of
 * mbuf pointers, creates the DMA map(s) for the descriptor's buffer
 * slot(s), and attaches freshly allocated buffers via xgell_get_buf /
 * xgell_get_buf_3b_5b.  On failure, destroys any maps created and frees
 * bufferArray (rxinit_err_out path).
 */
3206 xgell_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3207 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3209 xgell_rx_priv_t *rxd_priv = NULL;
3210 int retValue = XGE_HAL_OK;
3211 struct ifnet *ifnetp;
3215 /* Get the user data portion from channel handle */
3216 xgelldev_t *lldev = xge_hal_channel_userdata(channelh);
3218 xge_ctrace(XGE_ERR, "xgeX: %s: Failed to get user data",
3220 retValue = XGE_HAL_FAIL;
3223 dev = lldev->device;
3225 /* Get the private data */
3226 rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3227 if(rxd_priv == NULL) {
3228 xge_trace(XGE_ERR, "Failed to get descriptor private data");
3229 retValue = XGE_HAL_FAIL;
/* NOTE(review): sizeof(rxd_priv->bufferArray) is the size of the
 * pointer member itself.  That matches one mbuf_t per slot only because
 * both are pointers; 'sizeof *rxd_priv->bufferArray' would state the
 * intent directly -- verify against the xgell_rx_priv_t declaration. */
3233 rxd_priv->bufferArray =
3234 malloc(((sizeof(rxd_priv->bufferArray)) * (lldev->rxd_mbuf_cnt)),
3235 M_DEVBUF, M_NOWAIT);
3237 if(rxd_priv->bufferArray == NULL) {
3239 "Failed to allocate buffers for Rxd private structure");
3240 retValue = XGE_HAL_FAIL;
3244 ifnetp = lldev->ifnetp;
3246 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3247 /* Create DMA map for these descriptors*/
3248 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3249 &rxd_priv->dmainfo[0].dma_map)) {
3251 "DMA map creation for Rx descriptor failed");
3252 retValue = XGE_HAL_FAIL;
3253 goto rxinit_err_out;
3255 /* Get a buffer, attach it to this descriptor */
3256 retValue = xgell_get_buf(dtrh, rxd_priv, lldev, 0);
/* Multi-buffer modes: one DMA map per buffer slot. */
3259 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3260 /* Create DMA map for this descriptor */
3261 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3262 &rxd_priv->dmainfo[index1].dma_map)) {
3264 "Jumbo DMA map creation for Rx descriptor failed");
/* Unwind: destroy the maps created before the failure. */
3265 for(index2 = index1 - 1; index2 >= 0; index2--) {
3266 bus_dmamap_destroy(lldev->dma_tag_rx,
3267 rxd_priv->dmainfo[index2].dma_map);
3269 retValue = XGE_HAL_FAIL;
3270 goto rxinit_err_out;
3273 retValue = xgell_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3276 if(retValue != XGE_HAL_OK) {
3277 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3278 bus_dmamap_destroy(lldev->dma_tag_rx,
3279 rxd_priv->dmainfo[index1].dma_map);
3281 goto rxinit_err_out;
/* rxinit_err_out: common failure exit -- release the pointer array. */
3288 free(rxd_priv->bufferArray,M_DEVBUF);
3293 /******************************************
3294 * Function: xgell_rx_term
3295 * Parameters: Channel handle, descriptor,
3296 * descriptor state, userdata
3297 * (not used), channel
3298 * open/close/reopen option.
3300 * Description: Called by HAL to terminate
3301 * all DTRs for ring channels.
3302 ******************************************/
/*
 * xgell_rx_term -- per-descriptor teardown callback invoked by the HAL
 * when a ring channel is closed.  For each posted descriptor: syncs and
 * unloads the DMA map(s), frees the attached mbuf(s), destroys the
 * map(s), frees the bufferArray, and frees the descriptor itself.
 */
3304 xgell_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3305 xge_hal_dtr_state_e state, void *userdata,
3306 xge_hal_channel_reopen_e reopen)
3308 xgell_rx_priv_t *rxd_priv;
3310 struct ifnet *ifnetp;
3316 /* Descriptor state is not "Posted" */
3317 if(state != XGE_HAL_DTR_STATE_POSTED) {
3318 xge_ctrace(XGE_ERR, "xgeX: %s: Descriptor not posted\n",
3323 /* Get the user data portion */
3324 lldev = xge_hal_channel_userdata(channelh);
3326 dev = lldev->device;
3327 ifnetp = lldev->ifnetp;
3329 /* Get the private data */
3330 rxd_priv = (xgell_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3332 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3333 /* Post-Read sync */
3334 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
3335 BUS_DMASYNC_POSTREAD);
3337 /* Do unmapping and destroy DMA map */
3338 bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map);
3339 m_freem(rxd_priv->bufferArray[0]);
3340 bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map);
/* Multi-buffer modes: tear down every slot. */
3343 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3344 /* Post-Read sync */
3345 bus_dmamap_sync(lldev->dma_tag_rx,
3346 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3348 /* Do unmapping and destroy DMA map */
3349 bus_dmamap_unload(lldev->dma_tag_rx,
3350 rxd_priv->dmainfo[index].dma_map);
3352 bus_dmamap_destroy(lldev->dma_tag_rx,
3353 rxd_priv->dmainfo[index].dma_map);
3355 /* Free the buffer */
/* NOTE(review): this path uses m_free (single mbuf) while the 1-buffer
 * path above uses m_freem (whole chain) -- confirm the per-slot mbufs
 * here are never chained, else this leaks m_next links. */
3356 m_free(rxd_priv->bufferArray[index]);
3359 free(rxd_priv->bufferArray,M_DEVBUF);
3361 /* Free the descriptor */
3362 xge_hal_ring_dtr_free(channelh, dtrh);
3370 /******************************************
3371 * Function: xgell_tx_term
3372 * Parameters: Channel handle, descriptor,
3373 * descriptor state, userdata
3374 * (not used), channel
3375 * open/close/reopen option.
3377 * Description: Called by HAL to terminate
3378 * all DTRs for fifo channels.
3379 ******************************************/
/*
 * xgell_tx_term -- per-descriptor teardown callback invoked by the HAL
 * when a fifo channel is closed.  Destroys the DMA map created for this
 * descriptor by xgell_tx_initial_replenish.
 */
3381 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3382 xge_hal_dtr_state_e state, void *userdata,
3383 xge_hal_channel_reopen_e reopen)
3385 xgell_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3386 xgelldev_t *lldev = (xgelldev_t *)userdata;
3390 /* Destroy DMA map */
3391 bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3396 /******************************************
3399 * FreeBSD device interface entry points
3400 ******************************************/
/* newbus method table mapping generic device operations to this
 * driver's probe/attach/detach/shutdown handlers. */
3401 static device_method_t xge_methods[] = {
3402 DEVMETHOD(device_probe, xge_probe),
3403 DEVMETHOD(device_attach, xge_attach),
3404 DEVMETHOD(device_detach, xge_detach),
3405 DEVMETHOD(device_shutdown, xge_shutdown),
/* Driver descriptor (name/methods/softc-size fields on elided lines). */
3409 static driver_t xge_driver = {
3410 static devclass_t xge_devclass;
/* Register the driver with the PCI bus under the name "nxge". */
3415 DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);