2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/if_nxge.h>
30 #include <dev/nxge/xge-osdep.h>
31 #include <net/if_arp.h>
32 #include <sys/types.h>
34 #include <net/if_vlan_var.h>
36 int copyright_print = 0;
37 int hal_driver_init_count = 0;
38 size_t size = sizeof(int);
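/*
 * copyright_print gates the one-time printing of the copyright banner in
 * xge_probe; hal_driver_init_count reference-counts HAL driver
 * initializations so the HAL is terminated only when the last adapter is
 * freed (see xge_driver_initialize / xge_resources_free).
 */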
40 static inline void xge_flush_txds(xge_hal_channel_h);
44 * Probes for Xframe devices
49 * BUS_PROBE_DEFAULT if device is supported
50 * ENXIO if device is not supported
53 xge_probe(device_t dev)
55 int devid = pci_get_device(dev);
56 int vendorid = pci_get_vendor(dev);
59 if(vendorid == XGE_PCI_VENDOR_ID) {
60 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
61 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
62 if(!copyright_print) {
63 xge_os_printf(XGE_COPYRIGHT);
66 device_set_desc_copy(dev,
67 "Neterion Xframe 10 Gigabit Ethernet Adapter");
68 retValue = BUS_PROBE_DEFAULT;
77 * Sets HAL parameter values (from kenv).
79 * @dconfig Device Configuration
83 xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
85 int qindex, tindex, revision;
87 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
89 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
90 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
91 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
92 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
93 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
94 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
96 XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
97 XGE_DEFAULT_ENABLED_TSO);
98 XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
99 XGE_DEFAULT_ENABLED_LRO);
100 XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
101 XGE_DEFAULT_ENABLED_MSI);
103 XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
104 XGE_DEFAULT_LATENCY_TIMER);
105 XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
106 XGE_DEFAULT_MAX_SPLITS_TRANS);
107 XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
108 XGE_DEFAULT_MMRB_COUNT);
109 XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
110 XGE_DEFAULT_SHARED_SPLITS);
111 XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
112 XGE_DEFAULT_ISR_POLLING_CNT);
113 XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
114 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
116 XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
117 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
118 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
119 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
120 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
121 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
122 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
123 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
124 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
125 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
126 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
127 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
128 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
129 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
131 XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
132 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
133 XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
134 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
135 XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
136 XGE_DEFAULT_FIFO_MAX_FRAGS);
138 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
139 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
140 XGE_DEFAULT_FIFO_QUEUE_INTR);
141 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
142 XGE_DEFAULT_FIFO_QUEUE_MAX);
143 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
144 qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
146 for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
147 dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
148 dconfig->fifo.queue[qindex].configured = 1;
150 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
151 urange_a, qindex, tindex,
152 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
153 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
154 urange_b, qindex, tindex,
155 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
156 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
157 urange_c, qindex, tindex,
158 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
159 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
160 ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
161 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
162 ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
163 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
164 ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
165 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
166 ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
167 XGE_GET_PARAM_FIFO_QUEUE_TTI(
168 "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
169 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
170 XGE_GET_PARAM_FIFO_QUEUE_TTI(
171 "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
172 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
173 XGE_GET_PARAM_FIFO_QUEUE_TTI(
174 "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
175 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
179 XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
180 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
182 XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
183 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
185 XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
186 XGE_DEFAULT_BUFFER_MODE);
187 if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
188 (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
189 xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
190 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
193 for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
194 dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
195 dconfig->ring.queue[qindex].priority = 0;
196 dconfig->ring.queue[qindex].configured = 1;
197 dconfig->ring.queue[qindex].buffer_mode =
198 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
199 XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
201 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
202 XGE_DEFAULT_RING_QUEUE_MAX);
203 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
204 qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
205 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
206 dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
207 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
208 indicate_max_pkts, qindex,
209 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
210 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
211 backoff_interval_us, qindex,
212 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
214 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
215 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
216 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
217 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
218 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
219 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
220 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
221 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
222 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
223 timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
224 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
225 timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
226 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
227 urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
228 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
229 urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
230 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
231 urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
234 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
235 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
236 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
237 (int)(PAGE_SIZE / 32))
238 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
239 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
242 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
243 if(checkdev != NULL) {
244 /* Check whether the bridge revision is 0x12 or older */
245 revision = pci_read_config(checkdev,
246 xge_offsetof(xge_hal_pci_config_t, revision), 1);
247 if(revision <= 0x12) {
248 /* Reduce mmrb_count and the number of outstanding split transactions */
249 dconfig->mmrb_count = 1;
250 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
256 * xge_rx_buffer_sizes_set
257 * Set buffer sizes based on Rx buffer mode
259 * @lldev Per-adapter Data
260 * @buffer_mode Rx Buffer Mode
263 xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
266 int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
267 int buffer_size = mtu + frame_header;
269 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
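/*
 * Per-buffer sizes: in 1-buffer mode buffer 0 holds the entire frame
 * (MTU + MAC header). In 2/3-buffer modes buffer 0 holds only the MAC
 * header and the last buffer holds the payload (MTU). In 5-buffer mode
 * buffer 0 holds the MAC header, buffer 1 the TCP/IP headers, and the
 * remaining buffers split the payload into MJUMPAGESIZE-sized chunks.
 */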
271 if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
272 lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
274 lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
276 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
277 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
279 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
281 buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
282 while(buffer_size > MJUMPAGESIZE) {
283 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
284 buffer_size -= MJUMPAGESIZE;
286 XGE_ALIGN_TO(buffer_size, 128);
287 lldev->rxd_mbuf_len[index] = buffer_size;
288 lldev->rxd_mbuf_cnt = index + 1;
291 for(index = 0; index < buffer_mode; index++)
292 xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
293 lldev->rxd_mbuf_len[index]);
297 * xge_buffer_mode_init
298 * Init Rx buffer mode
300 * @lldev Per-adapter Data
304 xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
306 int index = 0, buffer_size = 0;
307 xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
309 buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
311 if(lldev->enabled_lro)
312 (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
314 (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
316 lldev->rxd_mbuf_cnt = lldev->buffer_mode;
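/* 2-buffer mode is implemented on top of the HAL's 3-buffer rings with
 * scatter mode B; 1- and 5-buffer modes use scatter mode A. */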
317 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
318 XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
319 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
322 XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
323 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
325 xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
327 xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
328 ((lldev->enabled_tso) ? "Enabled":"Disabled"));
329 xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
330 ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
331 xge_os_printf("%s: Rx %d Buffer Mode Enabled",
332 device_get_nameunit(lldev->device), lldev->buffer_mode);
336 * xge_driver_initialize
337 * Initializes HAL driver (common for all devices)
340 * XGE_HAL_OK if success
341 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
344 xge_driver_initialize(void)
346 xge_hal_uld_cbs_t uld_callbacks;
347 xge_hal_driver_config_t driver_config;
348 xge_hal_status_e status = XGE_HAL_OK;
350 /* Initialize HAL driver */
351 if(!hal_driver_init_count) {
352 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
353 xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
356 * Initial and maximum size of the queue used to store the events
357 * like Link up/down (xge_hal_event_e)
359 driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
360 driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;
362 uld_callbacks.link_up = xge_callback_link_up;
363 uld_callbacks.link_down = xge_callback_link_down;
364 uld_callbacks.crit_err = xge_callback_crit_err;
365 uld_callbacks.event = xge_callback_event;
367 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
368 if(status != XGE_HAL_OK) {
369 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
373 hal_driver_init_count = hal_driver_init_count + 1;
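/* Turn on all HAL debug modules and set the trace level to XGE_TRACE */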
375 xge_hal_driver_debug_module_mask_set(0xffffffff);
376 xge_hal_driver_debug_level_set(XGE_TRACE);
384 * Initializes, adds and sets media
386 * @devc Device Handle
389 xge_media_init(device_t devc)
391 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
393 /* Initialize Media */
394 ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
397 /* Add supported media */
398 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
399 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
400 ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
401 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
402 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
405 ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
410 * Save PCI configuration space
415 xge_pci_space_save(device_t dev)
417 struct pci_devinfo *dinfo = NULL;
419 dinfo = device_get_ivars(dev);
420 xge_trace(XGE_TRACE, "Saving PCI configuration space");
421 pci_cfg_save(dev, dinfo, 0);
425 * xge_pci_space_restore
426 * Restore saved PCI configuration space
431 xge_pci_space_restore(device_t dev)
433 struct pci_devinfo *dinfo = NULL;
435 dinfo = device_get_ivars(dev);
436 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
437 pci_cfg_restore(dev, dinfo);
444 * @lldev Per-adapter Data
447 xge_msi_info_save(xge_lldev_t * lldev)
449 xge_os_pci_read16(lldev->pdev, NULL,
450 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
451 &lldev->msi_info.msi_control);
452 xge_os_pci_read32(lldev->pdev, NULL,
453 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
454 &lldev->msi_info.msi_lower_address);
455 xge_os_pci_read32(lldev->pdev, NULL,
456 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
457 &lldev->msi_info.msi_higher_address);
458 xge_os_pci_read16(lldev->pdev, NULL,
459 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
460 &lldev->msi_info.msi_data);
464 * xge_msi_info_restore
465 * Restore saved MSI info
470 xge_msi_info_restore(xge_lldev_t *lldev)
473 * If the interface is brought down and back up, traffic fails. The MSI
474 * information was observed to get reset on down, so restore it here.
476 xge_os_pci_write16(lldev->pdev, NULL,
477 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
478 lldev->msi_info.msi_control);
480 xge_os_pci_write32(lldev->pdev, NULL,
481 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
482 lldev->msi_info.msi_lower_address);
484 xge_os_pci_write32(lldev->pdev, NULL,
485 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
486 lldev->msi_info.msi_higher_address);
488 xge_os_pci_write16(lldev->pdev, NULL,
489 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
490 lldev->msi_info.msi_data);
495 * Initializes mutexes used in driver
497 * @lldev Per-adapter Data
500 xge_mutex_init(xge_lldev_t *lldev)
504 sprintf(lldev->mtx_name_drv, "%s_drv",
505 device_get_nameunit(lldev->device));
506 mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
509 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
510 sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
511 device_get_nameunit(lldev->device), qindex);
512 mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
519 * Destroys mutexes used in driver
521 * @lldev Per-adapter Data
524 xge_mutex_destroy(xge_lldev_t *lldev)
528 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
529 mtx_destroy(&lldev->mtx_tx[qindex]);
530 mtx_destroy(&lldev->mtx_drv);
535 * Print device and driver information
537 * @lldev Per-adapter Data
540 xge_print_info(xge_lldev_t *lldev)
542 device_t dev = lldev->device;
543 xge_hal_device_t *hldev = lldev->devh;
544 xge_hal_status_e status = XGE_HAL_OK;
546 const char *xge_pci_bus_speeds[17] = {
549 "PCIX(M1) 66MHz Bus",
550 "PCIX(M1) 100MHz Bus",
551 "PCIX(M1) 133MHz Bus",
552 "PCIX(M2) 133MHz Bus",
553 "PCIX(M2) 200MHz Bus",
554 "PCIX(M2) 266MHz Bus",
556 "PCIX(M1) 66MHz Bus (Not Supported)",
557 "PCIX(M1) 100MHz Bus (Not Supported)",
558 "PCIX(M1) 133MHz Bus (Not Supported)",
566 xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
567 device_get_nameunit(dev),
568 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
569 hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
570 xge_os_printf("%s: Serial Number %s",
571 device_get_nameunit(dev), hldev->vpd_data.serial_num);
573 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
574 status = xge_hal_mgmt_reg_read(hldev, 0,
575 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
576 if(status != XGE_HAL_OK)
577 xge_trace(XGE_ERR, "Error for getting bus speed");
579 xge_os_printf("%s: Adapter is on %s bit %s",
580 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
581 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
584 xge_os_printf("%s: Using %s Interrupts",
585 device_get_nameunit(dev),
586 (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
590 * xge_create_dma_tags
591 * Creates DMA tags for both Tx and Rx
595 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
598 xge_create_dma_tags(device_t dev)
600 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
601 xge_hal_status_e status = XGE_HAL_FAIL;
602 int mtu = (lldev->ifnetp)->if_mtu, maxsize;
605 status = bus_dma_tag_create(
606 bus_get_dma_tag(dev), /* Parent */
607 PAGE_SIZE, /* Alignment */
609 BUS_SPACE_MAXADDR, /* Low Address */
610 BUS_SPACE_MAXADDR, /* High Address */
611 NULL, /* Filter Function */
612 NULL, /* Filter Function Arguments */
613 MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */
614 XGE_MAX_SEGS, /* Number of Segments */
615 MCLBYTES, /* Maximum Segment Size */
616 BUS_DMA_ALLOCNOW, /* Flags */
617 NULL, /* Lock Function */
618 NULL, /* Lock Function Arguments */
619 (&lldev->dma_tag_tx)); /* DMA Tag */
623 maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
624 if(maxsize <= MCLBYTES) {
628 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
629 maxsize = MJUMPAGESIZE;
631 maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
635 status = bus_dma_tag_create(
636 bus_get_dma_tag(dev), /* Parent */
637 PAGE_SIZE, /* Alignment */
639 BUS_SPACE_MAXADDR, /* Low Address */
640 BUS_SPACE_MAXADDR, /* High Address */
641 NULL, /* Filter Function */
642 NULL, /* Filter Function Arguments */
643 maxsize, /* Maximum Size */
644 1, /* Number of Segments */
645 maxsize, /* Maximum Segment Size */
646 BUS_DMA_ALLOCNOW, /* Flags */
647 NULL, /* Lock Function */
648 NULL, /* Lock Function Arguments */
649 (&lldev->dma_tag_rx)); /* DMA Tag */
653 status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
654 &lldev->extra_dma_map);
662 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
664 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
666 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
668 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
669 status = XGE_HAL_FAIL;
675 * xge_confirm_changes
676 * Disables and Enables interface to apply requested change
678 * @lldev Per-adapter Data
679 * @option Requested change (e.g. XGE_SET_MTU)
681 * Returns 0 or Error Number
684 xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
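/* If the device has not been initialized yet there is nothing to restart;
 * just record the change (see the exit path below). */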
686 if(lldev->initialized == 0) goto _exit1;
688 mtx_lock(&lldev->mtx_drv);
689 if_down(lldev->ifnetp);
690 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
692 if(option == XGE_SET_MTU)
693 (lldev->ifnetp)->if_mtu = lldev->mtu;
695 xge_buffer_mode_init(lldev, lldev->mtu);
697 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
698 if_up(lldev->ifnetp);
699 mtx_unlock(&lldev->mtx_drv);
703 /* Request was to change MTU and device not initialized */
704 if(option == XGE_SET_MTU) {
705 (lldev->ifnetp)->if_mtu = lldev->mtu;
706 xge_buffer_mode_init(lldev, lldev->mtu);
713 * xge_change_lro_status
714 * Enable/Disable LRO feature
716 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
718 * Returns 0 or error number.
721 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
723 xge_lldev_t *lldev = (xge_lldev_t *)arg1;
724 int request = lldev->enabled_lro, status = XGE_HAL_OK;
726 status = sysctl_handle_int(oidp, &request, arg2, req);
727 if((status != XGE_HAL_OK) || (!req->newptr))
730 if((request < 0) || (request > 1)) {
735 /* Return if current and requested states are same */
736 if(request == lldev->enabled_lro){
737 xge_trace(XGE_ERR, "LRO is already %s",
738 ((request) ? "enabled" : "disabled"));
742 lldev->enabled_lro = request;
743 xge_confirm_changes(lldev, XGE_CHANGE_LRO);
744 arg2 = lldev->enabled_lro;
751 * xge_add_sysctl_handlers
752 * Registers sysctl parameter value update handlers
754 * @lldev Per-adapter data
757 xge_add_sysctl_handlers(xge_lldev_t *lldev)
759 struct sysctl_ctx_list *context_list =
760 device_get_sysctl_ctx(lldev->device);
761 struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
763 SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
764 "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
765 xge_change_lro_status, "I", "Enable or disable LRO feature");
770 * Connects the driver to the system if the probe was successful
775 xge_attach(device_t dev)
777 xge_hal_device_config_t *device_config;
778 xge_hal_device_attr_t attr;
780 xge_hal_device_t *hldev;
781 xge_pci_info_t *pci_info;
782 struct ifnet *ifnetp;
783 int rid, rid0, rid1, error;
784 int msi_count = 0, status = XGE_HAL_OK;
785 int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
787 device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
789 XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
790 attach_out_config, ENOMEM);
793 lldev = (xge_lldev_t *) device_get_softc(dev);
795 XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
799 xge_mutex_init(lldev);
801 error = xge_driver_initialize();
802 if(error != XGE_HAL_OK) {
803 xge_resources_free(dev, xge_free_mutex);
804 XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
809 (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
811 xge_resources_free(dev, xge_free_terminate_hal_driver);
812 XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
817 /* Our private structure */
819 (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
821 xge_resources_free(dev, xge_free_hal_device);
822 XGE_EXIT_ON_ERR("Memory allocation for PCI info failed",
825 lldev->pdev = pci_info;
826 pci_info->device = dev;
829 pci_enable_busmaster(dev);
831 /* Get virtual address for BAR0 */
833 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
835 if(pci_info->regmap0 == NULL) {
836 xge_resources_free(dev, xge_free_pci_info);
837 XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
840 attr.bar0 = (char *)pci_info->regmap0;
842 pci_info->bar0resource = (xge_bus_resource_t*)
843 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
844 if(pci_info->bar0resource == NULL) {
845 xge_resources_free(dev, xge_free_bar0);
846 XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
849 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
850 rman_get_bustag(pci_info->regmap0);
851 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
852 rman_get_bushandle(pci_info->regmap0);
853 ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
856 /* Get virtual address for BAR1 */
858 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
860 if(pci_info->regmap1 == NULL) {
861 xge_resources_free(dev, xge_free_bar0_resource);
862 XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
865 attr.bar1 = (char *)pci_info->regmap1;
867 pci_info->bar1resource = (xge_bus_resource_t*)
868 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
869 if(pci_info->bar1resource == NULL) {
870 xge_resources_free(dev, xge_free_bar1);
871 XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
874 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
875 rman_get_bustag(pci_info->regmap1);
876 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
877 rman_get_bushandle(pci_info->regmap1);
878 ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
881 /* Save PCI config space */
882 xge_pci_space_save(dev);
884 attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
885 attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
886 attr.irqh = lldev->irqhandle;
887 attr.cfgh = pci_info;
888 attr.pdev = pci_info;
890 /* Initialize device configuration parameters */
891 xge_init_params(device_config, dev);
894 if(lldev->enabled_msi) {
895 /* Number of MSI messages supported by device */
896 msi_count = pci_msi_count(dev);
898 /* Device supports MSI */
900 xge_trace(XGE_ERR, "MSI count: %d", msi_count);
901 xge_trace(XGE_ERR, "Now, driver supporting 1 message");
904 error = pci_alloc_msi(dev, &msi_count);
907 xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
908 enable_msi = XGE_HAL_INTR_MODE_MSI;
913 xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
917 lldev->enabled_msi = enable_msi;
919 /* Allocate resource for irq */
920 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
921 (RF_SHAREABLE | RF_ACTIVE));
922 if(lldev->irq == NULL) {
923 xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
924 ((rid == 0) ? "line interrupt" : "MSI"));
926 error = pci_release_msi(dev);
928 xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
930 xge_trace(XGE_ERR, "Requires reboot to use MSI again");
932 xge_trace(XGE_ERR, "Trying line interrupts");
934 lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
935 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
936 (RF_SHAREABLE | RF_ACTIVE));
938 if(lldev->irq == NULL) {
939 xge_trace(XGE_ERR, "Allocating irq resource failed");
940 xge_resources_free(dev, xge_free_bar1_resource);
946 device_config->intr_mode = lldev->enabled_msi;
948 xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
949 lldev->enabled_msi, msi_count);
952 /* Initialize HAL device */
953 error = xge_hal_device_initialize(hldev, &attr, device_config);
954 if(error != XGE_HAL_OK) {
955 xge_resources_free(dev, xge_free_irq_resource);
956 XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
960 xge_hal_device_private_set(hldev, lldev);
962 error = xge_interface_setup(dev);
968 ifnetp = lldev->ifnetp;
969 ifnetp->if_mtu = device_config->mtu;
973 /* Associate interrupt handler with the device */
974 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
975 error = bus_setup_intr(dev, lldev->irq,
976 (INTR_TYPE_NET | INTR_MPSAFE),
977 #if __FreeBSD_version > 700030
980 xge_isr_msi, lldev, &lldev->irqhandle);
981 xge_msi_info_save(lldev);
984 error = bus_setup_intr(dev, lldev->irq,
985 (INTR_TYPE_NET | INTR_MPSAFE),
986 #if __FreeBSD_version > 700030
989 xge_isr_line, lldev, &lldev->irqhandle);
992 xge_resources_free(dev, xge_free_media_interface);
993 XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
997 xge_print_info(lldev);
999 xge_add_sysctl_handlers(lldev);
1001 xge_buffer_mode_init(lldev, device_config->mtu);
1004 xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1010 * xge_resources_free
1011 * Undo what-all we did during load/attach
1013 * @dev Device Handle
1014 * @error Identifies what-all to undo
1017 xge_resources_free(device_t dev, xge_lables_e error)
1020 xge_pci_info_t *pci_info;
1021 xge_hal_device_t *hldev;
1025 lldev = (xge_lldev_t *) device_get_softc(dev);
1026 pci_info = lldev->pdev;
1029 hldev = lldev->devh;
1033 /* Teardown interrupt handler - device association */
1034 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1036 case xge_free_media_interface:
1038 ifmedia_removeall(&lldev->media);
1041 ether_ifdetach(lldev->ifnetp);
1042 if_free(lldev->ifnetp);
1044 xge_hal_device_private_set(hldev, NULL);
1045 xge_hal_device_disable(hldev);
1047 case xge_free_terminate_hal_device:
1049 xge_hal_device_terminate(hldev);
1051 case xge_free_irq_resource:
1052 /* Release IRQ resource */
1053 bus_release_resource(dev, SYS_RES_IRQ,
1054 ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1057 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1058 status = pci_release_msi(dev);
1062 "pci_release_msi returned %d", status);
1067 case xge_free_bar1_resource:
1068 /* Restore PCI configuration space */
1069 xge_pci_space_restore(dev);
1071 /* Free bar1resource */
1072 xge_os_free(NULL, pci_info->bar1resource,
1073 sizeof(xge_bus_resource_t));
1078 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1081 case xge_free_bar0_resource:
1082 /* Free bar0resource */
1083 xge_os_free(NULL, pci_info->bar0resource,
1084 sizeof(xge_bus_resource_t));
1089 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1092 case xge_free_pci_info:
1093 /* Disable Bus Master */
1094 pci_disable_busmaster(dev);
1096 /* Free pci_info_t */
1098 xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1100 case xge_free_hal_device:
1101 /* Free device configuration struct and HAL device */
1102 xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1104 case xge_free_terminate_hal_driver:
1105 /* Terminate HAL driver */
1106 hal_driver_init_count = hal_driver_init_count - 1;
1107 if(!hal_driver_init_count) {
1108 xge_hal_driver_terminate();
1111 case xge_free_mutex:
1112 xge_mutex_destroy(lldev);
1118 * Detaches the driver from the kernel
1120 * @dev Device Handle
1123 xge_detach(device_t dev)
1125 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1127 if(lldev->in_detach == 0) {
1128 lldev->in_detach = 1;
1130 xge_resources_free(dev, xge_free_all);
1138 * Shut down the device before system shutdown
1140 * @dev Device Handle
1143 xge_shutdown(device_t dev)
1145 xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1152 * xge_interface_setup
1155 * @dev Device Handle
1157 * Returns 0 on success, ENXIO/ENOMEM on failure
1160 xge_interface_setup(device_t dev)
1162 u8 mcaddr[ETHER_ADDR_LEN];
1163 xge_hal_status_e status;
1164 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1165 struct ifnet *ifnetp;
1166 xge_hal_device_t *hldev = lldev->devh;
1168 /* Get the MAC address of the device */
1169 status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1170 if(status != XGE_HAL_OK) {
1171 xge_resources_free(dev, xge_free_terminate_hal_device);
1172 XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
1175 /* Get interface ifnet structure for this Ether device */
1176 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1177 if(ifnetp == NULL) {
1178 xge_resources_free(dev, xge_free_terminate_hal_device);
1179 XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
1182 /* Initialize interface ifnet structure */
1183 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1184 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1185 ifnetp->if_baudrate = XGE_BAUDRATE;
1186 ifnetp->if_init = xge_init;
1187 ifnetp->if_softc = lldev;
1188 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1189 ifnetp->if_ioctl = xge_ioctl;
1190 ifnetp->if_start = xge_send;
1192 /* TODO: Check and assign optimal value */
1193 ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
1195 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1197 if(lldev->enabled_tso)
1198 ifnetp->if_capabilities |= IFCAP_TSO4;
1199 if(lldev->enabled_lro)
1200 ifnetp->if_capabilities |= IFCAP_LRO;
1202 ifnetp->if_capenable = ifnetp->if_capabilities;
1204 /* Attach the interface */
1205 ether_ifattach(ifnetp, mcaddr);
1212 * xge_callback_link_up
1213 * Callback for Link-up indication from HAL
1215 * @userdata Per-adapter data
1218 xge_callback_link_up(void *userdata)
1220 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1221 struct ifnet *ifnetp = lldev->ifnetp;
1223 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1224 if_link_state_change(ifnetp, LINK_STATE_UP);
1228 * xge_callback_link_down
1229 * Callback for Link-down indication from HAL
1231 * @userdata Per-adapter data
1234 xge_callback_link_down(void *userdata)
1236 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1237 struct ifnet *ifnetp = lldev->ifnetp;
1239 ifnetp->if_flags |= IFF_DRV_OACTIVE;
1240 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1244 * xge_callback_crit_err
1245 * Callback for Critical error indication from HAL
1247 * @userdata Per-adapter data
1248 * @type Event type (Enumerated hardware error)
1249 * @serr_data Hardware status
1252 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1254 xge_trace(XGE_ERR, "Critical Error");
1255 xge_reset(userdata);
1259 * xge_callback_event
1260 * Callback from HAL indicating that some event has been queued
1262 * @item Queued event item
1265 xge_callback_event(xge_queue_item_t *item)
1267 xge_lldev_t *lldev = NULL;
1268 xge_hal_device_t *hldev = NULL;
1269 struct ifnet *ifnetp = NULL;
1271 hldev = item->context;
1272 lldev = xge_hal_device_private(hldev);
1273 ifnetp = lldev->ifnetp;
1275 switch((int)item->event_type) {
1276 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1277 if(lldev->initialized) {
1278 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
1279 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1282 xge_queue_produce_context(
1283 xge_hal_device_queue(lldev->devh),
1284 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1289 case XGE_LL_EVENT_DEVICE_RESETTING:
1290 xge_reset(item->context);
1299 * xge_ifmedia_change
1300 * Media change driver callback
1302 * @ifnetp Interface Handle
1304 * Returns 0 if media is Ether else EINVAL
1307 xge_ifmedia_change(struct ifnet *ifnetp)
1309 xge_lldev_t *lldev = ifnetp->if_softc;
1310 struct ifmedia *ifmediap = &lldev->media;
1312 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1316 * xge_ifmedia_status
1317 * Media status driver callback
1319 * @ifnetp Interface Handle
1320 * @ifmr Interface Media Settings
1323 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1325 xge_hal_status_e status;
1327 xge_lldev_t *lldev = ifnetp->if_softc;
1328 xge_hal_device_t *hldev = lldev->devh;
1330 ifmr->ifm_status = IFM_AVALID;
1331 ifmr->ifm_active = IFM_ETHER;
1333 status = xge_hal_mgmt_reg_read(hldev, 0,
1334 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1335 if(status != XGE_HAL_OK) {
1336 xge_trace(XGE_TRACE, "Getting adapter status failed");
1340 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1341 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1342 ifmr->ifm_status |= IFM_ACTIVE;
1343 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1344 if_link_state_change(ifnetp, LINK_STATE_UP);
1347 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1355 * IOCTL to get statistics
1357 * @lldev Per-adapter data
1358 * @ifreqp Interface request
1361 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1363 xge_hal_status_e status = XGE_HAL_OK;
1368 cmd = retValue = fubyte(ifreqp->ifr_data);
1374 case XGE_QUERY_STATS:
1375 mtx_lock(&lldev->mtx_drv);
1376 status = xge_hal_stats_hw(lldev->devh,
1377 (xge_hal_stats_hw_info_t **)&info);
1378 mtx_unlock(&lldev->mtx_drv);
1379 if(status == XGE_HAL_OK) {
1380 if(copyout(info, ifreqp->ifr_data,
1381 sizeof(xge_hal_stats_hw_info_t)) == 0)
1385 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1390 case XGE_QUERY_PCICONF:
1391 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1393 mtx_lock(&lldev->mtx_drv);
1394 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1395 sizeof(xge_hal_pci_config_t));
1396 mtx_unlock(&lldev->mtx_drv);
1397 if(status == XGE_HAL_OK) {
1398 if(copyout(info, ifreqp->ifr_data,
1399 sizeof(xge_hal_pci_config_t)) == 0)
1404 "Getting PCI configuration failed (%d)", status);
1406 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1410 case XGE_QUERY_DEVSTATS:
1411 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1413 mtx_lock(&lldev->mtx_drv);
1414 status =xge_hal_mgmt_device_stats(lldev->devh, info,
1415 sizeof(xge_hal_stats_device_info_t));
1416 mtx_unlock(&lldev->mtx_drv);
1417 if(status == XGE_HAL_OK) {
1418 if(copyout(info, ifreqp->ifr_data,
1419 sizeof(xge_hal_stats_device_info_t)) == 0)
1423 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1426 xge_os_free(NULL, info,
1427 sizeof(xge_hal_stats_device_info_t));
1431 case XGE_QUERY_SWSTATS:
1432 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1434 mtx_lock(&lldev->mtx_drv);
1435 status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1436 sizeof(xge_hal_stats_sw_err_t));
1437 mtx_unlock(&lldev->mtx_drv);
1438 if(status == XGE_HAL_OK) {
1439 if(copyout(info, ifreqp->ifr_data,
1440 sizeof(xge_hal_stats_sw_err_t)) == 0)
1445 "Getting tcode statistics failed (%d)", status);
1447 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1451 case XGE_QUERY_DRIVERSTATS:
1452 if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1453 sizeof(xge_driver_stats_t)) == 0) {
1458 "Copyout of driver statistics failed (%d)", status);
1462 case XGE_READ_VERSION:
1463 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1464 if(info != NULL) {
1465 strcpy(info, XGE_DRIVER_VERSION);
1466 if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1468 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1472 case XGE_QUERY_DEVCONF:
1473 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1475 mtx_lock(&lldev->mtx_drv);
1476 status = xge_hal_mgmt_device_config(lldev->devh, info,
1477 sizeof(xge_hal_device_config_t));
1478 mtx_unlock(&lldev->mtx_drv);
1479 if(status == XGE_HAL_OK) {
1480 if(copyout(info, ifreqp->ifr_data,
1481 sizeof(xge_hal_device_config_t)) == 0)
1485 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1488 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1492 case XGE_QUERY_BUFFER_MODE:
1493 if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
1498 case XGE_SET_BUFFER_MODE_1:
1499 case XGE_SET_BUFFER_MODE_2:
1500 case XGE_SET_BUFFER_MODE_5:
1501 mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1502 if(copyout(&mode, ifreqp->ifr_data, sizeof(mode)) == 0)
1506 xge_trace(XGE_TRACE, "Nothing is matching");
1514 * xge_ioctl_registers
1515 * IOCTL to get registers
1517 * @lldev Per-adapter data
1518 * @ifreqp Interface request
1521 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1523 xge_register_t tmpdata;
1524 xge_register_t *data;
1525 xge_hal_status_e status = XGE_HAL_OK;
1526 int retValue = EINVAL, offset = 0, index = 0;
1530 error = copyin(ifreqp->ifr_data, &tmpdata, sizeof(tmpdata));
1535 /* Reading a register */
1536 if(strcmp(data->option, "-r") == 0) {
1537 data->value = 0x0000;
1538 mtx_lock(&lldev->mtx_drv);
1539 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1541 mtx_unlock(&lldev->mtx_drv);
1542 if(status == XGE_HAL_OK) {
1543 if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
1547 /* Writing to a register */
1548 else if(strcmp(data->option, "-w") == 0) {
1549 mtx_lock(&lldev->mtx_drv);
1550 status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
1552 if(status == XGE_HAL_OK) {
1554 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1556 if(status != XGE_HAL_OK) {
1557 xge_trace(XGE_ERR, "Reading back updated register failed");
1560 if(val64 != data->value) {
1562 "Read and written register values mismatched");
1568 xge_trace(XGE_ERR, "Getting register value failed");
1570 mtx_unlock(&lldev->mtx_drv);
1573 mtx_lock(&lldev->mtx_drv);
1574 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1575 index++, offset += 0x0008) {
1577 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1578 if(status != XGE_HAL_OK) {
1579 xge_trace(XGE_ERR, "Getting register value failed");
1582 *((u64 *)((u64 *)data + index)) = val64;
1585 mtx_unlock(&lldev->mtx_drv);
1588 if(copyout(data, ifreqp->ifr_data,
1589 sizeof(xge_hal_pci_bar0_t)) != 0) {
1590 xge_trace(XGE_ERR, "Copyout of register values failed");
1595 xge_trace(XGE_ERR, "Getting register values failed");
1603 * Callback to control the device - Interface configuration
1605 * @ifnetp Interface Handle
1606 * @command Device control command
1607 * @data Parameters associated with command (if any)
1610 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1612 struct ifreq *ifreqp = (struct ifreq *)data;
1613 xge_lldev_t *lldev = ifnetp->if_softc;
1614 struct ifmedia *ifmediap = &lldev->media;
1615 int retValue = 0, mask = 0;
1617 if(lldev->in_detach) {
1622 /* Set/Get ifnet address */
1625 ether_ioctl(ifnetp, command, data);
1630 retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1633 /* Set ifnet flags */
1635 if(ifnetp->if_flags & IFF_UP) {
1636 /* Interface is administratively up */
1637 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1640 xge_disable_promisc(lldev);
1641 xge_enable_promisc(lldev);
1644 /* Interface is administratively down */
1645 /* If the device is running, bring it down */
1646 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1652 /* Add/delete multicast address */
1655 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1656 xge_setmulti(lldev);
1660 /* Set/Get net media */
1663 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1666 /* Set capabilities */
1668 mtx_lock(&lldev->mtx_drv);
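/* Each bit set in mask identifies a capability this request toggles */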
1669 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1670 if(mask & IFCAP_TXCSUM) {
1671 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1672 ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1673 ifnetp->if_hwassist &=
1674 ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1677 ifnetp->if_capenable |= IFCAP_TXCSUM;
1678 ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1681 if(mask & IFCAP_TSO4) {
1682 if(ifnetp->if_capenable & IFCAP_TSO4) {
1683 ifnetp->if_capenable &= ~IFCAP_TSO4;
1684 ifnetp->if_hwassist &= ~CSUM_TSO;
1686 xge_os_printf("%s: TSO Disabled",
1687 device_get_nameunit(lldev->device));
1689 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1690 ifnetp->if_capenable |= IFCAP_TSO4;
1691 ifnetp->if_hwassist |= CSUM_TSO;
1693 xge_os_printf("%s: TSO Enabled",
1694 device_get_nameunit(lldev->device));
1698 mtx_unlock(&lldev->mtx_drv);
1701 /* Custom IOCTL 0 */
1702 case SIOCGPRIVATE_0:
1703 retValue = xge_ioctl_stats(lldev, ifreqp);
1706 /* Custom IOCTL 1 */
1707 case SIOCGPRIVATE_1:
1708 retValue = xge_ioctl_registers(lldev, ifreqp);
1720 * Initialize the interface
1722 * @plldev Per-adapter Data
1725 xge_init(void *plldev)
1727 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1729 mtx_lock(&lldev->mtx_drv);
1730 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1731 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1732 mtx_unlock(&lldev->mtx_drv);
1737 * Initialize the interface (called by holding lock)
1739 * @pdevin Per-adapter Data
1742 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1744 struct ifnet *ifnetp = lldev->ifnetp;
1745 xge_hal_device_t *hldev = lldev->devh;
1746 struct ifaddr *ifaddrp;
1747 unsigned char *macaddr;
1748 struct sockaddr_dl *sockaddrp;
1749 int status = XGE_HAL_OK;
1751 mtx_assert((&lldev->mtx_drv), MA_OWNED);
1753 /* If device is in running state, initializing is not required */
1754 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1757 /* Initializing timer */
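/* (second argument 1 marks the callout MPSAFE, so it runs without Giant) */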
1758 callout_init(&lldev->timer, 1);
1760 xge_trace(XGE_TRACE, "Set MTU size");
1761 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1762 if(status != XGE_HAL_OK) {
1763 xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1767 /* Enable HAL device */
1768 xge_hal_device_enable(hldev);
1770 /* Get MAC address and update in HAL */
1771 ifaddrp = ifnetp->if_addr;
1772 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1773 sockaddrp->sdl_type = IFT_ETHER;
1774 sockaddrp->sdl_alen = ifnetp->if_addrlen;
1775 macaddr = LLADDR(sockaddrp);
1776 xge_trace(XGE_TRACE,
1777 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1778 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1779 *(macaddr + 4), *(macaddr + 5));
1780 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1781 if(status != XGE_HAL_OK)
1782 xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1784 /* Opening channels */
1785 mtx_unlock(&lldev->mtx_drv);
1786 status = xge_channel_open(lldev, option);
1787 mtx_lock(&lldev->mtx_drv);
1788 if(status != XGE_HAL_OK)
1791 /* Set appropriate flags */
1792 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
1793 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1795 /* Checksum capability */
1796 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1797 (CSUM_TCP | CSUM_UDP) : 0;
1799 if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1800 ifnetp->if_hwassist |= CSUM_TSO;
1802 /* Enable interrupts */
1803 xge_hal_device_intr_enable(hldev);
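/* Start the status-poll timer: first poll after 10 seconds, then once a
 * second from xge_timer() */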
1805 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1807 /* Enable promiscuous mode if requested */
1808 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1809 xge_enable_promisc(lldev);
1811 /* Device is initialized */
1812 lldev->initialized = 1;
1813 xge_os_mdelay(1000);
1821 * Timer timeout function to handle link status
1823 * @devp Per-adapter Data
1826 xge_timer(void *devp)
1828 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1829 xge_hal_device_t *hldev = lldev->devh;
1831 /* Poll for changes */
1832 xge_hal_device_poll(hldev);
1835 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1842 * De-activate the interface
1844 * @lldev Per-adapter Data
1847 xge_stop(xge_lldev_t *lldev)
1849 mtx_lock(&lldev->mtx_drv);
1850 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1851 mtx_unlock(&lldev->mtx_drv);
1856 * ISR filter function - to filter interrupts from other devices (shared)
1858 * @handle Per-adapter Data
1861 * FILTER_STRAY if interrupt is from other device
1862 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1865 xge_isr_filter(void *handle)
1867 xge_lldev_t *lldev = (xge_lldev_t *)handle;
1868 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1869 u16 retValue = FILTER_STRAY;
1872 XGE_DRV_STATS(isr_filter);
1874 val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1875 &bar0->general_int_status);
1876 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1883 * Interrupt service routine for Line interrupts
1885 * @plldev Per-adapter Data
1888 xge_isr_line(void *plldev)
1890 xge_hal_status_e status;
1891 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1892 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1893 struct ifnet *ifnetp = lldev->ifnetp;
1895 XGE_DRV_STATS(isr_line);
1897 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1898 status = xge_hal_device_handle_irq(hldev);
1899 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1906 * ISR for Message signaled interrupts
1909 xge_isr_msi(void *plldev)
1911 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1912 XGE_DRV_STATS(isr_msi);
1913 xge_hal_device_continue_irq(lldev->devh);
1918 * Initialize and open an Rx (ring) channel
1921 * @lldev Per-adapter Data
1922 * @rflag Channel open/close/reopen flag
1924 * Returns 0 or Error Number
1927 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1929 u64 adapter_status = 0x0;
1930 xge_hal_status_e status = XGE_HAL_FAIL;
1932 xge_hal_channel_attr_t attr = {
1935 .callback = xge_rx_compl,
1936 .per_dtr_space = sizeof(xge_rx_priv_t),
1938 .type = XGE_HAL_CHANNEL_TYPE_RING,
1940 .dtr_init = xge_rx_initial_replenish,
1941 .dtr_term = xge_rx_term
1944 /* If device is not ready, return */
1945 status = xge_hal_device_status(lldev->devh, &adapter_status);
1946 if(status != XGE_HAL_OK) {
1947 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1948 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1951 status = xge_hal_channel_open(lldev->devh, &attr,
1952 &lldev->ring_channel[qid], rflag);
1961 * Initialize and open all Tx channels
1963 * @lldev Per-adapter Data
1964 * @tflag Channel open/close/reopen flag
1966 * Returns 0 or Error Number
1969 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1971 xge_hal_status_e status = XGE_HAL_FAIL;
1972 u64 adapter_status = 0x0;
1975 xge_hal_channel_attr_t attr = {
1977 .callback = xge_tx_compl,
1978 .per_dtr_space = sizeof(xge_tx_priv_t),
1980 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1982 .dtr_init = xge_tx_initial_replenish,
1983 .dtr_term = xge_tx_term
1986 /* If device is not ready, return */
1987 status = xge_hal_device_status(lldev->devh, &adapter_status);
1988 if(status != XGE_HAL_OK) {
1989 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1990 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1993 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1994 attr.post_qid = qindex,
1995 status = xge_hal_channel_open(lldev->devh, &attr,
1996 &lldev->fifo_channel[qindex], tflag);
1997 if(status != XGE_HAL_OK) {
1998 for(index = 0; index < qindex; index++)
1999 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2011 * @lldev Per-adapter Data
2014 xge_enable_msi(xge_lldev_t *lldev)
2016 xge_list_t *item = NULL;
2017 xge_hal_device_t *hldev = lldev->devh;
2018 xge_hal_channel_t *channel = NULL;
2019 u16 offset = 0, val16 = 0;
2021 xge_os_pci_read16(lldev->pdev, NULL,
2022 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2024 /* Update msi_data */
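/*
 * Bit 0x80 of the MSI message control word is the 64-bit address capable
 * flag; when set, the message data register follows two address dwords
 * (config offset 0x4c here), otherwise one (0x48) - assuming the MSI
 * capability sits at offset 0x40 in this device's config space.
 */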
2025 offset = (val16 & 0x80) ? 0x4c : 0x48;
2026 xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2031 xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2033 /* Update msi_control */
2034 xge_os_pci_read16(lldev->pdev, NULL,
2035 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2037 xge_os_pci_write16(lldev->pdev, NULL,
2038 xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2040 /* Set TxMAT and RxMAT registers with MSI */
2041 xge_list_for_each(item, &hldev->free_channels) {
2042 channel = xge_container_of(item, xge_hal_channel_t, item);
2043 xge_hal_channel_msi_set(channel, 1, (u32)val16);
2049 * Open both Tx and Rx channels
2051 * @lldev Per-adapter Data
2052 * @option Channel reopen option
2055 xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2057 xge_lro_entry_t *lro_session = NULL;
2058 xge_hal_status_e status = XGE_HAL_OK;
2059 int index = 0, index2 = 0;
2061 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2062 xge_msi_info_restore(lldev);
2063 xge_enable_msi(lldev);
2067 status = xge_create_dma_tags(lldev->device);
2068 if(status != XGE_HAL_OK)
2069 XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2071 /* Open ring (Rx) channel */
2072 for(index = 0; index < XGE_RING_COUNT; index++) {
2073 status = xge_rx_open(index, lldev, option);
2074 if(status != XGE_HAL_OK) {
2076 * DMA mapping can fail on kernels that cannot allocate
2077 * physically contiguous memory for jumbo frames.
2078 * Fall back to 5-buffer mode.
2080 if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2081 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2083 /* Close so far opened channels */
2084 for(index2 = 0; index2 < index; index2++) {
2085 xge_hal_channel_close(lldev->ring_channel[index2],
2089 /* Destroy DMA tags intended to use for 1 buffer mode */
2090 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2091 lldev->extra_dma_map)) {
2092 xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2094 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2095 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2096 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2097 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2099 /* Switch to 5 buffer mode */
2100 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2101 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2107 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2113 if(lldev->enabled_lro) {
2114 SLIST_INIT(&lldev->lro_free);
2115 SLIST_INIT(&lldev->lro_active);
2116 lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2118 for(index = 0; index < lldev->lro_num; index++) {
2119 lro_session = (xge_lro_entry_t *)
2120 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2121 if(lro_session == NULL) {
2122 lldev->lro_num = index;
2125 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2129 /* Open FIFO (Tx) channel */
2130 status = xge_tx_open(lldev, option);
2131 if(status != XGE_HAL_OK)
2132 XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2138 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2139 * Initialization of LRO failed (index is XGE_RING_COUNT)
2140 * Opening Tx channel failed (index is XGE_RING_COUNT)
2142 for(index2 = 0; index2 < index; index2++)
2143 xge_hal_channel_close(lldev->ring_channel[index2], option);
2151 * Close both Tx and Rx channels
2153 * @lldev Per-adapter Data
2154 * @option Channel reopen option
2158 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2164 /* Close FIFO (Tx) channel */
2165 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2166 xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2168 /* Close Ring (Rx) channels */
2169 for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2170 xge_hal_channel_close(lldev->ring_channel[qindex], option);
2172 if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2173 xge_trace(XGE_ERR, "Rx extra map destroy failed");
2174 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2175 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2176 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2177 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2184 * @arg Parameter passed from dmamap
2186 * @nseg Number of segments
2190 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
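/* Busdma load callback for single-segment mappings: just hand the first
 * segment's bus address back through arg */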
2193 *(bus_addr_t *) arg = segs->ds_addr;
2201 * @lldev Per-adapter Data
2204 xge_reset(xge_lldev_t *lldev)
2206 xge_trace(XGE_TRACE, "Reseting the chip");
2208 /* If the device is not initialized, return */
2209 if(lldev->initialized) {
2210 mtx_lock(&lldev->mtx_drv);
2211 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2212 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2213 mtx_unlock(&lldev->mtx_drv);
2221 * Update the device's multicast address list
2223 * @lldev Per-adapter Data
2226 xge_setmulti(xge_lldev_t *lldev)
2228 struct ifmultiaddr *ifma;
2230 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2231 struct ifnet *ifnetp = lldev->ifnetp;
2234 int table_size = 47;
2235 xge_hal_status_e status = XGE_HAL_OK;
2236 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2238 if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2239 status = xge_hal_device_mcast_enable(hldev);
2240 lldev->all_multicast = 1;
2242 else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2243 status = xge_hal_device_mcast_disable(hldev);
2244 lldev->all_multicast = 0;
2247 if(status != XGE_HAL_OK) {
2248 xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2252 /* Updating address list */
2253 if_maddr_rlock(ifnetp);
2255 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2256 if(ifma->ifma_addr->sa_family != AF_LINK) {
2259 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2262 if_maddr_runlock(ifnetp);
2264 if((!lldev->all_multicast) && (index)) {
2265 lldev->macaddr_count = (index + 1);
2266 if(lldev->macaddr_count > table_size) {
2270 /* Clear old addresses */
2271 for(index = 0; index < 48; index++) {
2272 xge_hal_device_macaddr_set(hldev, (offset + index),
2277 /* Add new addresses */
2278 if_maddr_rlock(ifnetp);
2280 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2281 if(ifma->ifma_addr->sa_family != AF_LINK) {
2284 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2285 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2288 if_maddr_runlock(ifnetp);
2295 * xge_enable_promisc
2296 * Enable Promiscuous Mode
2298 * @lldev Per-adapter Data
2301 xge_enable_promisc(xge_lldev_t *lldev)
2303 struct ifnet *ifnetp = lldev->ifnetp;
2304 xge_hal_device_t *hldev = lldev->devh;
2305 xge_hal_pci_bar0_t *bar0 = NULL;
2308 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2310 if(ifnetp->if_flags & IFF_PROMISC) {
2311 xge_hal_device_promisc_enable(lldev->devh);
2314 * When operating in promiscuous mode, don't strip the VLAN tag
2316 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2318 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2319 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2320 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2323 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2328 * xge_disable_promisc
2329 * Disable Promiscuous Mode
2331 * @lldev Per-adapter Data
2334 xge_disable_promisc(xge_lldev_t *lldev)
2336 xge_hal_device_t *hldev = lldev->devh;
2337 xge_hal_pci_bar0_t *bar0 = NULL;
2340 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2342 xge_hal_device_promisc_disable(lldev->devh);
2345 * Strip VLAN tag when operating in non-promiscuous mode
2347 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2349 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2350 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2351 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2354 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
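/*
 * Both promiscuous paths use the same clear-then-set idiom on the 64-bit
 * receive-parser configuration register read above (per the macro name):
 * the STRIP_VLAN_TAG_MODE field is first cleared and then OR-ed back with
 * the desired mode, 0 to keep the VLAN tag while promiscuous and 1 to strip
 * it in normal operation, e.g.
 *
 *     val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
 *     val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(mode);
 *
 * ("mode" here is illustrative; the driver writes the literal 0 or 1.)
 */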
2359 * Change interface MTU to a requested valid size
2361 * @lldev Per-adapter Data
2362 * @new_mtu Requested MTU
2364 * Returns 0 or Error Number
2367 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2369 int status = XGE_HAL_OK;
2371 /* Check requested MTU size for boundary */
2372 if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2373 XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2376 lldev->mtu = new_mtu;
2377 xge_confirm_changes(lldev, XGE_SET_MTU);
2386 * Common code for both stop and part of reset. Disables the device and interrupts and closes channels.
2389 * @lldev Per-adapter Data
2390 * @option Channel normal/reset option
2393 xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2395 xge_hal_device_t *hldev = lldev->devh;
2396 struct ifnet *ifnetp = lldev->ifnetp;
2399 mtx_assert((&lldev->mtx_drv), MA_OWNED);
2401 /* If device is not in "Running" state, return */
2402 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2405 /* Set appropriate flags */
2406 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2409 callout_stop(&lldev->timer);
2411 /* Disable interrupts */
2412 xge_hal_device_intr_disable(hldev);
2414 mtx_unlock(&lldev->mtx_drv);
2415 xge_queue_flush(xge_hal_device_queue(lldev->devh));
2416 mtx_lock(&lldev->mtx_drv);
2418 /* Disable HAL device */
2419 if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2420 xge_trace(XGE_ERR, "Disabling HAL device failed");
2421 xge_hal_device_status(hldev, &val64);
2422 xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2425 /* Close Tx and Rx channels */
2426 xge_channel_close(lldev, option);
2428 /* Reset HAL device */
2429 xge_hal_device_reset(hldev);
2431 xge_os_mdelay(1000);
2432 lldev->initialized = 0;
2434 if_link_state_change(ifnetp, LINK_STATE_DOWN);
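/*
 * Stop sequence, as implemented above: clear RUNNING/OACTIVE, stop the
 * timer callout, disable interrupts, flush the HAL event queue (dropping
 * the driver mutex around the flush), disable the HAL device, close the Tx
 * and Rx channels, reset the device, then mark the adapter uninitialized
 * and report the link as down.
 */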
2441 * xge_set_mbuf_cflags
2442 * Set checksum flags on the mbuf
2447 xge_set_mbuf_cflags(mbuf_t pkt)
2449 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2450 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2451 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2452 pkt->m_pkthdr.csum_data = htons(0xffff);
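/*
 * Marking the mbuf this way tells the stack that the hardware has already
 * verified the IP header checksum and the TCP/UDP checksum, so software
 * verification can be skipped.  A sketch of how a consumer typically treats
 * such an mbuf (illustrative only, not code from this driver):
 *
 *     if((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 *         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 *         ... csum_data (0xffff here) is taken as an already-verified sum ...
 *     }
 */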
2456 * xge_lro_flush_sessions
2457 * Flush all active LRO sessions and send the accumulated packets to the upper layer
2459 * @lldev Per-adapter Data
2462 xge_lro_flush_sessions(xge_lldev_t *lldev)
2464 xge_lro_entry_t *lro_session = NULL;
2466 while(!SLIST_EMPTY(&lldev->lro_active)) {
2467 lro_session = SLIST_FIRST(&lldev->lro_active);
2468 SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2469 xge_lro_flush(lldev, lro_session);
2475 * Flush LRO session. Send accumulated LRO packet to upper layer
2477 * @lldev Per-adapter Data
2478 * @lro LRO session to be flushed
2481 xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2483 struct ip *header_ip;
2484 struct tcphdr *header_tcp;
2487 if(lro_session->append_cnt) {
2488 header_ip = lro_session->lro_header_ip;
2489 header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2490 lro_session->m_head->m_pkthdr.len = lro_session->len;
2491 header_tcp = (struct tcphdr *)(header_ip + 1);
2492 header_tcp->th_ack = lro_session->ack_seq;
2493 header_tcp->th_win = lro_session->window;
2494 if(lro_session->timestamp) {
2495 ptr = (u32 *)(header_tcp + 1);
2496 ptr[1] = htonl(lro_session->tsval);
2497 ptr[2] = lro_session->tsecr;
2501 (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2502 lro_session->m_head = NULL;
2503 lro_session->timestamp = 0;
2504 lro_session->append_cnt = 0;
2505 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
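/*
 * The timestamp fix-up above relies on the "NOP, NOP, TIMESTAMP" option
 * layout that the accumulate path enforces: with ptr pointing just past the
 * TCP header, ptr[0] is the option header word, ptr[1] the TSval (rewritten
 * with the session's latest value) and ptr[2] the echoed TSecr.  The IP
 * length and the mbuf packet-header length are rewritten first so the merged
 * frame describes the full accumulated payload.
 */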
2509 * xge_lro_accumulate
2510 * Accumulate packets to form a large LRO packet based on various conditions
2512 * @lldev Per-adapter Data
2513 * @m_head Current Packet
2515 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2518 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2520 struct ether_header *header_ethernet;
2521 struct ip *header_ip;
2522 struct tcphdr *header_tcp;
2524 struct mbuf *buffer_next, *buffer_tail;
2525 xge_lro_entry_t *lro_session;
2526 xge_hal_status_e status = XGE_HAL_FAIL;
2527 int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2530 /* Get Ethernet header */
2531 header_ethernet = mtod(m_head, struct ether_header *);
2533 /* Return if it is not an IP packet */
2534 if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2538 header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2539 (struct ip *)(header_ethernet + 1) :
2540 mtod(m_head->m_next, struct ip *);
2542 /* Return if it is not a TCP packet */
2543 if(header_ip->ip_p != IPPROTO_TCP)
2546 /* Return if the IP header has options */
2547 if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2550 /* Return if packet is fragmented */
2551 if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2554 /* Get TCP header */
2555 header_tcp = (struct tcphdr *)(header_ip + 1);
2557 /* Return if any flag other than ACK or PUSH is set */
2558 if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2561 /* Only timestamp option is handled */
2562 tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2563 tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2564 ptr = (u32 *)(header_tcp + 1);
2565 if(tcp_options != 0) {
2566 if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2567 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2568 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2573 /* Total length of packet (IP) */
2574 ip_len = ntohs(header_ip->ip_len);
2577 tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2579 /* If the frame is padded, trim it */
2580 tot_len = m_head->m_pkthdr.len;
2581 trim = tot_len - (ip_len + ETHER_HDR_LEN);
2585 m_adj(m_head, -trim);
2586 tot_len = m_head->m_pkthdr.len;
2589 buffer_next = m_head;
2591 while(buffer_next != NULL) {
2592 buffer_tail = buffer_next;
2593 buffer_next = buffer_tail->m_next;
2596 /* Total size of the headers only */
2597 hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2599 /* Get sequence number */
2600 seq = ntohl(header_tcp->th_seq);
2602 SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2603 if(lro_session->source_port == header_tcp->th_sport &&
2604 lro_session->dest_port == header_tcp->th_dport &&
2605 lro_session->source_ip == header_ip->ip_src.s_addr &&
2606 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2608 /* Unmatched sequence number, flush LRO session */
2609 if(__predict_false(seq != lro_session->next_seq)) {
2610 SLIST_REMOVE(&lldev->lro_active, lro_session,
2611 xge_lro_entry_t, next);
2612 xge_lro_flush(lldev, lro_session);
2616 /* Handle timestamp option */
2618 u32 tsval = ntohl(*(ptr + 1));
2619 if(__predict_false(lro_session->tsval > tsval ||
2623 lro_session->tsval = tsval;
2624 lro_session->tsecr = *(ptr + 2);
2627 lro_session->next_seq += tcp_data_len;
2628 lro_session->ack_seq = header_tcp->th_ack;
2629 lro_session->window = header_tcp->th_win;
2631 /* If the TCP payload is zero length, free the mbuf */
2632 if(tcp_data_len == 0) {
2634 status = XGE_HAL_OK;
2638 lro_session->append_cnt++;
2639 lro_session->len += tcp_data_len;
2641 /* Adjust mbuf so that m_data points to the payload rather than the headers */
2642 m_adj(m_head, hlen);
2644 /* Append this packet to LRO accumulated packet */
2645 lro_session->m_tail->m_next = m_head;
2646 lro_session->m_tail = buffer_tail;
2648 /* Flush if the LRO packet exceeds the maximum size */
2649 if(lro_session->len >
2650 (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2651 SLIST_REMOVE(&lldev->lro_active, lro_session,
2652 xge_lro_entry_t, next);
2653 xge_lro_flush(lldev, lro_session);
2655 status = XGE_HAL_OK;
2660 if(SLIST_EMPTY(&lldev->lro_free))
2663 /* Start a new LRO session */
2664 lro_session = SLIST_FIRST(&lldev->lro_free);
2665 SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2666 SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2667 lro_session->source_port = header_tcp->th_sport;
2668 lro_session->dest_port = header_tcp->th_dport;
2669 lro_session->source_ip = header_ip->ip_src.s_addr;
2670 lro_session->dest_ip = header_ip->ip_dst.s_addr;
2671 lro_session->next_seq = seq + tcp_data_len;
2672 lro_session->mss = tcp_data_len;
2673 lro_session->ack_seq = header_tcp->th_ack;
2674 lro_session->window = header_tcp->th_win;
2676 lro_session->lro_header_ip = header_ip;
2678 /* Handle timestamp option */
2680 lro_session->timestamp = 1;
2681 lro_session->tsval = ntohl(*(ptr + 1));
2682 lro_session->tsecr = *(ptr + 2);
2685 lro_session->len = tot_len;
2686 lro_session->m_head = m_head;
2687 lro_session->m_tail = buffer_tail;
2688 status = XGE_HAL_OK;
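/*
 * LRO accumulation summary: a frame is merged only if it is plain,
 * unfragmented TCP/IPv4 with no IP options, carries nothing but ACK/PUSH
 * flags, and has at most the timestamp TCP option.  Sessions are keyed on
 * the (source port, dest port, source IP, dest IP) tuple; a segment whose
 * sequence number does not continue the session, or a session that would
 * exceed XGE_HAL_LRO_DEFAULT_FRM_LEN, forces a flush.  Frames matching no
 * active session start a new one taken from the lro_free list.
 */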
2695 * xge_accumulate_large_rx
2696 * Accumulate packets to form a large LRO packet based on various conditions
2698 * @lldev Per-adapter Data
2699 * @pkt Current packet
2700 * @pkt_length Packet Length
2701 * @rxd_priv Rx Descriptor Private Data
2704 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2705 xge_rx_priv_t *rxd_priv)
2707 if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2708 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2709 BUS_DMASYNC_POSTREAD);
2710 (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2716 * If the interrupt is due to a received frame (Rx completion), send it up
2718 * @channelh Ring Channel Handle
2719 * @dtr Current Descriptor
2720 * @t_code Transfer Code indicating success or error
2721 * @userdata Per-adapter Data
2723 * Returns XGE_HAL_OK or HAL error enums
2726 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2729 struct ifnet *ifnetp;
2730 xge_rx_priv_t *rxd_priv = NULL;
2731 mbuf_t mbuf_up = NULL;
2732 xge_hal_status_e status = XGE_HAL_OK;
2733 xge_hal_dtr_info_t ext_info;
2737 /* Get the user data portion */
2738 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2740 XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2743 XGE_DRV_STATS(rx_completions);
2745 /* Get the interface pointer */
2746 ifnetp = lldev->ifnetp;
2749 XGE_DRV_STATS(rx_desc_compl);
2751 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2752 status = XGE_HAL_FAIL;
2757 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2758 XGE_DRV_STATS(rx_tcode);
2759 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2760 xge_hal_ring_dtr_post(channelh, dtr);
2764 /* Get the private data for this descriptor*/
2765 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2768 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2773 * Prepare one buffer to send it to upper layer -- since the upper
2774 * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2775 * prepare a new buffer, do mapping, use it in the current
2776 * descriptor and post descriptor back to ring channel
2778 mbuf_up = rxd_priv->bufferArray[0];
2780 /* Get details of the mbuf, i.e., the packet length */
2781 xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2784 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2785 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2786 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2788 if(status != XGE_HAL_OK) {
2789 xge_trace(XGE_ERR, "No memory");
2790 XGE_DRV_STATS(rx_no_buf);
2793 * Unable to allocate a buffer. Instead of discarding, post the
2794 * descriptor back to the channel so the same descriptor is processed later
2797 xge_hal_ring_dtr_post(channelh, dtr);
2801 /* Get the extended information */
2802 xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2805 * As we have allocated a new mbuf for this descriptor, post this
2806 * descriptor with new mbuf back to ring channel
2808 vlan_tag = ext_info.vlan;
2809 xge_hal_ring_dtr_post(channelh, dtr);
2810 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2811 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2812 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2813 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2815 /* set Checksum Flag */
2816 xge_set_mbuf_cflags(mbuf_up);
2818 if(lldev->enabled_lro) {
2819 xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2823 /* Post-read sync for buffers */
2824 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2825 bus_dmamap_sync(lldev->dma_tag_rx,
2826 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2828 (*ifnetp->if_input)(ifnetp, mbuf_up);
2833 * Packet with an erroneous checksum; let the upper layer deal with it
2837 /* Post-read sync for buffers */
2838 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2839 bus_dmamap_sync(lldev->dma_tag_rx,
2840 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2844 mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2845 mbuf_up->m_flags |= M_VLANTAG;
2848 if(lldev->enabled_lro)
2849 xge_lro_flush_sessions(lldev);
2851 (*ifnetp->if_input)(ifnetp, mbuf_up);
2853 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2856 if(lldev->enabled_lro)
2857 xge_lro_flush_sessions(lldev);
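/*
 * Rx completion flow: for every completed descriptor the driver hands the
 * filled mbuf to the stack (or to the LRO path when both L3 and L4
 * checksums are good and LRO is enabled), replaces it with a freshly
 * allocated and mapped buffer, and posts the descriptor back to the ring.
 * If buffer allocation fails the descriptor is reposted with its old buffer
 * instead of being dropped, and a VLAN tag reported by the hardware is
 * attached to the mbuf before it goes up.
 */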
2867 * @mbuf_up Packet to send up
2868 * @channelh Ring Channel Handle
2870 * @lldev Per-adapter Data
2871 * @rxd_priv Rx Descriptor Private Data
2873 * Returns XGE_HAL_OK or HAL error enums
2876 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2877 xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2880 int pkt_length[5] = {0, 0}, pkt_len = 0;
2881 dma_addr_t dma_data[5];
2887 if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2888 xge_os_memzero(pkt_length, sizeof(pkt_length));
2891 * Retrieve data of interest from the completed descriptor -- This
2892 * returns the packet length
2894 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2895 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2898 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2901 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2902 m->m_len = pkt_length[index];
2904 if(index < (lldev->rxd_mbuf_cnt-1)) {
2905 m->m_next = rxd_priv->bufferArray[index + 1];
2911 pkt_len += pkt_length[index];
2915 * 2-buffer mode is an exceptional case: the data is in the 3rd
2916 * buffer, not in the 2nd buffer
2918 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2919 m->m_len = pkt_length[2];
2920 pkt_len += pkt_length[2];
2924 * Update the length of the buffer being sent up with the total packet length
2927 mbuf_up->m_pkthdr.len = pkt_len;
2931 * Retrieve data of interest from the completed descriptor -- This
2932 * returns the packet length
2934 xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data[0], &pkt_length[0]);
2937 * Update the length of the buffer being sent up with the packet length
2940 mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
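/*
 * Buffer-mode handling: in 1-buffer mode a single pkt_length value is
 * fetched with xge_hal_ring_dtr_1b_get() and applied to the one mbuf.  In
 * the multi-buffer modes the per-buffer lengths come back as an array
 * (xge_hal_ring_dtr_3b_get()/_5b_get()) and are spread across the mbuf
 * chain, with 2-buffer mode special-cased because its payload lands in the
 * third buffer rather than the second.
 */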
2948 * Flush Tx descriptors
2950 * @channelh Channel handle
2953 xge_flush_txds(xge_hal_channel_h channelh)
2955 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2956 xge_hal_dtr_h tx_dtr;
2957 xge_tx_priv_t *tx_priv;
2960 while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2962 XGE_DRV_STATS(tx_desc_compl);
2964 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2965 XGE_DRV_STATS(tx_tcode);
2966 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2969 tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2970 bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2971 m_freem(tx_priv->buffer);
2972 tx_priv->buffer = NULL;
2973 xge_hal_fifo_dtr_free(channelh, tx_dtr);
2981 * @ifnetp Interface Handle
2984 xge_send(struct ifnet *ifnetp)
2987 xge_lldev_t *lldev = ifnetp->if_softc;
2989 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2990 if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2991 XGE_DRV_STATS(tx_lock_fail);
2994 xge_send_locked(ifnetp, qindex);
2995 mtx_unlock(&lldev->mtx_tx[qindex]);
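/*
 * xge_send() uses mtx_trylock() so a transmit attempt from one context
 * never blocks on a queue that another context is already servicing; a
 * failed trylock is only counted (tx_lock_fail) and that FIFO is skipped
 * for this pass.
 */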
3000 xge_send_locked(struct ifnet *ifnetp, int qindex)
3003 static bus_dma_segment_t segs[XGE_MAX_SEGS];
3004 xge_hal_status_e status;
3005 unsigned int max_fragments;
3006 xge_lldev_t *lldev = ifnetp->if_softc;
3007 xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
3008 mbuf_t m_head = NULL;
3009 mbuf_t m_buf = NULL;
3010 xge_tx_priv_t *ll_tx_priv = NULL;
3011 register unsigned int count = 0;
3012 unsigned int nsegs = 0;
3015 max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3017 /* If device is not initialized, return */
3018 if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3021 XGE_DRV_STATS(tx_calls);
3024 * This loop is executed once for each packet in the kernel-maintained
3025 * queue -- each packet may consist of multiple fragments as an mbuf chain
3028 IF_DEQUEUE(&ifnetp->if_snd, m_head);
3029 if (m_head == NULL) {
3030 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3034 for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3035 if(m_buf->m_len) count += 1;
3038 if(count >= max_fragments) {
3039 m_buf = m_defrag(m_head, M_NOWAIT);
3040 if(m_buf != NULL) m_head = m_buf;
3041 XGE_DRV_STATS(tx_defrag);
3044 /* Reserve descriptors */
3045 status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3046 if(status != XGE_HAL_OK) {
3047 XGE_DRV_STATS(tx_no_txd);
3048 xge_flush_txds(channelh);
3053 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3054 xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3056 /* Update Tx private structure for this descriptor */
3057 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3058 ll_tx_priv->buffer = m_head;
3061 * Do mapping -- Required DMA tag has been created in xge_init
3062 * function and DMA maps have already been created in the
3063 * xge_tx_initial_replenish function.
3064 * Returns number of segments through nsegs
3066 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3067 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3068 xge_trace(XGE_TRACE, "DMA map load failed");
3069 XGE_DRV_STATS(tx_map_fail);
3073 if(lldev->driver_stats.tx_max_frags < nsegs)
3074 lldev->driver_stats.tx_max_frags = nsegs;
3076 /* Set descriptor buffer for header and each fragment/segment */
3079 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3080 (dma_addr_t)htole64(segs[count].ds_addr),
3081 segs[count].ds_len);
3083 } while(count < nsegs);
3085 /* Pre-write Sync of mapping */
3086 bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3087 BUS_DMASYNC_PREWRITE);
3089 if((lldev->enabled_tso) &&
3090 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3091 XGE_DRV_STATS(tx_tso);
3092 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3096 if(ifnetp->if_hwassist > 0) {
3097 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3098 | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3101 /* Post descriptor to FIFO channel */
3102 xge_hal_fifo_dtr_post(channelh, dtr);
3103 XGE_DRV_STATS(tx_posted);
3105 /* Send a copy of the packet to any BPF (Berkeley Packet Filter)
3106 * listeners so that tools like tcpdump can see it */
3107 ETHER_BPF_MTAP(ifnetp, m_head);
3110 /* Prepend the packet back onto the queue */
3111 IF_PREPEND(&ifnetp->if_snd, m_head);
3112 ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3114 xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3115 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3116 XGE_DRV_STATS(tx_again);
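/*
 * Per-packet transmit steps above: dequeue from if_snd, m_defrag() when the
 * chain has more fragments than the FIFO allows, reserve a descriptor
 * (reclaiming completed ones with xge_flush_txds() if the reserve fails),
 * set the VLAN tag, DMA-map the chain and program one buffer pointer per
 * segment, request TSO/checksum offload as configured, then post the
 * descriptor and hand a copy to BPF.  When no descriptor is available the
 * packet is prepended back to if_snd and a TRY_XMIT_AGAIN event is queued.
 */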
3121 * Allocates new mbufs to be placed into descriptors
3123 * @dtrh Descriptor Handle
3124 * @rxd_priv Rx Descriptor Private Data
3125 * @lldev Per-adapter Data
3126 * @index Buffer Index (if multi-buffer mode)
3128 * Returns XGE_HAL_OK or HAL error enums
3131 xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3132 xge_lldev_t *lldev, int index)
3134 register mbuf_t mp = NULL;
3135 struct ifnet *ifnetp = lldev->ifnetp;
3136 int status = XGE_HAL_OK;
3137 int buffer_size = 0, cluster_size = 0, count;
3138 bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
3139 bus_dma_segment_t segs[3];
3141 buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3142 ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3143 lldev->rxd_mbuf_len[index];
3145 if(buffer_size <= MCLBYTES) {
3146 cluster_size = MCLBYTES;
3147 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3150 cluster_size = MJUMPAGESIZE;
3151 if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3152 (buffer_size > MJUMPAGESIZE)) {
3153 cluster_size = MJUM9BYTES;
3155 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3158 xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3159 status = XGE_HAL_FAIL;
3163 /* Update mbuf's length, packet length and receive interface */
3164 mp->m_len = mp->m_pkthdr.len = buffer_size;
3165 mp->m_pkthdr.rcvif = ifnetp;
3168 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3169 mp, segs, &count, BUS_DMA_NOWAIT)) {
3170 XGE_DRV_STATS(rx_map_fail);
3172 XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3175 /* Update descriptor private data */
3176 rxd_priv->bufferArray[index] = mp;
3177 rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3178 rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
3179 lldev->extra_dma_map = map;
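/*
 * Spare-map trick: the new mbuf is loaded into lldev->extra_dma_map, that
 * map is handed to this descriptor, and the descriptor's previous map
 * (still holding the mbuf that is about to go up the stack) becomes the new
 * spare.  This avoids creating and destroying a DMA map for every received
 * packet.
 */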
3181 /* Post-read sync on the old map before it is unloaded */
3182 bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3184 /* Unload DMA map of mbuf in current descriptor */
3185 bus_dmamap_unload(lldev->dma_tag_rx, map);
3187 /* Set descriptor buffer */
3188 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3189 xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3199 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3201 * @dtrh Descriptor Handle
3202 * @rxd_priv Rx Descriptor Private Data
3203 * @lldev Per-adapter Data
3205 * Returns XGE_HAL_OK or HAL error enums
3208 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3211 bus_addr_t dma_pointers[5];
3213 int status = XGE_HAL_OK, index;
3216 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3217 status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3218 if(status != XGE_HAL_OK) {
3219 for(newindex = 0; newindex < index; newindex++) {
3220 m_freem(rxd_priv->bufferArray[newindex]);
3222 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3226 for(index = 0; index < lldev->buffer_mode; index++) {
3227 if(lldev->rxd_mbuf_len[index] != 0) {
3228 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3229 dma_sizes[index] = lldev->rxd_mbuf_len[index];
3232 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3233 dma_sizes[index] = 1;
3237 /* Assign the second buffer to the third pointer in 2-buffer mode */
3238 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3239 dma_pointers[2] = dma_pointers[1];
3240 dma_sizes[2] = dma_sizes[1];
3244 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3245 xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3248 xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
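/*
 * For the multi-buffer ring modes the per-buffer physical addresses and
 * sizes gathered above are handed to the HAL in one call
 * (xge_hal_ring_dtr_3b_set() or _5b_set()).  Unused slots are pointed at
 * the previous buffer with a 1-byte size so the hardware always sees a
 * valid pointer, and 2-buffer mode duplicates the second buffer into the
 * third slot to match the layout noted in xge_ring_dtr_get().
 */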
3257 * If the interrupt is due to Tx completion, free the sent buffer
3259 * @channelh Channel Handle
3261 * @t_code Transfer Code indicating success or error
3262 * @userdata Per-adapter Data
3264 * Returns XGE_HAL_OK or HAL error enum
3267 xge_tx_compl(xge_hal_channel_h channelh,
3268 xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3270 xge_tx_priv_t *ll_tx_priv = NULL;
3271 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3272 struct ifnet *ifnetp = lldev->ifnetp;
3273 mbuf_t m_buffer = NULL;
3274 int qindex = xge_hal_channel_id(channelh);
3276 mtx_lock(&lldev->mtx_tx[qindex]);
3278 XGE_DRV_STATS(tx_completions);
3281 * For each completed descriptor: Get private structure, free buffer,
3282 * do unmapping, and free descriptor
3285 XGE_DRV_STATS(tx_desc_compl);
3288 XGE_DRV_STATS(tx_tcode);
3289 xge_trace(XGE_TRACE, "t_code %d", t_code);
3290 xge_hal_device_handle_tcode(channelh, dtr, t_code);
3293 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3294 m_buffer = ll_tx_priv->buffer;
3295 bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3297 ll_tx_priv->buffer = NULL;
3298 xge_hal_fifo_dtr_free(channelh, dtr);
3299 } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3301 xge_send_locked(ifnetp, qindex);
3302 ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3304 mtx_unlock(&lldev->mtx_tx[qindex]);
3310 * xge_tx_initial_replenish
3311 * Create a DMA map for each Tx descriptor for later use
3313 * @channelh Tx Channel Handle
3314 * @dtrh Descriptor Handle
3316 * @userdata Per-adapter Data
3317 * @reopen Channel open/reopen option
3319 * Returns XGE_HAL_OK or HAL error enums
3322 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3323 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3325 xge_tx_priv_t *txd_priv = NULL;
3326 int status = XGE_HAL_OK;
3328 /* Get the user data portion from channel handle */
3329 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3331 XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3335 /* Get the private data */
3336 txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3337 if(txd_priv == NULL) {
3338 XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3342 /* Create DMA map for this descriptor */
3343 if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3344 &txd_priv->dma_map)) {
3345 XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3346 txinit_out, XGE_HAL_FAIL);
3354 * xge_rx_initial_replenish
3355 * Initially allocate buffers and set them into descriptors for later use
3357 * @channelh Rx Channel Handle
3358 * @dtrh Descriptor Handle
3360 * @userdata Per-adapter Data
3361 * @reopen Channel open/reopen option
3363 * Returns XGE_HAL_OK or HAL error enums
3366 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3367 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3369 xge_rx_priv_t *rxd_priv = NULL;
3370 int status = XGE_HAL_OK;
3371 int index1 = 0, index2 = 0;
3373 /* Get the user data portion from channel handle */
3374 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3376 XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3380 /* Get the private data */
3381 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3382 if(rxd_priv == NULL) {
3383 XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3387 rxd_priv->bufferArray = xge_os_malloc(NULL,
3388 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3390 if(rxd_priv->bufferArray == NULL) {
3391 XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3395 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3396 /* Create DMA map for these descriptors*/
3397 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3398 &rxd_priv->dmainfo[0].dma_map)) {
3399 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3400 rxinit_err_out, XGE_HAL_FAIL);
3402 /* Get a buffer, attach it to this descriptor */
3403 status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3406 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3407 /* Create DMA map for this descriptor */
3408 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3409 &rxd_priv->dmainfo[index1].dma_map)) {
3410 for(index2 = index1 - 1; index2 >= 0; index2--) {
3411 bus_dmamap_destroy(lldev->dma_tag_rx,
3412 rxd_priv->dmainfo[index2].dma_map);
3415 "Jumbo DMA map creation for Rx descriptor failed",
3416 rxinit_err_out, XGE_HAL_FAIL);
3419 status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3422 if(status != XGE_HAL_OK) {
3423 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3424 bus_dmamap_destroy(lldev->dma_tag_rx,
3425 rxd_priv->dmainfo[index1].dma_map);
3427 goto rxinit_err_out;
3434 xge_os_free(NULL, rxd_priv->bufferArray,
3435 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3442 * During unload terminate and free all descriptors
3444 * @channelh Rx Channel Handle
3445 * @dtrh Rx Descriptor Handle
3446 * @state Descriptor State
3447 * @userdata Per-adapter Data
3448 * @reopen Channel open/reopen option
3451 xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3452 xge_hal_dtr_state_e state, void *userdata,
3453 xge_hal_channel_reopen_e reopen)
3455 xge_rx_priv_t *rxd_priv = NULL;
3456 xge_lldev_t *lldev = NULL;
3459 /* Descriptor state is not "Posted" */
3460 if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3462 /* Get the user data portion */
3463 lldev = xge_hal_channel_userdata(channelh);
3465 /* Get the private data */
3466 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3468 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3469 if(rxd_priv->dmainfo[index].dma_map != NULL) {
3470 bus_dmamap_sync(lldev->dma_tag_rx,
3471 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3472 bus_dmamap_unload(lldev->dma_tag_rx,
3473 rxd_priv->dmainfo[index].dma_map);
3474 if(rxd_priv->bufferArray[index] != NULL)
3475 m_free(rxd_priv->bufferArray[index]);
3476 bus_dmamap_destroy(lldev->dma_tag_rx,
3477 rxd_priv->dmainfo[index].dma_map);
3480 xge_os_free(NULL, rxd_priv->bufferArray,
3481 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3483 /* Free the descriptor */
3484 xge_hal_ring_dtr_free(channelh, dtrh);
3492 * During unload terminate and free all descriptors
3494 * @channelh Tx Channel Handle
3495 * @dtrh Tx Descriptor Handle
3496 * @state Descriptor State
3497 * @userdata Per-adapter Data
3498 * @reopen Channel open/reopen option
3501 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3502 xge_hal_dtr_state_e state, void *userdata,
3503 xge_hal_channel_reopen_e reopen)
3505 xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3506 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3508 /* Destroy DMA map */
3509 bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3515 * FreeBSD device interface entry points
3517 static device_method_t xge_methods[] = {
3518 DEVMETHOD(device_probe, xge_probe),
3519 DEVMETHOD(device_attach, xge_attach),
3520 DEVMETHOD(device_detach, xge_detach),
3521 DEVMETHOD(device_shutdown, xge_shutdown),
3526 static driver_t xge_driver = {
3529 sizeof(xge_lldev_t),
3531 static devclass_t xge_devclass;
3532 DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
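/*
 * DRIVER_MODULE() registers this driver ("nxge") with the pci bus so that
 * xge_probe()/xge_attach() run for matching Xframe adapters.  On a system
 * where the driver is built as a loadable module it would typically be
 * loaded with something like "kldload if_nxge" (module file name assumed
 * from the usual FreeBSD build glue, not from this source file).
 */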