2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/if_nxge.h>
30 #include <dev/nxge/xge-osdep.h>
31 #include <net/if_arp.h>
32 #include <sys/types.h>
34 #include <net/if_vlan_var.h>
/*
 * Module-wide state shared by all adapter instances.
 * copyright_print: set after the copyright banner is printed so it is
 *   emitted only once even when multiple adapters are probed.
 * hal_driver_init_count: reference count of attaches sharing the one-time
 *   HAL driver init/terminate (see xge_driver_initialize / xge_resources_free).
 */
36 int copyright_print = 0;
37 int hal_driver_init_count = 0;
38 size_t size = sizeof(int);
/* Forward declaration; definition is elsewhere in this file (not in this chunk). */
40 static void inline xge_flush_txds(xge_hal_channel_h);
/*
 * xge_probe
 * PCI probe entry point: matches the device's vendor/device IDs against the
 * supported Xframe I (Xena) and Xframe II (Herc) parts.
 */
44 * Probes for Xframe devices
49 * BUS_PROBE_DEFAULT if device is supported
50 * ENXIO if device is not supported
53 xge_probe(device_t dev)
55 int devid = pci_get_device(dev);
56 int vendorid = pci_get_vendor(dev);
59 if(vendorid == XGE_PCI_VENDOR_ID) {
60 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
61 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
/* Print the copyright banner once per system, not per device. */
62 if(!copyright_print) {
63 xge_os_printf(XGE_COPYRIGHT);
66 device_set_desc_copy(dev,
67 "Neterion Xframe 10 Gigabit Ethernet Adapter");
68 retValue = BUS_PROBE_DEFAULT;
/*
 * xge_init_params
 * Populates the HAL device configuration, taking tunable values from the
 * kernel environment (kenv) via the XGE_GET_PARAM* macros, with compiled-in
 * defaults as fallback.
 */
77 * Sets HAL parameter values (from kenv).
79 * @dconfig Device Configuration
83 xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
85 int qindex, tindex, revision;
87 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
/* Fixed (non-tunable) defaults. */
89 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
90 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
91 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
92 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
93 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
94 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
/* Per-adapter feature switches (TSO/LRO/MSI) read from kenv. */
96 XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
97 XGE_DEFAULT_ENABLED_TSO);
98 XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
99 XGE_DEFAULT_ENABLED_LRO);
100 XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
101 XGE_DEFAULT_ENABLED_MSI);
/* Device-level PCI/statistics tunables. */
103 XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
104 XGE_DEFAULT_LATENCY_TIMER);
105 XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
106 XGE_DEFAULT_MAX_SPLITS_TRANS);
107 XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
108 XGE_DEFAULT_MMRB_COUNT);
109 XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
110 XGE_DEFAULT_SHARED_SPLITS);
111 XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
112 XGE_DEFAULT_ISR_POLLING_CNT);
113 XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
114 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
/* MAC-block tunables (utilization periods, pause/flow control). */
116 XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
117 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
118 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
119 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
120 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
121 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
122 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
123 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
124 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
125 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
126 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
127 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
128 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
129 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
/* Global Tx FIFO tunables. */
131 XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
132 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
133 XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
134 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
135 XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
136 XGE_DEFAULT_FIFO_MAX_FRAGS);
/* Per-queue FIFO configuration, plus per-queue TTI (Tx interrupt) config. */
138 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
139 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
140 XGE_DEFAULT_FIFO_QUEUE_INTR);
141 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
142 XGE_DEFAULT_FIFO_QUEUE_MAX);
143 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
144 qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
146 for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
147 dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
148 dconfig->fifo.queue[qindex].configured = 1;
150 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
151 urange_a, qindex, tindex,
152 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
153 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
154 urange_b, qindex, tindex,
155 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
156 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
157 urange_c, qindex, tindex,
158 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
159 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
160 ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
161 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
162 ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
163 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
164 ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
165 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
166 ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
167 XGE_GET_PARAM_FIFO_QUEUE_TTI(
168 "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
169 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
170 XGE_GET_PARAM_FIFO_QUEUE_TTI(
171 "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
172 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
173 XGE_GET_PARAM_FIFO_QUEUE_TTI(
174 "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
175 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
/* Global Rx ring tunables. */
179 XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
180 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
182 XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
183 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
/* Rx buffer mode: only modes 1 and 2 are accepted; anything else falls
 * back to mode 1. */
185 XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
186 XGE_DEFAULT_BUFFER_MODE);
187 if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
188 (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
189 xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
190 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
/* Per-queue ring configuration plus per-queue RTI (Rx interrupt) config.
 * Note: user-selected mode 2 is programmed into the HAL as hardware
 * buffer mode 3. */
193 for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
194 dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
195 dconfig->ring.queue[qindex].priority = 0;
196 dconfig->ring.queue[qindex].configured = 1;
197 dconfig->ring.queue[qindex].buffer_mode =
198 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
199 XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
201 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
202 XGE_DEFAULT_RING_QUEUE_MAX);
203 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
204 qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
205 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
206 dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
207 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
208 indicate_max_pkts, qindex,
209 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
210 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
211 backoff_interval_us, qindex,
212 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
214 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
215 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
216 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
217 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
218 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
219 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
220 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
221 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
222 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
223 timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
224 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
225 timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
226 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
227 urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
228 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
229 urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
230 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
231 urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
/* Clamp fifo_max_frags to the HAL's per-page limit of PAGE_SIZE/32 TxDs. */
234 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
235 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
236 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
237 (int)(PAGE_SIZE / 32))
238 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
239 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
/* Workaround for old (revision <= 0x12) AMD 8131 PCI-X bridges: limit
 * MMRB and split transactions.
 * NOTE(review): the comment says "max splits = 2" but the constant is
 * named XGE_HAL_THREE_SPLIT_TRANSACTION — confirm which is intended. */
242 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
243 if(checkdev != NULL) {
244 /* Check Revision for 0x12 */
245 revision = pci_read_config(checkdev,
246 xge_offsetof(xge_hal_pci_config_t, revision), 1);
247 if(revision <= 0x12) {
248 /* Set mmrb_count to 1k and max splits = 2 */
249 dconfig->mmrb_count = 1;
250 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
/*
 * xge_rx_buffer_sizes_set
 * Computes the per-RxD mbuf sizes for the chosen Rx buffer mode and stores
 * them in lldev->rxd_mbuf_len[] / lldev->rxd_mbuf_cnt.
 */
256 * xge_buffer_sizes_set
257 * Set buffer sizes based on Rx buffer mode
259 * @lldev Per-adapter Data
260 * @buffer_mode Rx Buffer Mode
263 xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
266 int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
267 int buffer_size = mtu + frame_header;
269 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
/* For modes other than 5, the last buffer holds the full MTU payload. */
271 if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
272 lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
/* Buffer 0: whole frame in mode 1, otherwise just the MAC header. */
274 lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
276 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
277 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
/* Mode 5: split the remaining payload into MJUMPAGESIZE chunks, with the
 * final (128-byte-aligned) remainder in the last buffer. */
279 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
281 buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
282 while(buffer_size > MJUMPAGESIZE) {
283 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
284 buffer_size -= MJUMPAGESIZE;
286 XGE_ALIGN_TO(buffer_size, 128);
287 lldev->rxd_mbuf_len[index] = buffer_size;
288 lldev->rxd_mbuf_cnt = index + 1;
291 for(index = 0; index < buffer_mode; index++)
292 xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
293 lldev->rxd_mbuf_len[index]);
/*
 * xge_buffer_mode_init
 * Applies the configured Rx buffer mode to the ring configuration and the
 * ifnet LRO capability, then derives per-buffer sizes for the given MTU.
 */
297 * xge_buffer_mode_init
298 * Init Rx buffer mode
300 * @lldev Per-adapter Data
304 xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
306 int index = 0, buffer_size = 0;
307 xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
309 buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
/* Reflect the LRO tunable in the interface capability flags. */
311 if(lldev->enabled_lro)
312 (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
314 (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
/* User mode 2 maps to hardware buffer mode 3 with scatter mode B;
 * otherwise the mode is used as-is with scatter mode A. */
316 lldev->rxd_mbuf_cnt = lldev->buffer_mode;
317 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
318 XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
319 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
322 XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
323 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
325 xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
327 xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
328 ((lldev->enabled_tso) ? "Enabled":"Disabled"));
329 xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
330 ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
331 xge_os_printf("%s: Rx %d Buffer Mode Enabled",
332 device_get_nameunit(lldev->device), lldev->buffer_mode);
/*
 * xge_driver_initialize
 * One-time, reference-counted initialization of the shared HAL driver:
 * registers the ULD callbacks (link up/down, critical error, event) and the
 * event-queue sizing. Subsequent callers only bump hal_driver_init_count.
 */
336 * xge_driver_initialize
337 * Initializes HAL driver (common for all devices)
340 * XGE_HAL_OK if success
341 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
344 xge_driver_initialize(void)
346 xge_hal_uld_cbs_t uld_callbacks;
347 xge_hal_driver_config_t driver_config;
348 xge_hal_status_e status = XGE_HAL_OK;
350 /* Initialize HAL driver */
351 if(!hal_driver_init_count) {
352 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
353 xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
356 * Initial and maximum size of the queue used to store the events
357 * like Link up/down (xge_hal_event_e)
359 driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
360 driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;
362 uld_callbacks.link_up = xge_callback_link_up;
363 uld_callbacks.link_down = xge_callback_link_down;
364 uld_callbacks.crit_err = xge_callback_crit_err;
365 uld_callbacks.event = xge_callback_event;
367 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
368 if(status != XGE_HAL_OK) {
369 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
/* Balanced by the decrement in xge_resources_free(). */
373 hal_driver_init_count = hal_driver_init_count + 1;
/* Enable all HAL debug modules at trace level. */
375 xge_hal_driver_debug_module_mask_set(0xffffffff);
376 xge_hal_driver_debug_level_set(XGE_TRACE);
/*
 * xge_media_init
 * Registers the supported ifmedia types for the interface and selects
 * autoselect as the active media.
 */
384 * Initializes, adds and sets media
386 * @devc Device Handle
389 xge_media_init(device_t devc)
391 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
393 /* Initialize Media */
394 ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
397 /* Add supported media */
398 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
399 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
400 ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
401 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
402 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
405 ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
/*
 * xge_pci_space_save
 * Snapshot the device's PCI configuration space (via pci_cfg_save) so it
 * can be restored later by xge_pci_space_restore().
 */
410 * Save PCI configuration space
415 xge_pci_space_save(device_t dev)
417 struct pci_devinfo *dinfo = NULL;
419 dinfo = device_get_ivars(dev);
420 xge_trace(XGE_TRACE, "Saving PCI configuration space");
421 pci_cfg_save(dev, dinfo, 0);
/*
 * xge_pci_space_restore
 * Restore the PCI configuration space previously saved by
 * xge_pci_space_save().
 */
425 * xge_pci_space_restore
426 * Restore saved PCI configuration space
431 xge_pci_space_restore(device_t dev)
433 struct pci_devinfo *dinfo = NULL;
435 dinfo = device_get_ivars(dev);
436 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
437 pci_cfg_restore(dev, dinfo);
/*
 * xge_msi_info_save
 * Reads the MSI capability registers (control, address low/high, data) from
 * PCI config space and caches them in lldev->msi_info for later restore.
 */
444 * @lldev Per-adapter Data
447 xge_msi_info_save(xge_lldev_t * lldev)
449 xge_os_pci_read16(lldev->pdev, NULL,
450 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
451 &lldev->msi_info.msi_control);
452 xge_os_pci_read32(lldev->pdev, NULL,
453 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
454 &lldev->msi_info.msi_lower_address);
455 xge_os_pci_read32(lldev->pdev, NULL,
456 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
457 &lldev->msi_info.msi_higher_address);
458 xge_os_pci_read16(lldev->pdev, NULL,
459 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
460 &lldev->msi_info.msi_data);
/*
 * xge_msi_info_restore
 * Writes the MSI capability registers cached by xge_msi_info_save() back to
 * PCI config space.
 */
464 * xge_msi_info_restore
465 * Restore saved MSI info
470 xge_msi_info_restore(xge_lldev_t *lldev)
473 * If interface is made down and up, traffic fails. It was observed that
474 * MSI information were getting reset on down. Restoring them.
476 xge_os_pci_write16(lldev->pdev, NULL,
477 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
478 lldev->msi_info.msi_control);
480 xge_os_pci_write32(lldev->pdev, NULL,
481 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
482 lldev->msi_info.msi_lower_address);
484 xge_os_pci_write32(lldev->pdev, NULL,
485 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
486 lldev->msi_info.msi_higher_address);
488 xge_os_pci_write16(lldev->pdev, NULL,
489 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
490 lldev->msi_info.msi_data);
/*
 * xge_mutex_init
 * Creates the driver-wide mutex plus one mutex per Tx FIFO, each named after
 * the device unit. Destroyed by xge_mutex_destroy().
 */
495 * Initializes mutexes used in driver
497 * @lldev Per-adapter Data
500 xge_mutex_init(xge_lldev_t *lldev)
504 sprintf(lldev->mtx_name_drv, "%s_drv",
505 device_get_nameunit(lldev->device));
506 mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
509 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
510 sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
511 device_get_nameunit(lldev->device), qindex);
512 mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
/*
 * xge_mutex_destroy
 * Tears down the per-FIFO Tx mutexes and the driver mutex created by
 * xge_mutex_init().
 */
519 * Destroys mutexes used in driver
521 * @lldev Per-adapter Data
524 xge_mutex_destroy(xge_lldev_t *lldev)
528 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
529 mtx_destroy(&lldev->mtx_tx[qindex]);
530 mtx_destroy(&lldev->mtx_drv);
/*
 * xge_print_info
 * Logs adapter identification (model, serial number), PCI bus width/speed
 * (Herc only, decoded from the BAR0 pci_info register) and interrupt mode.
 */
535 * Print device and driver information
537 * @lldev Per-adapter Data
540 xge_print_info(xge_lldev_t *lldev)
542 device_t dev = lldev->device;
543 xge_hal_device_t *hldev = lldev->devh;
544 xge_hal_status_e status = XGE_HAL_OK;
/* Index: (pci_info >> 60) & mask; entries for unsupported speeds included. */
546 const char *xge_pci_bus_speeds[17] = {
549 "PCIX(M1) 66MHz Bus",
550 "PCIX(M1) 100MHz Bus",
551 "PCIX(M1) 133MHz Bus",
552 "PCIX(M2) 133MHz Bus",
553 "PCIX(M2) 200MHz Bus",
554 "PCIX(M2) 266MHz Bus",
556 "PCIX(M1) 66MHz Bus (Not Supported)",
557 "PCIX(M1) 100MHz Bus (Not Supported)",
558 "PCIX(M1) 133MHz Bus (Not Supported)",
566 xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
567 device_get_nameunit(dev),
568 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
569 hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
570 xge_os_printf("%s: Serial Number %s",
571 device_get_nameunit(dev), hldev->vpd_data.serial_num);
573 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
574 status = xge_hal_mgmt_reg_read(hldev, 0,
575 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
576 if(status != XGE_HAL_OK)
577 xge_trace(XGE_ERR, "Error for getting bus speed");
/* NOTE(review): BIT(8) set => "32" looks inverted versus typical
 * width-bit semantics — confirm against the Herc pci_info register spec. */
579 xge_os_printf("%s: Adapter is on %s bit %s",
580 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
581 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
584 xge_os_printf("%s: Using %s Interrupts",
585 device_get_nameunit(dev),
586 (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
/*
 * xge_create_dma_tags
 * Creates the Tx DMA tag (multi-segment, MCLBYTES per segment) and the Rx
 * DMA tag (single segment, sized for MTU+header rounded up to a cluster /
 * jumbo-page / 9K mbuf), plus a spare Rx DMA map. On failure the tags
 * created so far are destroyed.
 */
590 * xge_create_dma_tags
591 * Creates DMA tags for both Tx and Rx
595 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
598 xge_create_dma_tags(device_t dev)
600 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
601 xge_hal_status_e status = XGE_HAL_FAIL;
602 int mtu = (lldev->ifnetp)->if_mtu, maxsize;
/* Tx tag: up to XGE_MAX_SEGS segments of MCLBYTES each. */
605 status = bus_dma_tag_create(
606 bus_get_dma_tag(dev), /* Parent */
607 PAGE_SIZE, /* Alignment */
609 BUS_SPACE_MAXADDR, /* Low Address */
610 BUS_SPACE_MAXADDR, /* High Address */
611 NULL, /* Filter Function */
612 NULL, /* Filter Function Arguments */
613 MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */
614 XGE_MAX_SEGS, /* Number of Segments */
615 MCLBYTES, /* Maximum Segment Size */
616 BUS_DMA_ALLOCNOW, /* Flags */
617 NULL, /* Lock Function */
618 NULL, /* Lock Function Arguments */
619 (&lldev->dma_tag_tx)); /* DMA Tag */
/* Pick the Rx buffer size class for the current MTU / buffer mode. */
623 maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
624 if(maxsize <= MCLBYTES) {
628 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
629 maxsize = MJUMPAGESIZE;
631 maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
/* Rx tag: exactly one segment of the size chosen above. */
635 status = bus_dma_tag_create(
636 bus_get_dma_tag(dev), /* Parent */
637 PAGE_SIZE, /* Alignment */
639 BUS_SPACE_MAXADDR, /* Low Address */
640 BUS_SPACE_MAXADDR, /* High Address */
641 NULL, /* Filter Function */
642 NULL, /* Filter Function Arguments */
643 maxsize, /* Maximum Size */
644 1, /* Number of Segments */
645 maxsize, /* Maximum Segment Size */
646 BUS_DMA_ALLOCNOW, /* Flags */
647 NULL, /* Lock Function */
648 NULL, /* Lock Function Arguments */
649 (&lldev->dma_tag_rx)); /* DMA Tag */
/* Spare map used when replacing an Rx buffer. */
653 status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
654 &lldev->extra_dma_map);
/* Error unwind: destroy whichever tags were created. */
662 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
664 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
666 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
668 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
669 status = XGE_HAL_FAIL;
/*
 * xge_confirm_changes
 * Applies a pending configuration change (MTU or LRO) by bouncing the
 * interface: stop the device, apply the change, re-init buffer sizing,
 * restart. If the device was never initialized, only the MTU/buffer-mode
 * bookkeeping is updated.
 */
675 * xge_confirm_changes
676 * Disables and Enables interface to apply requested change
678 * @lldev Per-adapter Data
679 * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0)
681 * Returns 0 or Error Number
684 xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
686 if(lldev->initialized == 0) goto _exit1;
/* Full down/up cycle under the driver lock. */
688 mtx_lock(&lldev->mtx_drv);
689 if_down(lldev->ifnetp);
690 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
692 if(option == XGE_SET_MTU)
693 (lldev->ifnetp)->if_mtu = lldev->mtu;
695 xge_buffer_mode_init(lldev, lldev->mtu);
697 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
698 if_up(lldev->ifnetp);
699 mtx_unlock(&lldev->mtx_drv);
703 /* Request was to change MTU and device not initialized */
704 if(option == XGE_SET_MTU) {
705 (lldev->ifnetp)->if_mtu = lldev->mtu;
706 xge_buffer_mode_init(lldev, lldev->mtu);
/*
 * xge_change_lro_status
 * sysctl handler for hw.xge.*.enable_lro: validates the requested value
 * (0 or 1), skips no-op requests, then applies the change via
 * xge_confirm_changes (interface bounce).
 */
713 * xge_change_lro_status
714 * Enable/Disable LRO feature
716 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
718 * Returns 0 or error number.
721 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
723 xge_lldev_t *lldev = (xge_lldev_t *)arg1;
724 int request = lldev->enabled_lro, status = XGE_HAL_OK;
/* No new value supplied => read-only query; return current setting. */
726 status = sysctl_handle_int(oidp, &request, arg2, req);
727 if((status != XGE_HAL_OK) || (!req->newptr))
730 if((request < 0) || (request > 1)) {
735 /* Return if current and requested states are same */
736 if(request == lldev->enabled_lro){
737 xge_trace(XGE_ERR, "LRO is already %s",
738 ((request) ? "enabled" : "disabled"));
742 lldev->enabled_lro = request;
743 xge_confirm_changes(lldev, XGE_CHANGE_LRO);
744 arg2 = lldev->enabled_lro;
/*
 * xge_add_sysctl_handlers
 * Registers the per-device "enable_lro" sysctl under the device's sysctl
 * tree, dispatching writes to xge_change_lro_status.
 */
751 * xge_add_sysctl_handlers
752 * Registers sysctl parameter value update handlers
754 * @lldev Per-adapter data
757 xge_add_sysctl_handlers(xge_lldev_t *lldev)
759 struct sysctl_ctx_list *context_list =
760 device_get_sysctl_ctx(lldev->device);
761 struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
763 SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
764 "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
765 xge_change_lro_status, "I", "Enable or disable LRO feature");
/*
 * xge_attach
 * Device attach: allocates the HAL device/config/PCI-info structures, maps
 * BAR0/BAR1, saves PCI config space, reads tunables, sets up MSI or line
 * interrupts, initializes the HAL device, creates the ifnet, and hooks up
 * the ISR. Each failure path unwinds via xge_resources_free() with a label
 * identifying how far attach got.
 */
770 * Connects driver to the system if probe was success
775 xge_attach(device_t dev)
777 xge_hal_device_config_t *device_config;
778 xge_hal_device_attr_t attr;
780 xge_hal_device_t *hldev;
781 xge_pci_info_t *pci_info;
782 struct ifnet *ifnetp;
783 int rid, rid0, rid1, error;
784 int msi_count = 0, status = XGE_HAL_OK;
785 int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
/* Device config is heap-allocated (it is large); freed at the end of attach. */
787 device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
789 XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
790 attach_out_config, ENOMEM);
793 lldev = (xge_lldev_t *) device_get_softc(dev);
795 XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
799 xge_mutex_init(lldev);
801 error = xge_driver_initialize();
802 if(error != XGE_HAL_OK) {
803 xge_resources_free(dev, xge_free_mutex);
804 XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
/* HAL device object. */
809 (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
811 xge_resources_free(dev, xge_free_terminate_hal_driver);
812 XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
817 /* Our private structure */
819 (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
821 xge_resources_free(dev, xge_free_hal_device);
822 XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
825 lldev->pdev = pci_info;
826 pci_info->device = dev;
829 pci_enable_busmaster(dev);
831 /* Get virtual address for BAR0 */
833 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
835 if(pci_info->regmap0 == NULL) {
836 xge_resources_free(dev, xge_free_pci_info);
837 XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
840 attr.bar0 = (char *)pci_info->regmap0;
842 pci_info->bar0resource = (xge_bus_resource_t*)
843 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
844 if(pci_info->bar0resource == NULL) {
845 xge_resources_free(dev, xge_free_bar0);
846 XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
849 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
850 rman_get_bustag(pci_info->regmap0);
851 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
852 rman_get_bushandle(pci_info->regmap0);
853 ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
856 /* Get virtual address for BAR1 */
858 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
860 if(pci_info->regmap1 == NULL) {
861 xge_resources_free(dev, xge_free_bar0_resource);
862 XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
865 attr.bar1 = (char *)pci_info->regmap1;
867 pci_info->bar1resource = (xge_bus_resource_t*)
868 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
869 if(pci_info->bar1resource == NULL) {
870 xge_resources_free(dev, xge_free_bar1);
871 XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
874 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
875 rman_get_bustag(pci_info->regmap1);
876 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
877 rman_get_bushandle(pci_info->regmap1);
878 ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
881 /* Save PCI config space */
882 xge_pci_space_save(dev);
884 attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
885 attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
886 attr.irqh = lldev->irqhandle;
887 attr.cfgh = pci_info;
888 attr.pdev = pci_info;
890 /* Initialize device configuration parameters */
891 xge_init_params(device_config, dev);
/* Try MSI if enabled by the tunable; only a single message is used. */
894 if(lldev->enabled_msi) {
895 /* Number of MSI messages supported by device */
896 msi_count = pci_msi_count(dev);
898 /* Device supports MSI */
900 xge_trace(XGE_ERR, "MSI count: %d", msi_count);
901 xge_trace(XGE_ERR, "Now, driver supporting 1 message");
904 error = pci_alloc_msi(dev, &msi_count);
907 xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
908 enable_msi = XGE_HAL_INTR_MODE_MSI;
913 xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
917 lldev->enabled_msi = enable_msi;
919 /* Allocate resource for irq */
920 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
921 (RF_SHAREABLE | RF_ACTIVE));
922 if(lldev->irq == NULL) {
923 xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
924 ((rid == 0) ? "line interrupt" : "MSI"));
/* MSI IRQ allocation failed: release MSI and retry with line interrupts. */
926 error = pci_release_msi(dev);
928 xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
930 xge_trace(XGE_ERR, "Requires reboot to use MSI again");
932 xge_trace(XGE_ERR, "Trying line interrupts");
934 lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
935 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
936 (RF_SHAREABLE | RF_ACTIVE));
938 if(lldev->irq == NULL) {
939 xge_trace(XGE_ERR, "Allocating irq resource failed");
940 xge_resources_free(dev, xge_free_bar1_resource);
946 device_config->intr_mode = lldev->enabled_msi;
948 xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
949 lldev->enabled_msi, msi_count);
952 /* Initialize HAL device */
953 error = xge_hal_device_initialize(hldev, &attr, device_config);
954 if(error != XGE_HAL_OK) {
955 xge_resources_free(dev, xge_free_irq_resource);
956 XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
/* Back-pointer so HAL callbacks can recover our softc. */
960 xge_hal_device_private_set(hldev, lldev);
962 error = xge_interface_setup(dev);
968 ifnetp = lldev->ifnetp;
969 ifnetp->if_mtu = device_config->mtu;
973 /* Associate interrupt handler with the device */
974 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
975 error = bus_setup_intr(dev, lldev->irq,
976 (INTR_TYPE_NET | INTR_MPSAFE),
977 #if __FreeBSD_version > 700030
980 xge_isr_msi, lldev, &lldev->irqhandle);
/* Cache MSI registers so they can be re-programmed after an if down/up. */
981 xge_msi_info_save(lldev);
984 error = bus_setup_intr(dev, lldev->irq,
985 (INTR_TYPE_NET | INTR_MPSAFE),
986 #if __FreeBSD_version > 700030
989 xge_isr_line, lldev, &lldev->irqhandle);
992 xge_resources_free(dev, xge_free_media_interface);
993 XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
997 xge_print_info(lldev);
999 xge_add_sysctl_handlers(lldev);
1001 xge_buffer_mode_init(lldev, device_config->mtu);
/* device_config is only needed during attach; release it. */
1004 xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
/*
 * xge_resources_free
 * Staged teardown for attach/detach. The switch cases appear to fall
 * through deliberately: entering at label `error` releases that resource
 * and then everything acquired before it, in reverse order of acquisition.
 */
1010 * xge_resources_free
1011 * Undo what-all we did during load/attach
1013 * @dev Device Handle
1014 * @error Identifies what-all to undo
1017 xge_resources_free(device_t dev, xge_lables_e error)
1020 xge_pci_info_t *pci_info;
1021 xge_hal_device_t *hldev;
1025 lldev = (xge_lldev_t *) device_get_softc(dev);
1026 pci_info = lldev->pdev;
1029 hldev = lldev->devh;
1033 /* Teardown interrupt handler - device association */
1034 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1036 case xge_free_media_interface:
1038 ifmedia_removeall(&lldev->media);
1041 ether_ifdetach(lldev->ifnetp);
1042 if_free(lldev->ifnetp);
1044 xge_hal_device_private_set(hldev, NULL);
1045 xge_hal_device_disable(hldev);
1047 case xge_free_terminate_hal_device:
1049 xge_hal_device_terminate(hldev);
1051 case xge_free_irq_resource:
1052 /* Release IRQ resource */
/* rid is 0 for a line interrupt, 1 for the single MSI message. */
1053 bus_release_resource(dev, SYS_RES_IRQ,
1054 ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1057 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1058 status = pci_release_msi(dev);
1062 "pci_release_msi returned %d", status);
1067 case xge_free_bar1_resource:
1068 /* Restore PCI configuration space */
1069 xge_pci_space_restore(dev);
1071 /* Free bar1resource */
1072 xge_os_free(NULL, pci_info->bar1resource,
1073 sizeof(xge_bus_resource_t));
1078 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1081 case xge_free_bar0_resource:
1082 /* Free bar0resource */
1083 xge_os_free(NULL, pci_info->bar0resource,
1084 sizeof(xge_bus_resource_t));
1089 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1092 case xge_free_pci_info:
1093 /* Disable Bus Master */
1094 pci_disable_busmaster(dev);
1096 /* Free pci_info_t */
1098 xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1100 case xge_free_hal_device:
1101 /* Free device configuration struct and HAL device */
1102 xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1104 case xge_free_terminate_hal_driver:
1105 /* Terminate HAL driver */
/* Last device out terminates the shared HAL driver. */
1106 hal_driver_init_count = hal_driver_init_count - 1;
1107 if(!hal_driver_init_count) {
1108 xge_hal_driver_terminate();
1111 case xge_free_mutex:
1112 xge_mutex_destroy(lldev);
/*
 * xge_detach
 * Device detach: guarded by in_detach so the teardown runs only once, then
 * releases everything via xge_resources_free(xge_free_all).
 */
1118 * Detaches driver from the Kernel subsystem
1120 * @dev Device Handle
1123 xge_detach(device_t dev)
1125 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1127 if(lldev->in_detach == 0) {
1128 lldev->in_detach = 1;
1130 xge_resources_free(dev, xge_free_all);
/*
 * xge_shutdown
 * Bus shutdown method: quiesce the device before system shutdown.
 */
1138 * To shutdown device before system shutdown
1140 * @dev Device Handle
1143 xge_shutdown(device_t dev)
1145 xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
/*
 * xge_interface_setup
 * Creates and initializes the ifnet for this adapter: reads the MAC address
 * from the device, allocates the ifnet, fills in callbacks/capabilities
 * (VLAN, TSO, LRO per tunables) and attaches it to the Ethernet layer.
 */
1152 * xge_interface_setup
1155 * @dev Device Handle
1157 * Returns 0 on success, ENXIO/ENOMEM on failure
1160 xge_interface_setup(device_t dev)
1162 u8 mcaddr[ETHER_ADDR_LEN];
1163 xge_hal_status_e status;
1164 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1165 struct ifnet *ifnetp;
1166 xge_hal_device_t *hldev = lldev->devh;
1168 /* Get the MAC address of the device */
1169 status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1170 if(status != XGE_HAL_OK) {
1171 xge_resources_free(dev, xge_free_terminate_hal_device);
1172 XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
1175 /* Get interface ifnet structure for this Ether device */
1176 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1177 if(ifnetp == NULL) {
1178 xge_resources_free(dev, xge_free_terminate_hal_device);
1179 XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
1182 /* Initialize interface ifnet structure */
1183 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1184 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1185 ifnetp->if_baudrate = XGE_BAUDRATE;
1186 ifnetp->if_init = xge_init;
1187 ifnetp->if_softc = lldev;
1188 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1189 ifnetp->if_ioctl = xge_ioctl;
1190 ifnetp->if_start = xge_send;
1192 /* TODO: Check and assign optimal value */
1193 ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
1195 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
/* TSO/LRO capabilities follow the kenv tunables read in xge_init_params. */
1197 if(lldev->enabled_tso)
1198 ifnetp->if_capabilities |= IFCAP_TSO4;
1199 if(lldev->enabled_lro)
1200 ifnetp->if_capabilities |= IFCAP_LRO;
1202 ifnetp->if_capenable = ifnetp->if_capabilities;
1204 /* Attach the interface */
1205 ether_ifattach(ifnetp, mcaddr);
1212  * xge_callback_link_up
1213  * Callback for Link-up indication from HAL
1215  * @userdata Per-adapter data
/*
 * Clears the OACTIVE flag so transmission may resume and reports
 * LINK_STATE_UP to the network stack.
 */
1218 xge_callback_link_up(void *userdata)
1220     xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1221     struct ifnet *ifnetp = lldev->ifnetp;
1223     ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1224     if_link_state_change(ifnetp, LINK_STATE_UP);
1228  * xge_callback_link_down
1229  * Callback for Link-down indication from HAL
1231  * @userdata Per-adapter data
/*
 * Sets the OACTIVE flag to stall transmission and reports
 * LINK_STATE_DOWN to the network stack.
 */
1234 xge_callback_link_down(void *userdata)
1236     xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1237     struct ifnet *ifnetp = lldev->ifnetp;
1239     ifnetp->if_flags |= IFF_DRV_OACTIVE;
1240     if_link_state_change(ifnetp, LINK_STATE_DOWN);
1244  * xge_callback_crit_err
1245  * Callback for Critical error indication from HAL
1247  * @userdata Per-adapter data
1248  * @type Event type (Enumerated hardware error)
1249  * @serr_data Hardware status
/*
 * Logs the critical error and recovers by resetting the adapter.
 * The type/serr_data details are not inspected here.
 */
1252 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1254     xge_trace(XGE_ERR, "Critical Error");
1255     xge_reset(userdata);
1259  * xge_callback_event
1260  * Callback from HAL indicating that some event has been queued
1262  * @item Queued event item
/*
 * Dispatches deferred events: TRY_XMIT_AGAIN re-enables transmit when
 * descriptors are available (otherwise re-queues itself), and
 * DEVICE_RESETTING triggers a full adapter reset.
 * NOTE(review): break statements/closing braces are elided in this
 * extract.
 */
1265 xge_callback_event(xge_queue_item_t *item)
1267 xge_lldev_t *lldev = NULL;
1268 xge_hal_device_t *hldev = NULL;
1269 struct ifnet *ifnetp = NULL;
1271 hldev = item->context;
1272 lldev = xge_hal_device_private(hldev);
1273 ifnetp = lldev->ifnetp;
1275 switch(item->event_type) {
1276 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1277 if(lldev->initialized) {
/* Resume transmit only if the first fifo has free descriptors. */
1278 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
1279 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
/* Still no descriptors: defer the retry through the HAL queue. */
1282 xge_queue_produce_context(
1283 xge_hal_device_queue(lldev->devh),
1284 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1289 case XGE_LL_EVENT_DEVICE_RESETTING:
1290 xge_reset(item->context);
1299  * xge_ifmedia_change
1300  * Media change driver callback
1302  * @ifnetp Interface Handle
1304  * Returns 0 if media is Ether else EINVAL
/* Only IFM_ETHER media is accepted; anything else is rejected. */
1307 xge_ifmedia_change(struct ifnet *ifnetp)
1309     xge_lldev_t *lldev = ifnetp->if_softc;
1310     struct ifmedia *ifmediap = &lldev->media;
1312     return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1316  * xge_ifmedia_status
1317  * Media status driver callback
1319  * @ifnetp Interface Handle
1320  * @ifmr Interface Media Settings
/*
 * Reads the adapter_status register and reports link as
 * 10GBase-SR full-duplex active when neither RMAC remote nor local
 * fault bit is set; otherwise reports link down.
 * NOTE(review): regvalue's declaration and some braces are elided in
 * this extract.
 */
1323 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1325     xge_hal_status_e status;
1327     xge_lldev_t *lldev = ifnetp->if_softc;
1328     xge_hal_device_t *hldev = lldev->devh;
1330     ifmr->ifm_status = IFM_AVALID;
1331     ifmr->ifm_active = IFM_ETHER;
1333     status = xge_hal_mgmt_reg_read(hldev, 0,
1334         xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1335     if(status != XGE_HAL_OK) {
1336         xge_trace(XGE_TRACE, "Getting adapter status failed");
/* Link is up only if both RMAC fault bits are clear. */
1340     if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1341         XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1342         ifmr->ifm_status |= IFM_ACTIVE;
1343         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1344         if_link_state_change(ifnetp, LINK_STATE_UP);
1347         if_link_state_change(ifnetp, LINK_STATE_DOWN);
1355  * IOCTL to get statistics
1357  * @lldev Per-adapter data
1358  * @ifreqp Interface request
/*
 * Private-ioctl worker: copies the requested statistics/configuration
 * block (HW stats, PCI config, device stats, SW error stats, driver
 * stats, version string, device config, buffer mode) out to userland.
 * Each branch takes mtx_drv around the HAL query and copyout()s the
 * result.  NOTE(review): the switch header, retValue assignments,
 * break statements and malloc-failure checks are elided in this
 * extract; the 'info' and 'version' declarations are likewise not
 * visible here.
 */
1361 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1363     xge_hal_status_e status = XGE_HAL_OK;
1364     char *data = (char *)ifreqp->ifr_data;
1366     int retValue = EINVAL;
1369     case XGE_QUERY_STATS:
1370         mtx_lock(&lldev->mtx_drv);
1371         status = xge_hal_stats_hw(lldev->devh,
1372             (xge_hal_stats_hw_info_t **)&info);
1373         mtx_unlock(&lldev->mtx_drv);
1374         if(status == XGE_HAL_OK) {
1375             if(copyout(info, ifreqp->ifr_data,
1376                 sizeof(xge_hal_stats_hw_info_t)) == 0)
1380             xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1385     case XGE_QUERY_PCICONF:
1386         info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1388             mtx_lock(&lldev->mtx_drv);
1389             status = xge_hal_mgmt_pci_config(lldev->devh, info,
1390                 sizeof(xge_hal_pci_config_t));
1391             mtx_unlock(&lldev->mtx_drv);
1392             if(status == XGE_HAL_OK) {
1393                 if(copyout(info, ifreqp->ifr_data,
1394                     sizeof(xge_hal_pci_config_t)) == 0)
1399                     "Getting PCI configuration failed (%d)", status);
1401             xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1405     case XGE_QUERY_DEVSTATS:
1406         info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1408             mtx_lock(&lldev->mtx_drv);
1409             status =xge_hal_mgmt_device_stats(lldev->devh, info,
1410                 sizeof(xge_hal_stats_device_info_t));
1411             mtx_unlock(&lldev->mtx_drv);
1412             if(status == XGE_HAL_OK) {
1413                 if(copyout(info, ifreqp->ifr_data,
1414                     sizeof(xge_hal_stats_device_info_t)) == 0)
1418                 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1421             xge_os_free(NULL, info,
1422                 sizeof(xge_hal_stats_device_info_t));
1426     case XGE_QUERY_SWSTATS:
1427         info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1429             mtx_lock(&lldev->mtx_drv);
1430             status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1431                 sizeof(xge_hal_stats_sw_err_t));
1432             mtx_unlock(&lldev->mtx_drv);
1433             if(status == XGE_HAL_OK) {
1434                 if(copyout(info, ifreqp->ifr_data,
1435                     sizeof(xge_hal_stats_sw_err_t)) == 0)
1440                     "Getting tcode statistics failed (%d)", status);
1442             xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
/* Driver-maintained counters live in the softc; no HAL call needed. */
1446     case XGE_QUERY_DRIVERSTATS:
1447         if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1448             sizeof(xge_driver_stats_t)) == 0) {
1453             "Copyout of driver statistics failed (%d)", status);
1457     case XGE_READ_VERSION:
1458         info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1459         if(version != NULL) {
1460             strcpy(info, XGE_DRIVER_VERSION);
1461             if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1463             xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1467     case XGE_QUERY_DEVCONF:
1468         info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1470             mtx_lock(&lldev->mtx_drv);
1471             status = xge_hal_mgmt_device_config(lldev->devh, info,
1472                 sizeof(xge_hal_device_config_t));
1473             mtx_unlock(&lldev->mtx_drv);
1474             if(status == XGE_HAL_OK) {
1475                 if(copyout(info, ifreqp->ifr_data,
1476                     sizeof(xge_hal_device_config_t)) == 0)
1480                 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1483             xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1487     case XGE_QUERY_BUFFER_MODE:
1488         if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
/* Buffer-mode change requests just answer Y/N here; the actual switch
 * presumably happens elsewhere -- elided in this extract. */
1493     case XGE_SET_BUFFER_MODE_1:
1494     case XGE_SET_BUFFER_MODE_2:
1495     case XGE_SET_BUFFER_MODE_5:
1496         *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1497         if(copyout(data, ifreqp->ifr_data, sizeof(data)) == 0)
1501         xge_trace(XGE_TRACE, "Nothing is matching");
1509  * xge_ioctl_registers
1510  * IOCTL to get registers
1512  * @lldev Per-adapter data
1513  * @ifreqp Interface request
/*
 * Private-ioctl worker for register access.  Option "-r" reads one
 * register, "-w" writes one and reads it back for verification; the
 * remaining branch dumps all BAR0 registers (stride 0x8 up to
 * XGE_OFFSET_OF_LAST_REG) into the user buffer.  All HAL accesses are
 * serialized with mtx_drv.  NOTE(review): val64's declaration, several
 * braces and retValue updates are elided in this extract.
 */
1516 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1518     xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
1519     xge_hal_status_e status = XGE_HAL_OK;
1520     int retValue = EINVAL, offset = 0, index = 0;
1523     /* Reading a register */
1524     if(strcmp(data->option, "-r") == 0) {
1525         data->value = 0x0000;
1526         mtx_lock(&lldev->mtx_drv);
1527         status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1529         mtx_unlock(&lldev->mtx_drv);
1530         if(status == XGE_HAL_OK) {
1531             if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
1535     /* Writing to a register */
1536     else if(strcmp(data->option, "-w") == 0) {
1537         mtx_lock(&lldev->mtx_drv);
1538         status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
/* Read back and compare to confirm the write actually landed. */
1540         if(status == XGE_HAL_OK) {
1542             status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1544             if(status != XGE_HAL_OK) {
1545                 xge_trace(XGE_ERR, "Reading back updated register failed");
1548             if(val64 != data->value) {
1550                     "Read and written register values mismatched");
1556             xge_trace(XGE_ERR, "Getting register value failed");
1558         mtx_unlock(&lldev->mtx_drv);
/* Full register dump: walk BAR0 in 8-byte steps. */
1561         mtx_lock(&lldev->mtx_drv);
1562         for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1563             index++, offset += 0x0008) {
1565             status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1566             if(status != XGE_HAL_OK) {
1567                 xge_trace(XGE_ERR, "Getting register value failed");
1570             *((u64 *)((u64 *)data + index)) = val64;
1573         mtx_unlock(&lldev->mtx_drv);
1576         if(copyout(data, ifreqp->ifr_data,
1577             sizeof(xge_hal_pci_bar0_t)) != 0) {
1578             xge_trace(XGE_ERR, "Copyout of register values failed");
1583         xge_trace(XGE_ERR, "Getting register values failed");
1591  * Callback to control the device - Interface configuration
1593  * @ifnetp Interface Handle
1594  * @command Device control command
1595  * @data Parameters associated with command (if any)
/*
 * ifnet ioctl handler: address ops go to ether_ioctl(); SIOCSIFMTU,
 * interface flags, multicast list, media and capability changes are
 * handled inline; SIOCGPRIVATE_0/1 dispatch to the stats/register
 * ioctl workers above.  Returns early when a detach is in progress.
 * NOTE(review): the switch header, several case labels, break
 * statements and closing braces are elided in this extract.
 */
1598 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1600     struct ifreq *ifreqp = (struct ifreq *)data;
1601     xge_lldev_t *lldev = ifnetp->if_softc;
1602     struct ifmedia *ifmediap = &lldev->media;
1603     int retValue = 0, mask = 0;
/* Refuse ioctls once teardown has started. */
1605     if(lldev->in_detach) {
1610         /* Set/Get ifnet address */
1613             ether_ioctl(ifnetp, command, data);
1618             retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1621         /* Set ifnet flags */
1623             if(ifnetp->if_flags & IFF_UP) {
1624                 /* Link status is UP */
1625                 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Re-sync promiscuous state with the current IFF_PROMISC flag. */
1628                 xge_disable_promisc(lldev);
1629                 xge_enable_promisc(lldev);
1632                 /* Link status is DOWN */
1633                 /* If device is in running, make it down */
1634                 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1640         /* Add/delete multicast address */
1643             if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1644                 xge_setmulti(lldev);
1648         /* Set/Get net media */
1651             retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1654         /* Set capabilities */
1656             mtx_lock(&lldev->mtx_drv);
1657             mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
/* Toggling TXCSUM also drops TSO4, since TSO requires tx checksum. */
1658             if(mask & IFCAP_TXCSUM) {
1659                 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1660                     ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1661                     ifnetp->if_hwassist &=
1662                         ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1665                     ifnetp->if_capenable |= IFCAP_TXCSUM;
1666                     ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1669             if(mask & IFCAP_TSO4) {
1670                 if(ifnetp->if_capenable & IFCAP_TSO4) {
1671                     ifnetp->if_capenable &= ~IFCAP_TSO4;
1672                     ifnetp->if_hwassist &= ~CSUM_TSO;
1674                     xge_os_printf("%s: TSO Disabled",
1675                         device_get_nameunit(lldev->device));
/* TSO4 may only be enabled while TXCSUM is enabled. */
1677                 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1678                     ifnetp->if_capenable |= IFCAP_TSO4;
1679                     ifnetp->if_hwassist |= CSUM_TSO;
1681                     xge_os_printf("%s: TSO Enabled",
1682                         device_get_nameunit(lldev->device));
1686             mtx_unlock(&lldev->mtx_drv);
1689         /* Custom IOCTL 0 */
1690         case SIOCGPRIVATE_0:
1691             retValue = xge_ioctl_stats(lldev, ifreqp);
1694         /* Custom IOCTL 1 */
1695         case SIOCGPRIVATE_1:
1696             retValue = xge_ioctl_registers(lldev, ifreqp);
1708  * Initialize the interface
1710  * @plldev Per-adapter Data
/*
 * ifnet if_init entry point: zeroes the driver statistics and brings
 * the device up via xge_device_init(), all under mtx_drv.
 */
1713 xge_init(void *plldev)
1715     xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1717     mtx_lock(&lldev->mtx_drv);
1718     xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1719     xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1720     mtx_unlock(&lldev->mtx_drv);
1725  * Initialize the interface (called by holding lock)
1727  * @pdevin Per-adapter Data
/*
 * Bring-up path shared by init and reset: sets the HAL MTU, enables
 * the HAL device, programs the MAC address from the ifnet link-level
 * address, opens Tx/Rx channels (temporarily dropping mtx_drv),
 * configures hwassist from the enabled capabilities, enables
 * interrupts and starts the poll timer.  Caller must hold mtx_drv
 * (asserted below).  NOTE(review): early-return/goto targets and
 * closing braces are elided in this extract.
 */
1730 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1732     struct ifnet *ifnetp = lldev->ifnetp;
1733     xge_hal_device_t *hldev = lldev->devh;
1734     struct ifaddr *ifaddrp;
1735     unsigned char *macaddr;
1736     struct sockaddr_dl *sockaddrp;
1737     int status = XGE_HAL_OK;
1739     mtx_assert((&lldev->mtx_drv), MA_OWNED);
1741     /* If device is in running state, initializing is not required */
1742     if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1745     /* Initializing timer */
1746     callout_init(&lldev->timer, CALLOUT_MPSAFE);
1748     xge_trace(XGE_TRACE, "Set MTU size");
1749     status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1750     if(status != XGE_HAL_OK) {
1751         xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1755     /* Enable HAL device */
1756     xge_hal_device_enable(hldev);
1758     /* Get MAC address and update in HAL */
1759     ifaddrp = ifnetp->if_addr;
1760     sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1761     sockaddrp->sdl_type = IFT_ETHER;
1762     sockaddrp->sdl_alen = ifnetp->if_addrlen;
1763     macaddr = LLADDR(sockaddrp);
1764     xge_trace(XGE_TRACE,
1765         "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1766         *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1767         *(macaddr + 4), *(macaddr + 5));
1768     status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1769     if(status != XGE_HAL_OK)
1770         xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
/* Channel open may sleep/allocate, so drop the driver mutex around it. */
1772     /* Opening channels */
1773     mtx_unlock(&lldev->mtx_drv);
1774     status = xge_channel_open(lldev, option);
1775     mtx_lock(&lldev->mtx_drv);
1776     if(status != XGE_HAL_OK)
1779     /* Set appropriate flags */
1780     ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
1781     ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1783     /* Checksum capability */
1784     ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1785         (CSUM_TCP | CSUM_UDP) : 0;
1787     if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1788         ifnetp->if_hwassist |= CSUM_TSO;
1790     /* Enable interrupts */
1791     xge_hal_device_intr_enable(hldev);
1793     callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1795     /* Disable promiscuous mode */
1796     xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1797     xge_enable_promisc(lldev);
1799     /* Device is initialized */
1800     lldev->initialized = 1;
1801     xge_os_mdelay(1000);
1809  * Timer timeout function to handle link status
1811  * @devp Per-adapter Data
/*
 * Periodic callout: polls the HAL for device/link changes and
 * re-arms itself one second out.
 */
1814 xge_timer(void *devp)
1816     xge_lldev_t *lldev = (xge_lldev_t *)devp;
1817     xge_hal_device_t *hldev = lldev->devh;
1819     /* Poll for changes */
1820     xge_hal_device_poll(hldev);
1823     callout_reset(&lldev->timer, hz, xge_timer, lldev);
1830  * De-activate the interface
1832  * @lldev Per-adater Data
/* Locked wrapper around xge_device_stop() for a normal (non-reset) stop. */
1835 xge_stop(xge_lldev_t *lldev)
1837     mtx_lock(&lldev->mtx_drv);
1838     xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1839     mtx_unlock(&lldev->mtx_drv);
1844  * ISR filter function - to filter interrupts from other devices (shared)
1846  * @handle Per-adapter Data
1849  * FILTER_STRAY if interrupt is from other device
1850  * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
/*
 * Fast interrupt filter: reads general_int_status from BAR0; a zero
 * value means the interrupt came from another device sharing the line.
 * NOTE(review): val64's declaration and the return are elided here.
 */
1853 xge_isr_filter(void *handle)
1855     xge_lldev_t *lldev = (xge_lldev_t *)handle;
1856     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1857     u16 retValue = FILTER_STRAY;
1860     XGE_DRV_STATS(isr_filter);
1862     val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1863         &bar0->general_int_status);
1864     retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1871  * Interrupt service routine for Line interrupts
1873  * @plldev Per-adapter Data
/*
 * Threaded ISR for legacy (INTx) interrupts: lets the HAL handle the
 * IRQ and, if the send queue is non-empty, (per the elided lines)
 * presumably restarts transmission -- confirm against full source.
 */
1876 xge_isr_line(void *plldev)
1878     xge_hal_status_e status;
1879     xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1880     xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1881     struct ifnet *ifnetp = lldev->ifnetp;
1883     XGE_DRV_STATS(isr_line);
1885     if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1886         status = xge_hal_device_handle_irq(hldev);
1887         if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1894  * ISR for Message signaled interrupts
/* MSI handler: no status register check needed, go straight to the HAL. */
1897 xge_isr_msi(void *plldev)
1899     xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1900     XGE_DRV_STATS(isr_msi);
1901     xge_hal_device_continue_irq(lldev->devh);
1906  * Initiate and open all Rx channels
1909  * @lldev Per-adapter Data
1910  * @rflag Channel open/close/reopen flag
1912  * Returns 0 or Error Number
/*
 * Opens ring (Rx) channel 'qid' with the standard attribute set
 * (completion callback, per-descriptor private area, replenish/term
 * hooks), after verifying the adapter is ready.
 * NOTE(review): some attr initializers (e.g. post_qid/userdata) and
 * the _exit label/return are elided in this extract.
 */
1915 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1917     u64 adapter_status = 0x0;
1918     xge_hal_status_e status = XGE_HAL_FAIL;
1920     xge_hal_channel_attr_t attr = {
1923         .callback = xge_rx_compl,
1924         .per_dtr_space = sizeof(xge_rx_priv_t),
1926         .type = XGE_HAL_CHANNEL_TYPE_RING,
1928         .dtr_init = xge_rx_initial_replenish,
1929         .dtr_term = xge_rx_term
1932     /* If device is not ready, return */
1933     status = xge_hal_device_status(lldev->devh, &adapter_status);
1934     if(status != XGE_HAL_OK) {
1935         xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1936         XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1939     status = xge_hal_channel_open(lldev->devh, &attr,
1940         &lldev->ring_channel[qid], rflag);
1949  * Initialize and open all Tx channels
1951  * @lldev Per-adapter Data
1952  * @tflag Channel open/close/reopen flag
1954  * Returns 0 or Error Number
/*
 * Opens all XGE_FIFO_COUNT fifo (Tx) channels with the standard
 * attribute set.  On a partial failure, channels opened so far are
 * closed again before bailing out.  NOTE(review): qindex/index
 * declarations and the _exit label/return are elided in this extract.
 */
1957 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1959     xge_hal_status_e status = XGE_HAL_FAIL;
1960     u64 adapter_status = 0x0;
1963     xge_hal_channel_attr_t attr = {
1965         .callback = xge_tx_compl,
1966         .per_dtr_space = sizeof(xge_tx_priv_t),
1968         .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1970         .dtr_init = xge_tx_initial_replenish,
1971         .dtr_term = xge_tx_term
1974     /* If device is not ready, return */
1975     status = xge_hal_device_status(lldev->devh, &adapter_status);
1976     if(status != XGE_HAL_OK) {
1977         xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1978         XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1981     for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1982         attr.post_qid = qindex,
1983         status = xge_hal_channel_open(lldev->devh, &attr,
1984             &lldev->fifo_channel[qindex], tflag);
/* Roll back: close every fifo channel opened before the failure. */
1985         if(status != XGE_HAL_OK) {
1986             for(index = 0; index < qindex; index++)
1987                 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
1999  * @lldev Per-adapter Data
/*
 * xge_enable_msi: patches up MSI state in PCI config space (the data
 * register offset depends on the 64-bit-address-capable bit 0x80 of
 * msi_control) and programs every free HAL channel's TxMAT/RxMAT
 * entry with the MSI data value.  NOTE(review): the val16 updates
 * between the reads and writes are elided in this extract.
 */
2002 xge_enable_msi(xge_lldev_t *lldev)
2004     xge_list_t *item = NULL;
2005     xge_hal_device_t *hldev = lldev->devh;
2006     xge_hal_channel_t *channel = NULL;
2007     u16 offset = 0, val16 = 0;
2009     xge_os_pci_read16(lldev->pdev, NULL,
2010         xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2012     /* Update msi_data */
/* 0x80 in msi_control selects the 64-bit layout (data at 0x4c vs 0x48). */
2013     offset = (val16 & 0x80) ? 0x4c : 0x48;
2014     xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2019     xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2021     /* Update msi_control */
2022     xge_os_pci_read16(lldev->pdev, NULL,
2023         xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2025     xge_os_pci_write16(lldev->pdev, NULL,
2026         xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2028     /* Set TxMAT and RxMAT registers with MSI */
2029     xge_list_for_each(item, &hldev->free_channels) {
2030         channel = xge_container_of(item, xge_hal_channel_t, item);
2031         xge_hal_channel_msi_set(channel, 1, (u32)val16);
2037  * Open both Tx and Rx channels
2039  * @lldev Per-adapter Data
2040  * @option Channel reopen option
/*
 * Opens all Rx rings, then (optionally) seeds the LRO free-session
 * list, then opens all Tx fifos.  Special case: if a 1-buffer-mode
 * ring open fails with a jumbo MTU, everything opened so far is torn
 * down, DMA tags are destroyed and the driver retries in 5-buffer
 * mode.  Error path closes the rings opened so far.
 * NOTE(review): goto targets (_exit/_exit1), the MSI-X branch and
 * several closing braces are elided in this extract.
 */
2043 xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2045     xge_lro_entry_t *lro_session = NULL;
2046     xge_hal_status_e status = XGE_HAL_OK;
2047     int index = 0, index2 = 0;
2049     if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2050         xge_msi_info_restore(lldev);
2051         xge_enable_msi(lldev);
2055     status = xge_create_dma_tags(lldev->device);
2056     if(status != XGE_HAL_OK)
2057         XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2059     /* Open ring (Rx) channel */
2060     for(index = 0; index < XGE_RING_COUNT; index++) {
2061         status = xge_rx_open(index, lldev, option);
2062         if(status != XGE_HAL_OK) {
2064              * DMA mapping fails in the unpatched Kernel which can't
2065              * allocate contiguous memory for Jumbo frames.
2066              * Try using 5 buffer mode.
2068             if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2069                 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2071                 /* Close so far opened channels */
2072                 for(index2 = 0; index2 < index; index2++) {
2073                     xge_hal_channel_close(lldev->ring_channel[index2],
2077                 /* Destroy DMA tags intended to use for 1 buffer mode */
2078                 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2079                     lldev->extra_dma_map)) {
2080                     xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2082                 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2083                     xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2084                 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2085                     xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2087                 /* Switch to 5 buffer mode */
2088                 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2089                 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2095                 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
/* Pre-allocate the pool of LRO session descriptors. */
2101     if(lldev->enabled_lro) {
2102         SLIST_INIT(&lldev->lro_free);
2103         SLIST_INIT(&lldev->lro_active);
2104         lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2106         for(index = 0; index < lldev->lro_num; index++) {
2107             lro_session = (xge_lro_entry_t *)
2108                 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2109             if(lro_session == NULL) {
2110                 lldev->lro_num = index;
2113             SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2117     /* Open FIFO (Tx) channel */
2118     status = xge_tx_open(lldev, option);
2119     if(status != XGE_HAL_OK)
2120         XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2126      * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2127      * Initialization of LRO failed (index is XGE_RING_COUNT)
2128      * Opening Tx channel failed (index is XGE_RING_COUNT)
2130     for(index2 = 0; index2 < index; index2++)
2131         xge_hal_channel_close(lldev->ring_channel[index2], option);
2139  * Close both Tx and Rx channels
2141  * @lldev Per-adapter Data
2142  * @option Channel reopen option
/*
 * Closes every fifo (Tx) then every ring (Rx) channel, then destroys
 * the Rx extra DMA map and both DMA tags, logging (but not failing
 * on) destroy errors.  NOTE(review): qindex's declaration is elided
 * in this extract.
 */
2146 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2152     /* Close FIFO (Tx) channel */
2153     for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2154         xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2156     /* Close Ring (Rx) channels */
2157     for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2158         xge_hal_channel_close(lldev->ring_channel[qindex], option);
2160     if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2161         xge_trace(XGE_ERR, "Rx extra map destroy failed");
2162     if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2163         xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2164     if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2165         xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2172  * @arg Parameter passed from dmamap
2174  * @nseg Number of segments
/* bus_dmamap_load callback: stores the first segment's bus address
 * into *arg.  Only the first segment is consumed. */
2178 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2181     *(bus_addr_t *) arg = segs->ds_addr;
2189  * @lldev Per-adapter Data
/*
 * xge_reset: full adapter recovery -- stop then re-init the device
 * under mtx_drv.  Skipped entirely when the device was never
 * initialized.
 */
2192 xge_reset(xge_lldev_t *lldev)
2194     xge_trace(XGE_TRACE, "Reseting the chip");
2196     /* If the device is not initialized, return */
2197     if(lldev->initialized) {
2198         mtx_lock(&lldev->mtx_drv);
2199         xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2200         xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2201         mtx_unlock(&lldev->mtx_drv);
2209  * Set an address as a multicast address
2211  * @lldev Per-adapter Data
/*
 * Rebuilds the hardware multicast filter from the ifnet multicast
 * list: toggles all-multicast mode via the HAL, then (when filtering)
 * clears the 48-entry MAC table and reprograms it with the current
 * AF_LINK addresses, holding the if_maddr lock while walking the list.
 * NOTE(review): the index/offset/lladdr declarations, index increments
 * and the table-overflow handling are elided in this extract.
 */
2214 xge_setmulti(xge_lldev_t *lldev)
2216     struct ifmultiaddr *ifma;
2218     xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2219     struct ifnet *ifnetp = lldev->ifnetp;
2222     int table_size = 47;
2223     xge_hal_status_e status = XGE_HAL_OK;
2224     u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2226     if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2227         status = xge_hal_device_mcast_enable(hldev);
2228         lldev->all_multicast = 1;
2230     else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2231         status = xge_hal_device_mcast_disable(hldev);
2232         lldev->all_multicast = 0;
2235     if(status != XGE_HAL_OK) {
2236         xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2240     /* Updating address list */
2241     if_maddr_rlock(ifnetp);
2243     TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2244         if(ifma->ifma_addr->sa_family != AF_LINK) {
2247         lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2250     if_maddr_runlock(ifnetp);
2252     if((!lldev->all_multicast) && (index)) {
2253         lldev->macaddr_count = (index + 1);
2254         if(lldev->macaddr_count > table_size) {
/* Reset every table slot to the broadcast pattern before reloading. */
2258         /* Clear old addresses */
2259         for(index = 0; index < 48; index++) {
2260             xge_hal_device_macaddr_set(hldev, (offset + index),
2265         /* Add new addresses */
2266         if_maddr_rlock(ifnetp);
2268         TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2269             if(ifma->ifma_addr->sa_family != AF_LINK) {
2272             lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2273             xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2276         if_maddr_runlock(ifnetp);
2283  * xge_enable_promisc
2284  * Enable Promiscuous Mode
2286  * @lldev Per-adapter Data
/*
 * If IFF_PROMISC is set on the ifnet, enables promiscuous mode in the
 * HAL and rewrites rx_pa_cfg so VLAN tags are NOT stripped while
 * promiscuous.  NOTE(review): val64's declaration and the register
 * address argument of the read/write are elided in this extract.
 */
2289 xge_enable_promisc(xge_lldev_t *lldev)
2291     struct ifnet *ifnetp = lldev->ifnetp;
2292     xge_hal_device_t *hldev = lldev->devh;
2293     xge_hal_pci_bar0_t *bar0 = NULL;
2296     bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2298     if(ifnetp->if_flags & IFF_PROMISC) {
2299         xge_hal_device_promisc_enable(lldev->devh);
2302          * When operating in promiscuous mode, don't strip the VLAN tag
2304         val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear then set the strip-mode field to 0 (keep tags). */
2306         val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2307         val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2308         xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2311         xge_trace(XGE_TRACE, "Promiscuous mode ON");
2316  * xge_disable_promisc
2317  * Disable Promiscuous Mode
2319  * @lldev Per-adapter Data
/*
 * Disables promiscuous mode in the HAL and rewrites rx_pa_cfg so VLAN
 * tags ARE stripped again in normal operation.  NOTE(review): val64's
 * declaration and the register address argument are elided in this
 * extract.
 */
2322 xge_disable_promisc(xge_lldev_t *lldev)
2324     xge_hal_device_t *hldev = lldev->devh;
2325     xge_hal_pci_bar0_t *bar0 = NULL;
2328     bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2330     xge_hal_device_promisc_disable(lldev->devh);
2333      * Strip VLAN tag when operating in non-promiscuous mode
2335     val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear then set the strip-mode field to 1 (strip tags). */
2337     val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2338     val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2339     xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2342     xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2347  * Change interface MTU to a requested valid size
2349  * @lldev Per-adapter Data
2350  * @NewMtu Requested MTU
2352  * Returns 0 or Error Number
/*
 * Validates the requested MTU against HAL limits, records it in the
 * softc and applies the change via xge_confirm_changes(XGE_SET_MTU).
 * NOTE(review): the _exit label and return are elided in this extract.
 */
2355 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2357     int status = XGE_HAL_OK;
2359     /* Check requested MTU size for boundary */
2360     if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2361         XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2364     lldev->mtu = new_mtu;
2365     xge_confirm_changes(lldev, XGE_SET_MTU);
2374  * Common code for both stop and part of reset. Disables device, interrupts and
2377  * @dev Device Handle
2378  * @option Channel normal/reset option
/*
 * Teardown path shared by stop and reset: clears RUNNING/OACTIVE,
 * stops the poll timer, disables interrupts, drains the HAL event
 * queue (dropping mtx_drv around the flush), disables the HAL device,
 * closes all channels and resets the chip.  Caller must hold mtx_drv
 * (asserted below).  NOTE(review): val64's declaration and the early
 * return target are elided in this extract.
 */
2381 xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2383     xge_hal_device_t *hldev = lldev->devh;
2384     struct ifnet *ifnetp = lldev->ifnetp;
2387     mtx_assert((&lldev->mtx_drv), MA_OWNED);
2389     /* If device is not in "Running" state, return */
2390     if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2393     /* Set appropriate flags */
2394     ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2397     callout_stop(&lldev->timer);
2399     /* Disable interrupts */
2400     xge_hal_device_intr_disable(hldev);
/* Queue flush may invoke callbacks that take mtx_drv; drop it first. */
2402     mtx_unlock(&lldev->mtx_drv);
2403     xge_queue_flush(xge_hal_device_queue(lldev->devh));
2404     mtx_lock(&lldev->mtx_drv);
2406     /* Disable HAL device */
2407     if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2408         xge_trace(XGE_ERR, "Disabling HAL device failed");
2409         xge_hal_device_status(hldev, &val64);
2410         xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2413     /* Close Tx and Rx channels */
2414     xge_channel_close(lldev, option);
2416     /* Reset HAL device */
2417     xge_hal_device_reset(hldev);
2419     xge_os_mdelay(1000);
2420     lldev->initialized = 0;
2422     if_link_state_change(ifnetp, LINK_STATE_DOWN);
2429  * xge_set_mbuf_cflags
2430  * set checksum flag for the mbuf
/*
 * Marks an Rx mbuf as having hardware-verified IP and TCP/UDP
 * checksums so the stack skips software verification; csum_data
 * 0xffff is the "valid pseudo-header checksum" convention.
 */
2435 xge_set_mbuf_cflags(mbuf_t pkt)
2437     pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2438     pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2439     pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2440     pkt->m_pkthdr.csum_data = htons(0xffff);
2444  * xge_lro_flush_sessions
2445  * Flush LRO session and send accumulated LRO packet to upper layer
2447  * @lldev Per-adapter Data
/* Drains the active LRO session list, flushing each session up the
 * stack via xge_lro_flush(). */
2450 xge_lro_flush_sessions(xge_lldev_t *lldev)
2452     xge_lro_entry_t *lro_session = NULL;
2454     while(!SLIST_EMPTY(&lldev->lro_active)) {
2455         lro_session = SLIST_FIRST(&lldev->lro_active);
2456         SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2457         xge_lro_flush(lldev, lro_session);
2463  * Flush LRO session. Send accumulated LRO packet to upper layer
2465  * @lldev Per-adapter Data
2466  * @lro LRO session to be flushed
/*
 * Finalizes a coalesced packet: rewrites the IP total length, ack,
 * window and (if present) the TCP timestamp option from the session
 * state, hands the chain to if_input, then recycles the session onto
 * the free list.  NOTE(review): some closing braces are elided in
 * this extract.
 */
2469 xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2471     struct ip *header_ip;
2472     struct tcphdr *header_tcp;
2475     if(lro_session->append_cnt) {
2476         header_ip = lro_session->lro_header_ip;
2477         header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2478         lro_session->m_head->m_pkthdr.len = lro_session->len;
2479         header_tcp = (struct tcphdr *)(header_ip + 1);
2480         header_tcp->th_ack = lro_session->ack_seq;
2481         header_tcp->th_win = lro_session->window;
/* Patch TSval/TSecr in the timestamp option (words 1 and 2 after NOPs). */
2482         if(lro_session->timestamp) {
2483             ptr = (u32 *)(header_tcp + 1);
2484             ptr[1] = htonl(lro_session->tsval);
2485             ptr[2] = lro_session->tsecr;
2489     (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2490     lro_session->m_head = NULL;
2491     lro_session->timestamp = 0;
2492     lro_session->append_cnt = 0;
2493     SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2497 * xge_lro_accumulate
2498 * Accumulate packets to form a large LRO packet based on various conditions
2500 * @lldev Per-adapter Data
2501 * @m_head Current Packet
2503 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2506 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2508 struct ether_header *header_ethernet;
2509 struct ip *header_ip;
2510 struct tcphdr *header_tcp;
2512 struct mbuf *buffer_next, *buffer_tail;
2513 xge_lro_entry_t *lro_session;
2514 xge_hal_status_e status = XGE_HAL_FAIL;
2515 int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2518 /* Get Ethernet header */
2519 header_ethernet = mtod(m_head, struct ether_header *);
2521 /* Return if it is not IP packet */
2522 if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2526 header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2527 (struct ip *)(header_ethernet + 1) :
2528 mtod(m_head->m_next, struct ip *);
2530 /* Return if it is not TCP packet */
2531 if(header_ip->ip_p != IPPROTO_TCP)
2534 /* Return if packet has options */
2535 if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2538 /* Return if packet is fragmented */
2539 if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2542 /* Get TCP header */
2543 header_tcp = (struct tcphdr *)(header_ip + 1);
2545 /* Return if not ACK or PUSH */
2546 if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2549 /* Only timestamp option is handled */
2550 tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2551 tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2552 ptr = (u32 *)(header_tcp + 1);
2553 if(tcp_options != 0) {
2554 if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2555 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2556 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2561 /* Total length of packet (IP) */
2562 ip_len = ntohs(header_ip->ip_len);
2565 tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2567 /* If the frame is padded, trim it */
2568 tot_len = m_head->m_pkthdr.len;
2569 trim = tot_len - (ip_len + ETHER_HDR_LEN);
2573 m_adj(m_head, -trim);
2574 tot_len = m_head->m_pkthdr.len;
2577 buffer_next = m_head;
2579 while(buffer_next != NULL) {
2580 buffer_tail = buffer_next;
2581 buffer_next = buffer_tail->m_next;
2584 /* Total size of only headers */
2585 hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2587 /* Get sequence number */
2588 seq = ntohl(header_tcp->th_seq);
2590 SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2591 if(lro_session->source_port == header_tcp->th_sport &&
2592 lro_session->dest_port == header_tcp->th_dport &&
2593 lro_session->source_ip == header_ip->ip_src.s_addr &&
2594 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2596 /* Unmatched sequence number, flush LRO session */
2597 if(__predict_false(seq != lro_session->next_seq)) {
2598 SLIST_REMOVE(&lldev->lro_active, lro_session,
2599 xge_lro_entry_t, next);
2600 xge_lro_flush(lldev, lro_session);
2604 /* Handle timestamp option */
2606 u32 tsval = ntohl(*(ptr + 1));
2607 if(__predict_false(lro_session->tsval > tsval ||
2611 lro_session->tsval = tsval;
2612 lro_session->tsecr = *(ptr + 2);
2615 lro_session->next_seq += tcp_data_len;
2616 lro_session->ack_seq = header_tcp->th_ack;
2617 lro_session->window = header_tcp->th_win;
2619 /* If TCP data/payload is of 0 size, free mbuf */
2620 if(tcp_data_len == 0) {
2622 status = XGE_HAL_OK;
2626 lro_session->append_cnt++;
2627 lro_session->len += tcp_data_len;
2629 /* Adjust mbuf so that m_data points to payload than headers */
2630 m_adj(m_head, hlen);
2632 /* Append this packet to LRO accumulated packet */
2633 lro_session->m_tail->m_next = m_head;
2634 lro_session->m_tail = buffer_tail;
2636 /* Flush if LRO packet is exceeding maximum size */
2637 if(lro_session->len >
2638 (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2639 SLIST_REMOVE(&lldev->lro_active, lro_session,
2640 xge_lro_entry_t, next);
2641 xge_lro_flush(lldev, lro_session);
2643 status = XGE_HAL_OK;
2648 if(SLIST_EMPTY(&lldev->lro_free))
2651 /* Start a new LRO session */
2652 lro_session = SLIST_FIRST(&lldev->lro_free);
2653 SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2654 SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2655 lro_session->source_port = header_tcp->th_sport;
2656 lro_session->dest_port = header_tcp->th_dport;
2657 lro_session->source_ip = header_ip->ip_src.s_addr;
2658 lro_session->dest_ip = header_ip->ip_dst.s_addr;
2659 lro_session->next_seq = seq + tcp_data_len;
2660 lro_session->mss = tcp_data_len;
2661 lro_session->ack_seq = header_tcp->th_ack;
2662 lro_session->window = header_tcp->th_win;
2664 lro_session->lro_header_ip = header_ip;
2666 /* Handle timestamp option */
2668 lro_session->timestamp = 1;
2669 lro_session->tsval = ntohl(*(ptr + 1));
2670 lro_session->tsecr = *(ptr + 2);
2673 lro_session->len = tot_len;
2674 lro_session->m_head = m_head;
2675 lro_session->m_tail = buffer_tail;
2676 status = XGE_HAL_OK;
2683  * xge_accumulate_large_rx
2684  * Accumulate packets to form a large LRO packet based on various conditions
2686  * @lldev Per-adapter Data
2687  * @pkt Current packet
2688  * @pkt_length Packet Length
2689  * @rxd_priv Rx Descriptor Private Data
/*
 * Try to merge the frame into a software-LRO session via
 * xge_lro_accumulate(); if it cannot be merged, sync the Rx DMA buffer
 * for CPU reads and hand the frame straight up the stack via if_input.
 * NOTE(review): pkt_length is accepted but not used in the visible body.
 */
2692 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2693     xge_rx_priv_t *rxd_priv)
2695     if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2696         bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2697             BUS_DMASYNC_POSTREAD);
2698         (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2704  * If the interrupt is due to received frame (Rx completion), send it up
2706  * @channelh Ring Channel Handle
2707  * @dtr Current Descriptor
2708  * @t_code Transfer Code indicating success or error
2709  * @userdata Per-adapter Data
2711  * Returns XGE_HAL_OK or HAL error enums
/*
 * Rx completion handler: for every completed ring descriptor, detach its
 * mbuf, replenish the descriptor with a fresh buffer, re-post it, and
 * deliver the received packet (via LRO when enabled, else if_input).
 */
2714 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2717     struct ifnet *ifnetp;
2718     xge_rx_priv_t *rxd_priv = NULL;
2719     mbuf_t mbuf_up = NULL;
2720     xge_hal_status_e status = XGE_HAL_OK;
2721     xge_hal_dtr_info_t ext_info;
2725     /*get the user data portion*/
2726     xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2728     XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2731     XGE_DRV_STATS(rx_completions);
2733     /* get the interface pointer */
2734     ifnetp = lldev->ifnetp;
2737     XGE_DRV_STATS(rx_desc_compl);
         /* Interface is going down: stop delivering packets upward. */
2739     if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2740         status = XGE_HAL_FAIL;
         /* Hardware reported a transfer error (t_code): count it, let the
          * HAL handle the error code and recycle the descriptor. */
2745     xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2746     XGE_DRV_STATS(rx_tcode);
2747     xge_hal_device_handle_tcode(channelh, dtr, t_code);
2748     xge_hal_ring_dtr_post(channelh,dtr);
2752     /* Get the private data for this descriptor*/
2753     rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2756     XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2761      * Prepare one buffer to send it to upper layer -- since the upper
2762      * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2763      * prepare a new buffer, do mapping, use it in the current
2764      * descriptor and post descriptor back to ring channel
2766     mbuf_up = rxd_priv->bufferArray[0];
2768     /* Gets details of mbuf i.e., packet length */
2769     xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
         /* Replenish: one helper per ring buffer mode. */
2772     (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2773         xge_get_buf(dtr, rxd_priv, lldev, 0) :
2774         xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2776     if(status != XGE_HAL_OK) {
2777         xge_trace(XGE_ERR, "No memory");
2778         XGE_DRV_STATS(rx_no_buf);
2781          * Unable to allocate buffer. Instead of discarding, post
2782          * descriptor back to channel for future processing of same
2785         xge_hal_ring_dtr_post(channelh, dtr);
2789     /* Get the extended information */
2790     xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2793      * As we have allocated a new mbuf for this descriptor, post this
2794      * descriptor with new mbuf back to ring channel
2796     vlan_tag = ext_info.vlan;
2797     xge_hal_ring_dtr_post(channelh, dtr);
         /* Hardware-verified checksum path: unfragmented TCP/UDP with both
          * L3 and L4 checksums good. */
2798     if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2799         (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2800         (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2801         (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2803         /* set Checksum Flag */
2804         xge_set_mbuf_cflags(mbuf_up);
2806         if(lldev->enabled_lro) {
2807             xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2811         /* Post-Read sync for buffers*/
             /* NOTE(review): the loop runs 'index' over rxd_mbuf_cnt but
              * always syncs dmainfo[0].dma_map -- dmainfo[index] looks
              * intended for multi-buffer modes; confirm before changing. */
2812         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2813             bus_dmamap_sync(lldev->dma_tag_rx,
2814                 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2816         (*ifnetp->if_input)(ifnetp, mbuf_up);
2821      * Packet with erroneous checksum , let the upper layer deal
2825         /* Post-Read sync for buffers*/
             /* NOTE(review): same dmainfo[0] vs dmainfo[index] concern as
              * the loop above. */
2826         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2827             bus_dmamap_sync(lldev->dma_tag_rx,
2828                 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
             /* Tag the mbuf with the VLAN id extracted from the HAL's
              * extended descriptor info. */
2832         mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2833         mbuf_up->m_flags |= M_VLANTAG;
2836         if(lldev->enabled_lro)
2837             xge_lro_flush_sessions(lldev);
2839         (*ifnetp->if_input)(ifnetp, mbuf_up);
2841     } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
         /* Drain any LRO sessions still open once the ring is exhausted. */
2844     if(lldev->enabled_lro)
2845         xge_lro_flush_sessions(lldev);
2855  * @mbuf_up Packet to send up
2856  * @channelh Ring Channel Handle
2858  * @lldev Per-adapter Data
2859  * @rxd_priv Rx Descriptor Private Data
2861  * Returns XGE_HAL_OK or HAL error enums
/*
 * Pull per-buffer lengths out of a completed Rx descriptor and stitch
 * the descriptor's mbufs into a chain (multi-buffer modes), or set the
 * single length (1-buffer mode), updating mbuf_up's pkthdr length.
 */
2864 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2865     xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
     /* NOTE(review): '={0,0}' zero-initializes all 5 elements, so the
      * explicit memzero below is redundant (harmless). */
2868     int pkt_length[5]={0,0}, pkt_len=0;
2869     dma_addr_t dma_data[5];
2875     if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2876         xge_os_memzero(pkt_length, sizeof(pkt_length));
2879          * Retrieve data of interest from the completed descriptor -- This
2880          * returns the packet length
2882         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2883             xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2886             xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
         /* Walk the descriptor's buffers: set each mbuf's length and link
          * it to the next buffer to form the chain handed upward. */
2889         for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2890             m->m_len = pkt_length[index];
2892             if(index < (lldev->rxd_mbuf_cnt-1)) {
2893                 m->m_next = rxd_priv->bufferArray[index + 1];
2899             pkt_len+=pkt_length[index];
2903          * Since 2 buffer mode is an exceptional case where data is in 3rd
2904          * buffer but not in 2nd buffer
2906         if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2907             m->m_len = pkt_length[2];
2908             pkt_len+=pkt_length[2];
2912          * Update length of newly created buffer to be sent up with packet
2915         mbuf_up->m_pkthdr.len = pkt_len;
2919      * Retrieve data of interest from the completed descriptor -- This
2920      * returns the packet length
2922     xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2925      * Update length of newly created buffer to be sent up with packet
2928     mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2936  * Flush Tx descriptors
2938  * @channelh Channel handle
/*
 * Reclaim completed Tx descriptors: for each, handle any error t_code,
 * unload the DMA map, free the transmitted mbuf chain, and return the
 * descriptor to the fifo free pool.
 */
2941 xge_flush_txds(xge_hal_channel_h channelh)
2943     xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2944     xge_hal_dtr_h tx_dtr;
2945     xge_tx_priv_t *tx_priv;
2948     while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2950         XGE_DRV_STATS(tx_desc_compl);
2952         xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2953         XGE_DRV_STATS(tx_tcode);
2954         xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
         /* Release resources tied to this descriptor; NULL the buffer
          * pointer so a later term/cleanup pass won't double-free. */
2957         tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2958         bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2959         m_freem(tx_priv->buffer);
2960         tx_priv->buffer = NULL;
2961         xge_hal_fifo_dtr_free(channelh, tx_dtr);
2969  * @ifnetp Interface Handle
/*
 * if_start entry point: try each Tx fifo queue in turn; skip a queue
 * whose mutex is already held (mtx_trylock fails) rather than blocking,
 * otherwise transmit under the lock via xge_send_locked().
 */
2972 xge_send(struct ifnet *ifnetp)
2975     xge_lldev_t *lldev = ifnetp->if_softc;
2977     for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2978         if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2979             XGE_DRV_STATS(tx_lock_fail);
2982         xge_send_locked(ifnetp, qindex);
2983         mtx_unlock(&lldev->mtx_tx[qindex]);
/*
 * Transmit path for one fifo queue; caller holds lldev->mtx_tx[qindex].
 * Dequeues packets from if_snd, defragments over-long mbuf chains,
 * reserves and fills a fifo descriptor per packet (VLAN, DMA segments,
 * TSO/checksum offload bits) and posts it to the hardware.
 */
2988 xge_send_locked(struct ifnet *ifnetp, int qindex)
     /* NOTE(review): 'static' makes this segment array shared across ALL
      * queues and calls, yet only the per-queue Tx mutex is held here --
      * concurrent sends on different queues could race on it; verify. */
2991     static bus_dma_segment_t segs[XGE_MAX_SEGS];
2992     xge_hal_status_e status;
2993     unsigned int max_fragments;
2994     xge_lldev_t *lldev = ifnetp->if_softc;
2995     xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
2996     mbuf_t m_head = NULL;
2997     mbuf_t m_buf = NULL;
2998     xge_tx_priv_t *ll_tx_priv = NULL;
2999     register unsigned int count = 0;
3000     unsigned int nsegs = 0;
3003     max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3005     /* If device is not initialized, return */
3006     if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3009     XGE_DRV_STATS(tx_calls);
3012      * This loop will be executed for each packet in the kernel maintained
3013      * queue -- each packet can be with fragments as an mbuf chain
3016     IF_DEQUEUE(&ifnetp->if_snd, m_head);
3017     if (m_head == NULL) {
3018         ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
         /* Count non-empty fragments; if the chain would exceed the fifo's
          * per-descriptor fragment limit, coalesce it with m_defrag. */
3022     for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3023         if(m_buf->m_len) count += 1;
3026     if(count >= max_fragments) {
3027         m_buf = m_defrag(m_head, M_DONTWAIT);
3028         if(m_buf != NULL) m_head = m_buf;
3029         XGE_DRV_STATS(tx_defrag);
3032     /* Reserve descriptors */
3033     status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3034     if(status != XGE_HAL_OK) {
3035         XGE_DRV_STATS(tx_no_txd);
3036         xge_flush_txds(channelh);
         /* Propagate the mbuf's VLAN tag (0 when untagged). */
3041     (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3042     xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3044     /* Update Tx private structure for this descriptor */
3045     ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3046     ll_tx_priv->buffer = m_head;
3049      * Do mapping -- Required DMA tag has been created in xge_init
3050      * function and DMA maps have already been created in the
3051      * xgell_tx_replenish function.
3052      * Returns number of segments through nsegs
3054     if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3055         ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3056         xge_trace(XGE_TRACE, "DMA map load failed");
3057         XGE_DRV_STATS(tx_map_fail);
         /* Track high-water mark of fragments per packet for stats. */
3061     if(lldev->driver_stats.tx_max_frags < nsegs)
3062         lldev->driver_stats.tx_max_frags = nsegs;
3064     /* Set descriptor buffer for header and each fragment/segment */
3067         xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3068             (dma_addr_t)htole64(segs[count].ds_addr),
3069             segs[count].ds_len);
3071     } while(count < nsegs);
3073     /* Pre-write Sync of mapping */
3074     bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3075         BUS_DMASYNC_PREWRITE);
         /* Large-send offload: program MSS when the stack requested TSO. */
3077     if((lldev->enabled_tso) &&
3078         (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3079         XGE_DRV_STATS(tx_tso);
3080         xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
         /* Hardware checksum offload for IPv4/TCP/UDP when enabled. */
3084     if(ifnetp->if_hwassist > 0) {
3085         xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3086             | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3089     /* Post descriptor to FIFO channel */
3090     xge_hal_fifo_dtr_post(channelh, dtr);
3091     XGE_DRV_STATS(tx_posted);
3093     /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3094      * listener so that we can use tools like tcpdump */
3095     ETHER_BPF_MTAP(ifnetp, m_head);
3098     /* Prepend the packet back to queue */
3099     IF_PREPEND(&ifnetp->if_snd, m_head);
3100     ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
         /* Schedule a retry event so transmission resumes once descriptors
          * become available again. */
3102     xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3103         XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3104     XGE_DRV_STATS(tx_again);
3109  * Allocates new mbufs to be placed into descriptors
3111  * @dtrh Descriptor Handle
3112  * @rxd_priv Rx Descriptor Private Data
3113  * @lldev Per-adapter Data
3114  * @index Buffer Index (if multi-buffer mode)
3116  * Returns XGE_HAL_OK or HAL error enums
/*
 * Allocate a cluster-backed mbuf sized for the ring's buffer mode, DMA-map
 * it using the spare map (lldev->extra_dma_map), then swap maps: the old
 * descriptor map becomes the new spare after being synced and unloaded.
 */
3119 xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3120     xge_lldev_t *lldev, int index)
3122     register mbuf_t mp = NULL;
3123     struct ifnet *ifnetp = lldev->ifnetp;
3124     int status = XGE_HAL_OK;
3125     int buffer_size = 0, cluster_size = 0, count;
3126     bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
3127     bus_dma_segment_t segs[3];
     /* 1-buffer mode sizes for MTU + max MAC header; multi-buffer modes
      * use the per-index length table computed elsewhere. */
3129     buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3130         ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3131         lldev->rxd_mbuf_len[index];
     /* Pick the smallest cluster class that fits the buffer. */
3133     if(buffer_size <= MCLBYTES) {
3134         cluster_size = MCLBYTES;
3135         mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3138         cluster_size = MJUMPAGESIZE;
3139         if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3140             (buffer_size > MJUMPAGESIZE)) {
3141             cluster_size = MJUM9BYTES;
3143         mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, cluster_size);
3146         xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3147         status = XGE_HAL_FAIL;
3151     /* Update mbuf's length, packet length and receive interface */
3152     mp->m_len = mp->m_pkthdr.len = buffer_size;
3153     mp->m_pkthdr.rcvif = ifnetp;
     /* Load the new mbuf into the spare DMA map. */
3156     if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3157         mp, segs, &count, BUS_DMA_NOWAIT)) {
3158         XGE_DRV_STATS(rx_map_fail);
3160         XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3163     /* Update descriptor private data */
3164     rxd_priv->bufferArray[index] = mp;
3165     rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3166     rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
3167     lldev->extra_dma_map = map;
3169     /* Pre-Read/Write sync */
     /* NOTE(review): flag is BUS_DMASYNC_POSTREAD despite the "Pre" in the
      * comment above -- this syncs the OLD buffer for CPU access before its
      * map is unloaded; confirm intent before renaming or changing. */
3170     bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3172     /* Unload DMA map of mbuf in current descriptor */
3173     bus_dmamap_unload(lldev->dma_tag_rx, map);
3175     /* Set descriptor buffer */
3176     if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3177         xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3187  * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3189  * @dtrh Descriptor Handle
3190  * @rxd_priv Rx Descriptor Private Data
3191  * @lldev Per-adapter Data
3193  * Returns XGE_HAL_OK or HAL error enums
/*
 * Multi-buffer replenish: allocate one mbuf per ring buffer (undoing all
 * earlier allocations on failure), then build the per-buffer DMA pointer
 * and size arrays and program the 3-buffer or 5-buffer descriptor.
 */
3196 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3199     bus_addr_t dma_pointers[5];
3201     int status = XGE_HAL_OK, index;
3204     for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3205         status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3206         if(status != XGE_HAL_OK) {
             /* Unwind: free the mbufs already attached for this dtr. */
3207             for(newindex = 0; newindex < index; newindex++) {
3208                 m_freem(rxd_priv->bufferArray[newindex]);
3210             XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
     /* A zero-length slot reuses the previous buffer's address with a
      * 1-byte size, as the hardware requires non-NULL pointers. */
3214     for(index = 0; index < lldev->buffer_mode; index++) {
3215         if(lldev->rxd_mbuf_len[index] != 0) {
3216             dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3217             dma_sizes[index] = lldev->rxd_mbuf_len[index];
3220             dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3221             dma_sizes[index] = 1;
3225     /* Assigning second buffer to third pointer in 2 buffer mode */
3226     if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3227         dma_pointers[2] = dma_pointers[1];
3228         dma_sizes[2] = dma_sizes[1];
3232     if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3233         xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3236         xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3245  * If the interrupt is due to Tx completion, free the sent buffer
3247  * @channelh Channel Handle
3249  * @t_code Transfer Code indicating success or error
3250  * @userdata Per-adapter Data
3252  * Returns XGE_HAL_OK or HAL error enum
/*
 * Tx completion handler: under the queue's Tx mutex, reclaim every
 * completed descriptor (unload DMA, free mbuf, free dtr), then restart
 * transmission on this queue and clear IFF_DRV_OACTIVE.
 */
3255 xge_tx_compl(xge_hal_channel_h channelh,
3256     xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3258     xge_tx_priv_t *ll_tx_priv = NULL;
3259     xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3260     struct ifnet *ifnetp = lldev->ifnetp;
3261     mbuf_t m_buffer = NULL;
3262     int qindex = xge_hal_channel_id(channelh);
3264     mtx_lock(&lldev->mtx_tx[qindex]);
3266     XGE_DRV_STATS(tx_completions);
3269      * For each completed descriptor: Get private structure, free buffer,
3270      * do unmapping, and free descriptor
3273     XGE_DRV_STATS(tx_desc_compl);
3276     XGE_DRV_STATS(tx_tcode);
3277     xge_trace(XGE_TRACE, "t_code %d", t_code);
3278     xge_hal_device_handle_tcode(channelh, dtr, t_code);
3281     ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3282     m_buffer = ll_tx_priv->buffer;
3283     bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
     /* Clear the stale pointer so later cleanup can't double-free. */
3285     ll_tx_priv->buffer = NULL;
3286     xge_hal_fifo_dtr_free(channelh, dtr);
3287     } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
     /* Descriptors were freed: kick the send path for this queue. */
3289     xge_send_locked(ifnetp, qindex);
3290     ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3292     mtx_unlock(&lldev->mtx_tx[qindex]);
3298  * xge_tx_initial_replenish
3299  * Initially allocate buffers and set them into descriptors for later use
3301  * @channelh Tx Channel Handle
3302  * @dtrh Descriptor Handle
3304  * @userdata Per-adapter Data
3305  * @reopen Channel open/reopen option
3307  * Returns XGE_HAL_OK or HAL error enums
/*
 * Per-descriptor init callback for the Tx fifo: create the DMA map that
 * xge_send_locked() will later load mbufs into. No mbuf is allocated
 * here -- Tx buffers come from the stack at transmit time.
 */
3310 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3311     int index, void *userdata, xge_hal_channel_reopen_e reopen)
3313     xge_tx_priv_t *txd_priv = NULL;
3314     int status = XGE_HAL_OK;
3316     /* Get the user data portion from channel handle */
3317     xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3319     XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3323     /* Get the private data */
3324     txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3325     if(txd_priv == NULL) {
3326         XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3330     /* Create DMA map for this descriptor */
3331     if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3332         &txd_priv->dma_map)) {
3333         XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3334             txinit_out, XGE_HAL_FAIL);
3342  * xge_rx_initial_replenish
3343  * Initially allocate buffers and set them into descriptors for later use
3345  * @channelh Tx Channel Handle
3346  * @dtrh Descriptor Handle
3348  * @userdata Per-adapter Data
3349  * @reopen Channel open/reopen option
3351  * Returns XGE_HAL_OK or HAL error enums
/*
 * Per-descriptor init callback for the Rx ring: allocate the per-dtr
 * mbuf pointer array, create DMA map(s) for the ring's buffer mode, and
 * attach freshly allocated receive buffer(s).
 */
3354 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3355     int index, void *userdata, xge_hal_channel_reopen_e reopen)
3357     xge_rx_priv_t *rxd_priv = NULL;
3358     int status = XGE_HAL_OK;
3359     int index1 = 0, index2 = 0;
3361     /* Get the user data portion from channel handle */
3362     xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3364     XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3368     /* Get the private data */
3369     rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3370     if(rxd_priv == NULL) {
3371         XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
     /* NOTE(review): sizeof(rxd_priv->bufferArray) is the size of the
      * pointer field itself; this only yields the right allocation size
      * because the array elements are also pointer-sized. Confirm against
      * the declaration -- 'sizeof(*rxd_priv->bufferArray)' would be the
      * conventional form. */
3375     rxd_priv->bufferArray = xge_os_malloc(NULL,
3376         (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3378     if(rxd_priv->bufferArray == NULL) {
3379         XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3383     if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3384         /* Create DMA map for these descriptors*/
3385         if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3386             &rxd_priv->dmainfo[0].dma_map)) {
3387             XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3388                 rxinit_err_out, XGE_HAL_FAIL);
3390         /* Get a buffer, attach it to this descriptor */
3391         status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
         /* Multi-buffer mode: one DMA map per buffer slot, with unwind of
          * already-created maps on any failure. */
3394         for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3395             /* Create DMA map for this descriptor */
3396             if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3397                 &rxd_priv->dmainfo[index1].dma_map)) {
3398                 for(index2 = index1 - 1; index2 >= 0; index2--) {
3399                     bus_dmamap_destroy(lldev->dma_tag_rx,
3400                         rxd_priv->dmainfo[index2].dma_map);
3403                     "Jumbo DMA map creation for Rx descriptor failed",
3404                     rxinit_err_out, XGE_HAL_FAIL);
3407         status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3410     if(status != XGE_HAL_OK) {
3411         for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3412             bus_dmamap_destroy(lldev->dma_tag_rx,
3413                 rxd_priv->dmainfo[index1].dma_map);
3415         goto rxinit_err_out;
     /* Error path: release the pointer array allocated above. */
3422     xge_os_free(NULL, rxd_priv->bufferArray,
3423         (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3430  * During unload terminate and free all descriptors
3432  * @channelh Rx Channel Handle
3433  * @dtrh Rx Descriptor Handle
3434  * @state Descriptor State
3435  * @userdata Per-adapter Data
3436  * @reopen Channel open/reopen option
/*
 * Rx descriptor teardown callback: for a posted descriptor, sync/unload/
 * destroy each buffer's DMA map, free the attached mbufs and the pointer
 * array, then return the descriptor to the HAL.
 */
3439 xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3440     xge_hal_dtr_state_e state, void *userdata,
3441     xge_hal_channel_reopen_e reopen)
3443     xge_rx_priv_t *rxd_priv = NULL;
3444     xge_lldev_t *lldev = NULL;
3447     /* Descriptor state is not "Posted" */
3448     if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3450     /* Get the user data portion */
3451     lldev = xge_hal_channel_userdata(channelh);
3453     /* Get the private data */
3454     rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3456     for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3457         if(rxd_priv->dmainfo[index].dma_map != NULL) {
3458             bus_dmamap_sync(lldev->dma_tag_rx,
3459                 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3460             bus_dmamap_unload(lldev->dma_tag_rx,
3461                 rxd_priv->dmainfo[index].dma_map);
             /* NOTE(review): m_free() releases a single mbuf; m_freem() is
              * used elsewhere for chains. Per-slot mbufs here appear to be
              * single cluster mbufs, so this looks intentional -- confirm. */
3462             if(rxd_priv->bufferArray[index] != NULL)
3463                 m_free(rxd_priv->bufferArray[index]);
3464             bus_dmamap_destroy(lldev->dma_tag_rx,
3465                 rxd_priv->dmainfo[index].dma_map);
     /* NOTE(review): same sizeof-of-pointer-field pattern as the matching
      * allocation in xge_rx_initial_replenish; sizes agree by construction. */
3468     xge_os_free(NULL, rxd_priv->bufferArray,
3469         (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3471     /* Free the descriptor */
3472     xge_hal_ring_dtr_free(channelh, dtrh);
3480  * During unload terminate and free all descriptors
3482  * @channelh Rx Channel Handle
3483  * @dtrh Rx Descriptor Handle
3484  * @state Descriptor State
3485  * @userdata Per-adapter Data
3486  * @reopen Channel open/reopen option
/*
 * Tx descriptor teardown callback: destroy the DMA map created in
 * xge_tx_initial_replenish. (The header above says "Rx" -- copy/paste
 * from the Rx variant; this is the Tx channel's term hook.)
 */
3489 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3490     xge_hal_dtr_state_e state, void *userdata,
3491     xge_hal_channel_reopen_e reopen)
3493     xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3494     xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3496     /* Destroy DMA map */
3497     bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3503  * FreeBSD device interface entry points
/* newbus method table wiring this driver's probe/attach/detach/shutdown
 * handlers into the kernel device framework. */
3505 static device_method_t xge_methods[] = {
3506     DEVMETHOD(device_probe, xge_probe),
3507     DEVMETHOD(device_attach, xge_attach),
3508     DEVMETHOD(device_detach, xge_detach),
3509     DEVMETHOD(device_shutdown, xge_shutdown),
/* Driver descriptor: softc is one xge_lldev_t per device instance. */
3513 static driver_t xge_driver = {
3516     sizeof(xge_lldev_t),
3518 static devclass_t xge_devclass;
/* Register the driver on the PCI bus as "nxge". */
3519 DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);