2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002-2007 Neterion, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <dev/nxge/if_nxge.h>
32 #include <dev/nxge/xge-osdep.h>
33 #include <net/if_arp.h>
34 #include <sys/types.h>
36 #include <net/if_var.h>
37 #include <net/if_vlan_var.h>
/* Set once the copyright banner has been printed (shared across all probes). */
39 int copyright_print = 0;
/* Reference count of adapters using the HAL driver; HAL is terminated when
 * this drops back to zero in xge_resources_free(). */
40 int hal_driver_init_count = 0;
size_t size = sizeof(int);
/* Forward declaration: frees completed Tx descriptors on a HAL channel. */
43 static void inline xge_flush_txds(xge_hal_channel_h);
47 * Probes for Xframe devices
52 * BUS_PROBE_DEFAULT if device is supported
53 * ENXIO if device is not supported
/*
 * Device probe method: match the PCI vendor/device IDs against the two
 * supported Xframe parts (Xena and Herc).  On the first successful match
 * the copyright banner is printed exactly once (guarded by copyright_print).
 * NOTE(review): several lines of this chunk appear elided (embedded original
 * line numbers show gaps); retValue is presumably initialized to ENXIO on a
 * line not visible here — confirm against the full source.
 */
56 xge_probe(device_t dev)
58 int devid = pci_get_device(dev);
59 int vendorid = pci_get_vendor(dev);
62 if(vendorid == XGE_PCI_VENDOR_ID) {
63 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
64 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
65 if(!copyright_print) {
66 xge_os_printf(XGE_COPYRIGHT);
69 device_set_desc_copy(dev,
70 "Neterion Xframe 10 Gigabit Ethernet Adapter");
/* Supported device: report default probe priority. */
71 retValue = BUS_PROBE_DEFAULT;
80 * Sets HAL parameter values (from kenv).
82 * @dconfig Device Configuration
/*
 * Populate the HAL device configuration (dconfig) and the per-adapter softc
 * with tunables.  Defaults come from XGE_DEFAULT_* / XGE_HAL_DEFAULT_*
 * constants; each XGE_GET_PARAM* macro allows a kenv "hw.xge.*" variable to
 * override the corresponding field.  Also applies an AMD 8131 PCI-X bridge
 * errata workaround at the end.
 */
86 xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
88 int qindex, tindex, revision;
90 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
/* Global device parameters. */
92 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
93 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
94 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
95 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
96 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
97 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
/* Driver-level feature switches (stored in the softc, not dconfig). */
99 XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
100 XGE_DEFAULT_ENABLED_TSO);
101 XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
102 XGE_DEFAULT_ENABLED_LRO);
103 XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
104 XGE_DEFAULT_ENABLED_MSI);
/* PCI/PCI-X bus parameters. */
106 XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
107 XGE_DEFAULT_LATENCY_TIMER);
108 XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
109 XGE_DEFAULT_MAX_SPLITS_TRANS);
110 XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
111 XGE_DEFAULT_MMRB_COUNT);
112 XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
113 XGE_DEFAULT_SHARED_SPLITS);
114 XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
115 XGE_DEFAULT_ISR_POLLING_CNT);
116 XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
117 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
/* MAC utilization/pause parameters. */
119 XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
120 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
121 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
122 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
123 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
124 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
125 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
126 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
127 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
128 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
129 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
130 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
131 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
132 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
/* Tx FIFO parameters common to all queues. */
134 XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
135 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
136 XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
137 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
138 XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
139 XGE_DEFAULT_FIFO_MAX_FRAGS);
/* Per-FIFO-queue and per-TTI (Tx traffic interrupt) parameters. */
141 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
142 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
143 XGE_DEFAULT_FIFO_QUEUE_INTR);
144 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
145 XGE_DEFAULT_FIFO_QUEUE_MAX);
146 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
147 qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
149 for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
150 dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
151 dconfig->fifo.queue[qindex].configured = 1;
153 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
154 urange_a, qindex, tindex,
155 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
156 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
157 urange_b, qindex, tindex,
158 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
159 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
160 urange_c, qindex, tindex,
161 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
162 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
163 ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
164 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
165 ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
166 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
167 ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
168 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
169 ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
170 XGE_GET_PARAM_FIFO_QUEUE_TTI(
171 "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
172 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
173 XGE_GET_PARAM_FIFO_QUEUE_TTI(
174 "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
175 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
176 XGE_GET_PARAM_FIFO_QUEUE_TTI(
177 "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
178 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
/* Rx ring parameters. */
182 XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
183 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
185 XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
186 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
/* Rx buffer mode: only modes 1 and 2 are accepted from the user;
 * anything else falls back to mode 1. */
188 XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
189 XGE_DEFAULT_BUFFER_MODE);
190 if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
191 (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
192 xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
193 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
/* Per-ring-queue and RTI (Rx traffic interrupt) parameters.  Note that a
 * user-selected buffer mode 2 is mapped to the HAL's mode 3 here. */
196 for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
197 dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
198 dconfig->ring.queue[qindex].priority = 0;
199 dconfig->ring.queue[qindex].configured = 1;
200 dconfig->ring.queue[qindex].buffer_mode =
201 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
202 XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
204 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
205 XGE_DEFAULT_RING_QUEUE_MAX);
206 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
207 qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
208 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
209 dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
210 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
211 indicate_max_pkts, qindex,
212 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
213 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
214 backoff_interval_us, qindex,
215 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
217 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
218 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
219 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
220 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
221 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
222 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
223 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
224 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
225 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
226 timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
227 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
228 timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
229 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
230 urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
231 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
232 urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
233 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
234 urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
/* Clamp fifo_max_frags to what fits the descriptor page (PAGE_SIZE/32). */
237 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
238 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
239 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
240 (int)(PAGE_SIZE / 32))
241 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
242 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
/* Workaround: old (revision <= 0x12) AMD 8131 PCI-X bridges need reduced
 * MMRB count and split-transaction settings. */
245 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
246 if(checkdev != NULL) {
247 /* Check Revision for 0x12 */
248 revision = pci_read_config(checkdev,
249 xge_offsetof(xge_hal_pci_config_t, revision), 1);
250 if(revision <= 0x12) {
251 /* Set mmrb_count to 1k and max splits = 2 */
252 dconfig->mmrb_count = 1;
253 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
259 * xge_buffer_sizes_set
260 * Set buffer sizes based on Rx buffer mode
262 * @lldev Per-adapter Data
263 * @buffer_mode Rx Buffer Mode
/*
 * Compute the per-descriptor mbuf sizes (lldev->rxd_mbuf_len[]) and the
 * descriptor count (lldev->rxd_mbuf_cnt) for the given Rx buffer mode and
 * MTU.  For modes 1..4: buffer 0 holds either the whole frame (mode 1) or
 * just the frame header, and buffer (mode-1) holds the MTU payload.  For
 * mode 5: buffer 1 is the TCP/IP header, and the remaining payload is split
 * across MJUMPAGESIZE buffers with a 128-byte-aligned tail.
 * NOTE(review): some lines are elided in this chunk (e.g. declaration of
 * 'index') — confirm against the full source.
 */
266 xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
269 int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
270 int buffer_size = mtu + frame_header;
272 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
274 if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
275 lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
/* Mode 1 packs header+payload into one buffer; otherwise buffer 0 is
 * just the MAC header. */
277 lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
279 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
280 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
282 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
284 buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
285 while(buffer_size > MJUMPAGESIZE) {
286 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
287 buffer_size -= MJUMPAGESIZE;
/* Round the last (partial) buffer up to a 128-byte boundary. */
289 XGE_ALIGN_TO(buffer_size, 128);
290 lldev->rxd_mbuf_len[index] = buffer_size;
291 lldev->rxd_mbuf_cnt = index + 1;
294 for(index = 0; index < buffer_mode; index++)
295 xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
296 lldev->rxd_mbuf_len[index]);
300 * xge_buffer_mode_init
301 * Init Rx buffer mode
303 * @lldev Per-adapter Data
/*
 * Initialize the Rx buffer mode: propagate LRO enablement into the ifnet
 * capabilities, map user mode 2 to HAL mode 3 with scatter mode B (other
 * modes use scatter mode A), compute per-buffer sizes via
 * xge_rx_buffer_sizes_set(), and log the resulting TSO/LRO/buffer-mode
 * configuration.
 */
307 xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
309 int index = 0, buffer_size = 0;
310 xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
312 buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
314 if(lldev->enabled_lro)
315 (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
317 (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
319 lldev->rxd_mbuf_cnt = lldev->buffer_mode;
/* User-visible mode 2 runs as HAL mode 3 with scatter mode B. */
320 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
321 XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
322 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
325 XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
326 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
328 xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
330 xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
331 ((lldev->enabled_tso) ? "Enabled":"Disabled"));
332 xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
333 ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
334 xge_os_printf("%s: Rx %d Buffer Mode Enabled",
335 device_get_nameunit(lldev->device), lldev->buffer_mode);
339 * xge_driver_initialize
340 * Initializes HAL driver (common for all devices)
343 * XGE_HAL_OK if success
344 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
/*
 * One-time HAL driver initialization shared by all adapters.  Registers the
 * link-up/link-down/critical-error/event callbacks and the event queue sizes,
 * then bumps hal_driver_init_count so subsequent attaches skip re-init.
 * Returns XGE_HAL_OK on success or the HAL error code on failure.
 */
347 xge_driver_initialize(void)
349 xge_hal_uld_cbs_t uld_callbacks;
350 xge_hal_driver_config_t driver_config;
351 xge_hal_status_e status = XGE_HAL_OK;
353 /* Initialize HAL driver */
354 if(!hal_driver_init_count) {
355 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
356 xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
/*
359 * Initial and maximum size of the queue used to store the events
360 * like Link up/down (xge_hal_event_e)
*/
362 driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
363 driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;
365 uld_callbacks.link_up = xge_callback_link_up;
366 uld_callbacks.link_down = xge_callback_link_down;
367 uld_callbacks.crit_err = xge_callback_crit_err;
368 uld_callbacks.event = xge_callback_event;
370 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
371 if(status != XGE_HAL_OK) {
372 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
/* Reference-counted so xge_resources_free() knows when to terminate HAL. */
376 hal_driver_init_count = hal_driver_init_count + 1;
/* Enable all debug modules at XGE_TRACE level. */
378 xge_hal_driver_debug_module_mask_set(0xffffffff);
379 xge_hal_driver_debug_level_set(XGE_TRACE);
387 * Initializes, adds and sets media
389 * @devc Device Handle
/*
 * Initialize the ifmedia structure for this adapter, register the supported
 * media types (1000SX full/half, 10G SR/LR, auto) and select autoselect as
 * the current media.
 */
392 xge_media_init(device_t devc)
394 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
396 /* Initialize Media */
397 ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
400 /* Add supported media */
401 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
402 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
403 ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
404 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
405 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
/* Default to autoselect. */
408 ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
413 * Save PCI configuration space
/*
 * Save the device's PCI configuration space (via the bus layer) so it can
 * be restored later by xge_pci_space_restore().
 */
418 xge_pci_space_save(device_t dev)
420 struct pci_devinfo *dinfo = NULL;
422 dinfo = device_get_ivars(dev);
423 xge_trace(XGE_TRACE, "Saving PCI configuration space");
424 pci_cfg_save(dev, dinfo, 0);
428 * xge_pci_space_restore
429 * Restore saved PCI configuration space
/*
 * Restore the PCI configuration space previously captured by
 * xge_pci_space_save().
 */
434 xge_pci_space_restore(device_t dev)
436 struct pci_devinfo *dinfo = NULL;
438 dinfo = device_get_ivars(dev);
439 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
440 pci_cfg_restore(dev, dinfo);
447 * @lldev Per-adapter Data
/*
 * Snapshot the MSI capability registers (control, address low/high, data)
 * from PCI config space into lldev->msi_info, so xge_msi_info_restore() can
 * re-program them after an interface down/up cycle.
 */
450 xge_msi_info_save(xge_lldev_t * lldev)
452 xge_os_pci_read16(lldev->pdev, NULL,
453 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
454 &lldev->msi_info.msi_control);
455 xge_os_pci_read32(lldev->pdev, NULL,
456 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
457 &lldev->msi_info.msi_lower_address);
458 xge_os_pci_read32(lldev->pdev, NULL,
459 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
460 &lldev->msi_info.msi_higher_address);
461 xge_os_pci_read16(lldev->pdev, NULL,
462 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
463 &lldev->msi_info.msi_data);
467 * xge_msi_info_restore
468 * Restore saved MSI info
/*
 * Re-program the MSI capability registers from the values saved by
 * xge_msi_info_save().  Needed because bringing the interface down was
 * observed to reset the MSI configuration (see original comment below).
 */
473 xge_msi_info_restore(xge_lldev_t *lldev)
/*
476 * If interface is made down and up, traffic fails. It was observed that
477 * MSI information were getting reset on down. Restoring them.
*/
479 xge_os_pci_write16(lldev->pdev, NULL,
480 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
481 lldev->msi_info.msi_control);
483 xge_os_pci_write32(lldev->pdev, NULL,
484 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
485 lldev->msi_info.msi_lower_address);
487 xge_os_pci_write32(lldev->pdev, NULL,
488 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
489 lldev->msi_info.msi_higher_address);
491 xge_os_pci_write16(lldev->pdev, NULL,
492 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
493 lldev->msi_info.msi_data);
498 * Initializes mutexes used in driver
500 * @lldev Per-adapter Data
/*
 * Create the driver mutex (mtx_drv, named "<nameunit>_drv") and one Tx
 * mutex per FIFO queue (mtx_tx[q], named "<nameunit>_tx_<q>").
 * Counterpart of xge_mutex_destroy().
 */
503 xge_mutex_init(xge_lldev_t *lldev)
507 sprintf(lldev->mtx_name_drv, "%s_drv",
508 device_get_nameunit(lldev->device));
509 mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
512 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
513 sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
514 device_get_nameunit(lldev->device), qindex);
515 mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
522 * Destroys mutexes used in driver
524 * @lldev Per-adapter Data
/*
 * Destroy the per-queue Tx mutexes and the driver mutex created by
 * xge_mutex_init().
 */
527 xge_mutex_destroy(xge_lldev_t *lldev)
531 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
532 mtx_destroy(&lldev->mtx_tx[qindex]);
533 mtx_destroy(&lldev->mtx_drv);
538 * Print device and driver information
540 * @lldev Per-adapter Data
/*
 * Print adapter identification (Xframe I/II, product name, revision, driver
 * version, serial number), and for Herc parts also the PCI/PCI-X bus width
 * and speed decoded from the adapter's pci_info register, plus the interrupt
 * mode in use (MSI or line).
 */
543 xge_print_info(xge_lldev_t *lldev)
545 device_t dev = lldev->device;
546 xge_hal_device_t *hldev = lldev->devh;
547 xge_hal_status_e status = XGE_HAL_OK;
/* Indexed by the 4-bit mode field extracted from the pci_info register
 * (bits 63:60); some table entries are elided in this chunk. */
549 const char *xge_pci_bus_speeds[17] = {
552 "PCIX(M1) 66MHz Bus",
553 "PCIX(M1) 100MHz Bus",
554 "PCIX(M1) 133MHz Bus",
555 "PCIX(M2) 133MHz Bus",
556 "PCIX(M2) 200MHz Bus",
557 "PCIX(M2) 266MHz Bus",
559 "PCIX(M1) 66MHz Bus (Not Supported)",
560 "PCIX(M1) 100MHz Bus (Not Supported)",
561 "PCIX(M1) 133MHz Bus (Not Supported)",
569 xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
570 device_get_nameunit(dev),
571 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
572 hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
573 xge_os_printf("%s: Serial Number %s",
574 device_get_nameunit(dev), hldev->vpd_data.serial_num);
/* Bus speed/width reporting is only available on Herc. */
576 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
577 status = xge_hal_mgmt_reg_read(hldev, 0,
578 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
579 if(status != XGE_HAL_OK)
580 xge_trace(XGE_ERR, "Error for getting bus speed");
582 xge_os_printf("%s: Adapter is on %s bit %s",
583 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
584 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
587 xge_os_printf("%s: Using %s Interrupts",
588 device_get_nameunit(dev),
589 (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
593 * xge_create_dma_tags
594 * Creates DMA tags for both Tx and Rx
598 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
/*
 * Create the Tx and Rx bus_dma tags and the spare Rx DMA map.  The Tx tag
 * allows up to XGE_MAX_SEGS segments of MCLBYTES each; the Rx tag is a
 * single segment sized to fit MTU + MAC header, rounded up to MCLBYTES /
 * MJUMPAGESIZE / MJUM9BYTES as needed (buffer mode 5 always uses
 * MJUMPAGESIZE).  On any failure the partially created tags are destroyed
 * and XGE_HAL_FAIL is returned; XGE_HAL_OK on success.
 * NOTE(review): success/error branch lines are elided in this chunk —
 * confirm cleanup ordering against the full source.
 */
601 xge_create_dma_tags(device_t dev)
603 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
604 xge_hal_status_e status = XGE_HAL_FAIL;
605 int mtu = (lldev->ifnetp)->if_mtu, maxsize;
/* Tx DMA tag: multi-segment, page-aligned. */
608 status = bus_dma_tag_create(
609 bus_get_dma_tag(dev), /* Parent */
610 PAGE_SIZE, /* Alignment */
612 BUS_SPACE_MAXADDR, /* Low Address */
613 BUS_SPACE_MAXADDR, /* High Address */
614 NULL, /* Filter Function */
615 NULL, /* Filter Function Arguments */
616 MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */
617 XGE_MAX_SEGS, /* Number of Segments */
618 MCLBYTES, /* Maximum Segment Size */
619 BUS_DMA_ALLOCNOW, /* Flags */
620 NULL, /* Lock Function */
621 NULL, /* Lock Function Arguments */
622 (&lldev->dma_tag_tx)); /* DMA Tag */
/* Pick the Rx buffer size class for the current MTU / buffer mode. */
626 maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
627 if(maxsize <= MCLBYTES) {
631 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
632 maxsize = MJUMPAGESIZE;
634 maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
/* Rx DMA tag: one segment of 'maxsize' bytes. */
638 status = bus_dma_tag_create(
639 bus_get_dma_tag(dev), /* Parent */
640 PAGE_SIZE, /* Alignment */
642 BUS_SPACE_MAXADDR, /* Low Address */
643 BUS_SPACE_MAXADDR, /* High Address */
644 NULL, /* Filter Function */
645 NULL, /* Filter Function Arguments */
646 maxsize, /* Maximum Size */
647 1, /* Number of Segments */
648 maxsize, /* Maximum Segment Size */
649 BUS_DMA_ALLOCNOW, /* Flags */
650 NULL, /* Lock Function */
651 NULL, /* Lock Function Arguments */
652 (&lldev->dma_tag_rx)); /* DMA Tag */
/* Spare map used when replacing a loaded Rx buffer. */
656 status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
657 &lldev->extra_dma_map);
/* Error path: tear down whatever was created. */
665 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
667 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
669 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
671 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
672 status = XGE_HAL_FAIL;
678 * xge_confirm_changes
679 * Disables and Enables interface to apply requested change
681 * @lldev Per-adapter Data
682 * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0)
684 * Returns 0 or Error Number
/*
 * Apply a configuration change (MTU or LRO) by cycling the interface:
 * take mtx_drv, bring the interface down, stop the device, apply the new
 * setting, re-init buffer modes, restart the device and bring the interface
 * back up.  If the device was never initialized, only the MTU/buffer-mode
 * bookkeeping is updated (no down/up cycle).
 */
687 xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
689 if(lldev->initialized == 0) goto _exit1;
691 mtx_lock(&lldev->mtx_drv);
692 if_down(lldev->ifnetp);
693 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
695 if(option == XGE_SET_MTU)
696 (lldev->ifnetp)->if_mtu = lldev->mtu;
698 xge_buffer_mode_init(lldev, lldev->mtu);
700 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
701 if_up(lldev->ifnetp);
702 mtx_unlock(&lldev->mtx_drv);
706 /* Request was to change MTU and device not initialized */
707 if(option == XGE_SET_MTU) {
708 (lldev->ifnetp)->if_mtu = lldev->mtu;
709 xge_buffer_mode_init(lldev, lldev->mtu);
716 * xge_change_lro_status
717 * Enable/Disable LRO feature
719 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
721 * Returns 0 or error number.
/*
 * Sysctl handler for hw.xge "enable_lro": validates the requested value
 * (must be 0 or 1), no-ops if the state is unchanged, otherwise records the
 * new state and restarts the interface via xge_confirm_changes().
 */
724 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
726 xge_lldev_t *lldev = (xge_lldev_t *)arg1;
727 int request = lldev->enabled_lro, status = XGE_HAL_OK;
/* Read-only query or sysctl error: nothing to apply. */
729 status = sysctl_handle_int(oidp, &request, arg2, req);
730 if((status != XGE_HAL_OK) || (!req->newptr))
733 if((request < 0) || (request > 1)) {
738 /* Return if current and requested states are same */
739 if(request == lldev->enabled_lro){
740 xge_trace(XGE_ERR, "LRO is already %s",
741 ((request) ? "enabled" : "disabled"));
745 lldev->enabled_lro = request;
746 xge_confirm_changes(lldev, XGE_CHANGE_LRO);
747 arg2 = lldev->enabled_lro;
754 * xge_add_sysctl_handlers
755 * Registers sysctl parameter value update handlers
757 * @lldev Per-adapter data
/*
 * Register the per-device sysctl nodes; currently a single read-write
 * "enable_lro" integer handled by xge_change_lro_status().
 */
760 xge_add_sysctl_handlers(xge_lldev_t *lldev)
762 struct sysctl_ctx_list *context_list =
763 device_get_sysctl_ctx(lldev->device);
764 struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
766 SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
767 "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
768 xge_change_lro_status, "I", "Enable or disable LRO feature");
773 * Connects driver to the system if probe was success
/*
 * Device attach method.  Performs, in order: device-config allocation,
 * mutex and HAL driver init, HAL device and pci_info allocation, bus
 * mastering, BAR0/BAR1 mapping, PCI config-space save, parameter init,
 * optional MSI allocation (falling back to line interrupts), IRQ resource
 * allocation, HAL device init, ifnet setup, interrupt handler hookup, and
 * finally sysctl/buffer-mode initialization.  Every failure unwinds via
 * xge_resources_free() with a label identifying how far attach progressed.
 * NOTE(review): many intermediate lines (labels, braces, #else arms of the
 * __FreeBSD_version conditionals) are elided in this chunk.
 */
778 xge_attach(device_t dev)
780 xge_hal_device_config_t *device_config;
781 xge_hal_device_attr_t attr;
783 xge_hal_device_t *hldev;
784 xge_pci_info_t *pci_info;
785 struct ifnet *ifnetp;
786 int rid, rid0, rid1, error;
787 int msi_count = 0, status = XGE_HAL_OK;
788 int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
790 device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
792 XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
793 attach_out_config, ENOMEM);
796 lldev = (xge_lldev_t *) device_get_softc(dev);
798 XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
802 xge_mutex_init(lldev);
804 error = xge_driver_initialize();
805 if(error != XGE_HAL_OK) {
806 xge_resources_free(dev, xge_free_mutex);
807 XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
812 (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
814 xge_resources_free(dev, xge_free_terminate_hal_driver);
815 XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
820 /* Our private structure */
822 (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
824 xge_resources_free(dev, xge_free_hal_device);
825 XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
828 lldev->pdev = pci_info;
829 pci_info->device = dev;
832 pci_enable_busmaster(dev);
834 /* Get virtual address for BAR0 */
836 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
838 if(pci_info->regmap0 == NULL) {
839 xge_resources_free(dev, xge_free_pci_info);
840 XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
843 attr.bar0 = (char *)pci_info->regmap0;
845 pci_info->bar0resource = (xge_bus_resource_t*)
846 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
847 if(pci_info->bar0resource == NULL) {
848 xge_resources_free(dev, xge_free_bar0);
849 XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
852 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
853 rman_get_bustag(pci_info->regmap0);
854 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
855 rman_get_bushandle(pci_info->regmap0);
856 ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
859 /* Get virtual address for BAR1 */
861 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
863 if(pci_info->regmap1 == NULL) {
864 xge_resources_free(dev, xge_free_bar0_resource);
865 XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
868 attr.bar1 = (char *)pci_info->regmap1;
870 pci_info->bar1resource = (xge_bus_resource_t*)
871 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
872 if(pci_info->bar1resource == NULL) {
873 xge_resources_free(dev, xge_free_bar1);
874 XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
877 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
878 rman_get_bustag(pci_info->regmap1);
879 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
880 rman_get_bushandle(pci_info->regmap1);
881 ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
884 /* Save PCI config space */
885 xge_pci_space_save(dev);
887 attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
888 attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
889 attr.irqh = lldev->irqhandle;
890 attr.cfgh = pci_info;
891 attr.pdev = pci_info;
893 /* Initialize device configuration parameters */
894 xge_init_params(device_config, dev);
/* MSI setup: request one message; fall back to line interrupts on any
 * allocation failure. */
897 if(lldev->enabled_msi) {
898 /* Number of MSI messages supported by device */
899 msi_count = pci_msi_count(dev);
901 /* Device supports MSI */
903 xge_trace(XGE_ERR, "MSI count: %d", msi_count);
904 xge_trace(XGE_ERR, "Now, driver supporting 1 message");
907 error = pci_alloc_msi(dev, &msi_count);
910 xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
911 enable_msi = XGE_HAL_INTR_MODE_MSI;
916 xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
920 lldev->enabled_msi = enable_msi;
922 /* Allocate resource for irq */
923 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
924 (RF_SHAREABLE | RF_ACTIVE));
925 if(lldev->irq == NULL) {
926 xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
927 ((rid == 0) ? "line interrupt" : "MSI"));
/* MSI irq allocation failed: release MSI and retry as line interrupt. */
929 error = pci_release_msi(dev);
931 xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
933 xge_trace(XGE_ERR, "Requires reboot to use MSI again");
935 xge_trace(XGE_ERR, "Trying line interrupts");
937 lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
938 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
939 (RF_SHAREABLE | RF_ACTIVE));
941 if(lldev->irq == NULL) {
942 xge_trace(XGE_ERR, "Allocating irq resource failed");
943 xge_resources_free(dev, xge_free_bar1_resource);
949 device_config->intr_mode = lldev->enabled_msi;
951 xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
952 lldev->enabled_msi, msi_count);
955 /* Initialize HAL device */
956 error = xge_hal_device_initialize(hldev, &attr, device_config);
957 if(error != XGE_HAL_OK) {
958 xge_resources_free(dev, xge_free_irq_resource);
959 XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
963 xge_hal_device_private_set(hldev, lldev);
965 error = xge_interface_setup(dev);
971 ifnetp = lldev->ifnetp;
972 ifnetp->if_mtu = device_config->mtu;
976 /* Associate interrupt handler with the device */
977 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
978 error = bus_setup_intr(dev, lldev->irq,
979 (INTR_TYPE_NET | INTR_MPSAFE),
980 #if __FreeBSD_version > 700030
983 xge_isr_msi, lldev, &lldev->irqhandle);
/* Save MSI registers so a down/up cycle can restore them. */
984 xge_msi_info_save(lldev);
987 error = bus_setup_intr(dev, lldev->irq,
988 (INTR_TYPE_NET | INTR_MPSAFE),
989 #if __FreeBSD_version > 700030
992 xge_isr_line, lldev, &lldev->irqhandle);
995 xge_resources_free(dev, xge_free_media_interface);
996 XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
1000 xge_print_info(lldev);
1002 xge_add_sysctl_handlers(lldev);
1004 xge_buffer_mode_init(lldev, device_config->mtu);
/* device_config was only needed during attach; release it. */
1007 xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1013 * xge_resources_free
1014 * Undo what-all we did during load/attach
1016 * @dev Device Handle
1017 * @error Identifies what-all to undo
/*
 * Unified unwind path for attach/detach.  'error' names the furthest point
 * attach reached; the switch cases fall through so each stage releases its
 * own resource and then everything acquired before it (classic reverse-order
 * teardown).  NOTE(review): 'break' statements are absent between cases in
 * the visible lines — fall-through appears intentional here, matching the
 * label-based unwind design; confirm against the full source.
 */
1020 xge_resources_free(device_t dev, xge_lables_e error)
1023 xge_pci_info_t *pci_info;
1024 xge_hal_device_t *hldev;
1028 lldev = (xge_lldev_t *) device_get_softc(dev);
1029 pci_info = lldev->pdev;
1032 hldev = lldev->devh;
1036 /* Teardown interrupt handler - device association */
1037 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1039 case xge_free_media_interface:
1041 ifmedia_removeall(&lldev->media);
1044 ether_ifdetach(lldev->ifnetp);
1045 if_free(lldev->ifnetp);
1047 xge_hal_device_private_set(hldev, NULL);
1048 xge_hal_device_disable(hldev);
1050 case xge_free_terminate_hal_device:
1052 xge_hal_device_terminate(hldev);
1054 case xge_free_irq_resource:
1055 /* Release IRQ resource */
1056 bus_release_resource(dev, SYS_RES_IRQ,
1057 ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1060 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1061 status = pci_release_msi(dev);
1065 "pci_release_msi returned %d", status);
1070 case xge_free_bar1_resource:
1071 /* Restore PCI configuration space */
1072 xge_pci_space_restore(dev);
1074 /* Free bar1resource */
1075 xge_os_free(NULL, pci_info->bar1resource,
1076 sizeof(xge_bus_resource_t));
1081 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1084 case xge_free_bar0_resource:
1085 /* Free bar0resource */
1086 xge_os_free(NULL, pci_info->bar0resource,
1087 sizeof(xge_bus_resource_t));
1092 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1095 case xge_free_pci_info:
1096 /* Disable Bus Master */
1097 pci_disable_busmaster(dev);
1099 /* Free pci_info_t */
1101 xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1103 case xge_free_hal_device:
1104 /* Free device configuration struct and HAL device */
1105 xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1107 case xge_free_terminate_hal_driver:
1108 /* Terminate HAL driver */
1109 hal_driver_init_count = hal_driver_init_count - 1;
1110 if(!hal_driver_init_count) {
1111 xge_hal_driver_terminate();
1114 case xge_free_mutex:
1115 xge_mutex_destroy(lldev);
1121 * Detaches driver from the Kernel subsystem
1123 * @dev Device Handle
/*
 * Device detach method.  The in_detach flag makes the teardown idempotent;
 * all resources are released via xge_resources_free(dev, xge_free_all).
 */
1126 xge_detach(device_t dev)
1128 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1130 if(lldev->in_detach == 0) {
1131 lldev->in_detach = 1;
1133 xge_resources_free(dev, xge_free_all);
1141 * To shutdown device before system shutdown
1143 * @dev Device Handle
/*
 * Device shutdown method, invoked at system shutdown.
 * NOTE(review): the body beyond fetching the softc is elided in this chunk.
 */
1146 xge_shutdown(device_t dev)
1148 xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1155 * xge_interface_setup
1158 * @dev Device Handle
1160 * Returns 0 on success, ENXIO/ENOMEM on failure
/*
 * Allocate and initialize the ifnet for this adapter: read the factory MAC
 * address from the HAL, fill in the standard ifnet fields and handlers
 * (xge_init / xge_ioctl / xge_send), advertise VLAN + checksum + optional
 * TSO4/LRO capabilities, and attach the Ethernet interface.
 * Returns 0 on success, ENXIO/ENOMEM on failure (after unwinding via
 * xge_resources_free).
 */
1163 xge_interface_setup(device_t dev)
1165 u8 mcaddr[ETHER_ADDR_LEN];
1166 xge_hal_status_e status;
1167 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1168 struct ifnet *ifnetp;
1169 xge_hal_device_t *hldev = lldev->devh;
1171 /* Get the MAC address of the device */
1172 status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1173 if(status != XGE_HAL_OK) {
1174 xge_resources_free(dev, xge_free_terminate_hal_device);
1175 XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
1178 /* Get interface ifnet structure for this Ether device */
1179 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1180 if(ifnetp == NULL) {
1181 xge_resources_free(dev, xge_free_terminate_hal_device);
1182 XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
1185 /* Initialize interface ifnet structure */
1186 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1187 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1188 ifnetp->if_baudrate = XGE_BAUDRATE;
1189 ifnetp->if_init = xge_init;
1190 ifnetp->if_softc = lldev;
1191 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1192 ifnetp->if_ioctl = xge_ioctl;
1193 ifnetp->if_start = xge_send;
1195 /* TODO: Check and assign optimal value */
1196 ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
1198 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
/* TSO/LRO capabilities are advertised only when enabled via kenv. */
1200 if(lldev->enabled_tso)
1201 ifnetp->if_capabilities |= IFCAP_TSO4;
1202 if(lldev->enabled_lro)
1203 ifnetp->if_capabilities |= IFCAP_LRO;
1205 ifnetp->if_capenable = ifnetp->if_capabilities;
1207 /* Attach the interface */
1208 ether_ifattach(ifnetp, mcaddr);
1215 * xge_callback_link_up
1216 * Callback for Link-up indication from HAL
1218 * @userdata Per-adapter data
1221 xge_callback_link_up(void *userdata)
1223 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1224 struct ifnet *ifnetp = lldev->ifnetp;
1226 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1227 if_link_state_change(ifnetp, LINK_STATE_UP);
1231 * xge_callback_link_down
1232 * Callback for Link-down indication from HAL
1234 * @userdata Per-adapter data
1237 xge_callback_link_down(void *userdata)
1239 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1240 struct ifnet *ifnetp = lldev->ifnetp;
1242 ifnetp->if_flags |= IFF_DRV_OACTIVE;
1243 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1247 * xge_callback_crit_err
1248 * Callback for Critical error indication from HAL
1250 * @userdata Per-adapter data
1251 * @type Event type (Enumerated hardware error)
1252 * @serr_data Hardware status
1255 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1257 xge_trace(XGE_ERR, "Critical Error");
1258 xge_reset(userdata);
1262  * xge_callback_event
1263  * Callback from HAL indicating that some event has been queued
1265  * @item Queued event item
/*
 * Dispatches deferred events produced into the HAL serialized queue:
 * retrying a stalled transmit path, or a requested device reset.
 */
1268 xge_callback_event(xge_queue_item_t *item)
1270     xge_lldev_t *lldev = NULL;
1271     xge_hal_device_t *hldev = NULL;
1272     struct ifnet *ifnetp = NULL;
1274     hldev = item->context;
1275     lldev = xge_hal_device_private(hldev);
1276     ifnetp = lldev->ifnetp;
1278     switch((int)item->event_type) {
1279         case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1280             if(lldev->initialized) {
/* Descriptors are free again: clear the "queue full" back-pressure flag */
1281                 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
/* NOTE(review): IFF_DRV_OACTIVE is an if_drv_flags bit; clearing it in
 * if_flags clears IFF_BROADCAST instead — looks like a bug, confirm. */
1282                     ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
/* No free descriptor yet: re-queue the event and try again later */
1285                     xge_queue_produce_context(
1286                         xge_hal_device_queue(lldev->devh),
1287                         XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1292         case XGE_LL_EVENT_DEVICE_RESETTING:
1293             xge_reset(item->context);
1302 * xge_ifmedia_change
1303 * Media change driver callback
1305 * @ifnetp Interface Handle
1307 * Returns 0 if media is Ether else EINVAL
1310 xge_ifmedia_change(struct ifnet *ifnetp)
1312 xge_lldev_t *lldev = ifnetp->if_softc;
1313 struct ifmedia *ifmediap = &lldev->media;
1315 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1319  * xge_ifmedia_status
1320  * Media status driver callback
1322  * @ifnetp Interface Handle
1323  * @ifmr Interface Media Settings
/*
 * Reads the adapter_status register and reports link as active only when
 * neither the local nor the remote RMAC fault bit is set.
 */
1326 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1328     xge_hal_status_e status;
1330     xge_lldev_t *lldev = ifnetp->if_softc;
1331     xge_hal_device_t *hldev = lldev->devh;
1333     ifmr->ifm_status = IFM_AVALID;
1334     ifmr->ifm_active = IFM_ETHER;
1336     status = xge_hal_mgmt_reg_read(hldev, 0,
/* NOTE(review): "®value" is a mojibake of "&regvalue" (the '&reg' run was
 * mis-decoded as the (R) sign during extraction) — restore to &regvalue. */
1337         xge_offsetof(xge_hal_pci_bar0_t, adapter_status), ®value);
1338     if(status != XGE_HAL_OK) {
1339         xge_trace(XGE_TRACE, "Getting adapter status failed");
/* Link is up only when no RMAC fault (local or remote) is reported */
1343     if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1344         XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1345         ifmr->ifm_status |= IFM_ACTIVE;
1346         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1347         if_link_state_change(ifnetp, LINK_STATE_UP);
1350         if_link_state_change(ifnetp, LINK_STATE_DOWN);
1358  * IOCTL to get statistics
1360  * @lldev Per-adapter data
1361  * @ifreqp Interface request
/*
 * SIOCGPRIVATE_0 dispatcher: copies the requested statistics /
 * configuration structure out to the user buffer in ifr_data.  The
 * command code is read from the first byte of the user buffer (*data).
 * HAL calls are serialized with mtx_drv.
 */
1364 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1366     xge_hal_status_e status = XGE_HAL_OK;
1367     char *data = (char *)ifreqp->ifr_data;
1369     int retValue = EINVAL;
1372         case XGE_QUERY_STATS:
1373             mtx_lock(&lldev->mtx_drv);
1374             status = xge_hal_stats_hw(lldev->devh,
1375                 (xge_hal_stats_hw_info_t **)&info);
1376             mtx_unlock(&lldev->mtx_drv);
1377             if(status == XGE_HAL_OK) {
1378                 if(copyout(info, ifreqp->ifr_data,
1379                     sizeof(xge_hal_stats_hw_info_t)) == 0)
1383                 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1388         case XGE_QUERY_PCICONF:
1389             info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1391                 mtx_lock(&lldev->mtx_drv);
1392                 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1393                     sizeof(xge_hal_pci_config_t));
1394                 mtx_unlock(&lldev->mtx_drv);
1395                 if(status == XGE_HAL_OK) {
1396                     if(copyout(info, ifreqp->ifr_data,
1397                         sizeof(xge_hal_pci_config_t)) == 0)
1402                         "Getting PCI configuration failed (%d)", status);
1404                 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1408         case XGE_QUERY_DEVSTATS:
1409             info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1411                 mtx_lock(&lldev->mtx_drv);
1412                 status =xge_hal_mgmt_device_stats(lldev->devh, info,
1413                     sizeof(xge_hal_stats_device_info_t));
1414                 mtx_unlock(&lldev->mtx_drv);
1415                 if(status == XGE_HAL_OK) {
1416                     if(copyout(info, ifreqp->ifr_data,
1417                         sizeof(xge_hal_stats_device_info_t)) == 0)
1421                     xge_trace(XGE_ERR, "Getting device info failed (%d)",
1424                 xge_os_free(NULL, info,
1425                     sizeof(xge_hal_stats_device_info_t));
1429         case XGE_QUERY_SWSTATS:
1430             info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1432                 mtx_lock(&lldev->mtx_drv);
1433                 status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1434                     sizeof(xge_hal_stats_sw_err_t));
1435                 mtx_unlock(&lldev->mtx_drv);
1436                 if(status == XGE_HAL_OK) {
1437                     if(copyout(info, ifreqp->ifr_data,
1438                         sizeof(xge_hal_stats_sw_err_t)) == 0)
1443                         "Getting tcode statistics failed (%d)", status);
1445                 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
/* Driver-maintained counters live in the softc; no HAL call needed */
1449         case XGE_QUERY_DRIVERSTATS:
1450             if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1451                 sizeof(xge_driver_stats_t)) == 0) {
1456                     "Copyout of driver statistics failed (%d)", status);
1460         case XGE_READ_VERSION:
1461             info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
/* NOTE(review): unbounded strcpy — safe only while XGE_DRIVER_VERSION
 * fits in XGE_BUFFER_SIZE; prefer a bounded copy. */
1463                 strcpy(info, XGE_DRIVER_VERSION);
1464                 if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1466                 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1470         case XGE_QUERY_DEVCONF:
1471             info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1473                 mtx_lock(&lldev->mtx_drv);
1474                 status = xge_hal_mgmt_device_config(lldev->devh, info,
1475                     sizeof(xge_hal_device_config_t));
1476                 mtx_unlock(&lldev->mtx_drv);
1477                 if(status == XGE_HAL_OK) {
1478                     if(copyout(info, ifreqp->ifr_data,
1479                         sizeof(xge_hal_device_config_t)) == 0)
1483                     xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1486                 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1490         case XGE_QUERY_BUFFER_MODE:
1491             if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
1496         case XGE_SET_BUFFER_MODE_1:
1497         case XGE_SET_BUFFER_MODE_2:
1498         case XGE_SET_BUFFER_MODE_5:
/* Reply 'Y'/'N' in the first byte; only mode 1 is accepted here */
1499             *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
/* NOTE(review): sizeof(data) is the size of a char* (4/8 bytes), not of
 * the buffer — probably meant sizeof(*data) or 1; confirm intent. */
1500             if(copyout(data, ifreqp->ifr_data, sizeof(data)) == 0)
1504             xge_trace(XGE_TRACE, "Nothing is matching");
1512  * xge_ioctl_registers
1513  * IOCTL to get registers
1515  * @lldev Per-adapter data
1516  * @ifreqp Interface request
/*
 * SIOCGPRIVATE_1 handler: reads ("-r"), writes with read-back verify
 * ("-w"), or dumps the entire BAR0 register space (final branch) based
 * on the option string in the user-supplied xge_register_t.
 */
1519 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1521     xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
1522     xge_hal_status_e status = XGE_HAL_OK;
1523     int retValue = EINVAL, offset = 0, index = 0;
1526     /* Reading a register */
1527     if(strcmp(data->option, "-r") == 0) {
1528         data->value = 0x0000;
1529         mtx_lock(&lldev->mtx_drv);
1530         status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1532         mtx_unlock(&lldev->mtx_drv);
1533         if(status == XGE_HAL_OK) {
1534             if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
1538     /* Writing to a register */
1539     else if(strcmp(data->option, "-w") == 0) {
1540         mtx_lock(&lldev->mtx_drv);
1541         status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
/* Read-back verification: the written value must be observable again */
1543         if(status == XGE_HAL_OK) {
1545             status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1547             if(status != XGE_HAL_OK) {
1548                 xge_trace(XGE_ERR, "Reading back updated register failed");
1551                 if(val64 != data->value) {
1553                         "Read and written register values mismatched");
1559             xge_trace(XGE_ERR, "Getting register value failed");
1561         mtx_unlock(&lldev->mtx_drv);
/* Dump branch: walk BAR0 in 8-byte strides and fill the user buffer.
 * NOTE(review): assumes the caller's ifr_data buffer is at least
 * sizeof(xge_hal_pci_bar0_t) — confirm against the userland tool. */
1564         mtx_lock(&lldev->mtx_drv);
1565         for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1566             index++, offset += 0x0008) {
1568             status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1569             if(status != XGE_HAL_OK) {
1570                 xge_trace(XGE_ERR, "Getting register value failed");
1573             *((u64 *)((u64 *)data + index)) = val64;
1576         mtx_unlock(&lldev->mtx_drv);
1579             if(copyout(data, ifreqp->ifr_data,
1580                 sizeof(xge_hal_pci_bar0_t)) != 0) {
1581                 xge_trace(XGE_ERR, "Copyout of register values failed");
1586             xge_trace(XGE_ERR, "Getting register values failed");
1594  * Callback to control the device - Interface configuration
1596  * @ifnetp Interface Handle
1597  * @command Device control command
1598  * @data Parameters associated with command (if any)
/*
 * if_ioctl handler.  Standard ifnet commands are handled inline
 * (address, MTU, flags, multicast, media, capabilities); the two
 * SIOCGPRIVATE_* commands dispatch to the driver-specific helpers.
 */
1601 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1603     struct ifreq *ifreqp = (struct ifreq *)data;
1604     xge_lldev_t *lldev = ifnetp->if_softc;
1605     struct ifmedia *ifmediap = &lldev->media;
1606     int retValue = 0, mask = 0;
/* Refuse all ioctls once detach has started */
1608     if(lldev->in_detach) {
1613         /* Set/Get ifnet address */
1616             ether_ioctl(ifnetp, command, data);
1621             retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1624         /* Set ifnet flags */
1626             if(ifnetp->if_flags & IFF_UP) {
1627                 /* Link status is UP */
1628                 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Re-sync promiscuous state with the (possibly changed) IFF_PROMISC bit */
1631                 xge_disable_promisc(lldev);
1632                 xge_enable_promisc(lldev);
1635                 /* Link status is DOWN */
1636                 /* If device is in running, make it down */
1637                 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1643         /* Add/delete multicast address */
1646             if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1647                 xge_setmulti(lldev);
1651         /* Set/Get net media */
1654             retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1657         /* Set capabilities */
1659             mtx_lock(&lldev->mtx_drv);
/* mask holds only the capability bits the caller wants toggled */
1660             mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1661             if(mask & IFCAP_TXCSUM) {
1662                 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
/* Disabling TX csum also disables TSO, which depends on it */
1663                     ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1664                     ifnetp->if_hwassist &=
1665                         ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1668                     ifnetp->if_capenable |= IFCAP_TXCSUM;
1669                     ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1672             if(mask & IFCAP_TSO4) {
1673                 if(ifnetp->if_capenable & IFCAP_TSO4) {
1674                     ifnetp->if_capenable &= ~IFCAP_TSO4;
1675                     ifnetp->if_hwassist &= ~CSUM_TSO;
1677                     xge_os_printf("%s: TSO Disabled",
1678                         device_get_nameunit(lldev->device));
/* TSO can only be enabled while TX checksum offload is on */
1680                 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1681                     ifnetp->if_capenable |= IFCAP_TSO4;
1682                     ifnetp->if_hwassist |= CSUM_TSO;
1684                     xge_os_printf("%s: TSO Enabled",
1685                         device_get_nameunit(lldev->device));
1689             mtx_unlock(&lldev->mtx_drv);
1692         /* Custom IOCTL 0 */
1693         case SIOCGPRIVATE_0:
1694             retValue = xge_ioctl_stats(lldev, ifreqp);
1697         /* Custom IOCTL 1 */
1698         case SIOCGPRIVATE_1:
1699             retValue = xge_ioctl_registers(lldev, ifreqp);
1711 * Initialize the interface
1713 * @plldev Per-adapter Data
1716 xge_init(void *plldev)
1718 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1720 mtx_lock(&lldev->mtx_drv);
1721 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1722 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1723 mtx_unlock(&lldev->mtx_drv);
1728  * Initialize the interface (called by holding lock)
1730  * @pdevin Per-adapter Data
/*
 * Core bring-up path shared by xge_init() and xge_reset().  Caller must
 * hold mtx_drv (asserted below).  Sets MTU, enables the HAL device,
 * programs the MAC address, opens Tx/Rx channels, enables interrupts,
 * and arms the poll timer.
 */
1733 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1735     struct ifnet *ifnetp = lldev->ifnetp;
1736     xge_hal_device_t *hldev = lldev->devh;
1737     struct ifaddr *ifaddrp;
1738     unsigned char *macaddr;
1739     struct sockaddr_dl *sockaddrp;
1740     int status = XGE_HAL_OK;
1742     mtx_assert((&lldev->mtx_drv), MA_OWNED);
1744     /* If device is in running state, initializing is not required */
1745     if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1748     /* Initializing timer */
1749     callout_init(&lldev->timer, 1);
1751     xge_trace(XGE_TRACE, "Set MTU size");
1752     status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1753     if(status != XGE_HAL_OK) {
1754         xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1758     /* Enable HAL device */
1759     xge_hal_device_enable(hldev);
1761     /* Get MAC address and update in HAL */
1762     ifaddrp = ifnetp->if_addr;
1763     sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1764     sockaddrp->sdl_type = IFT_ETHER;
1765     sockaddrp->sdl_alen = ifnetp->if_addrlen;
1766     macaddr = LLADDR(sockaddrp);
1767     xge_trace(XGE_TRACE,
1768         "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1769         *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1770         *(macaddr + 4), *(macaddr + 5));
1771     status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1772     if(status != XGE_HAL_OK)
1773         xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1775     /* Opening channels */
/* Channel open sleeps/allocates; drop the mutex around the HAL call */
1776     mtx_unlock(&lldev->mtx_drv);
1777     status = xge_channel_open(lldev, option);
1778     mtx_lock(&lldev->mtx_drv);
1779     if(status != XGE_HAL_OK)
1782     /* Set appropriate flags */
1783     ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
/* NOTE(review): IFF_DRV_OACTIVE is an if_drv_flags bit; clearing it in
 * if_flags clears IFF_BROADCAST instead — looks like a bug, confirm. */
1784     ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1786     /* Checksum capability */
1787     ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1788         (CSUM_TCP | CSUM_UDP) : 0;
1790     if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1791         ifnetp->if_hwassist |= CSUM_TSO;
1793     /* Enable interrupts */
1794     xge_hal_device_intr_enable(hldev);
/* First poll after 10s; xge_timer rearms itself at 1s thereafter */
1796     callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1798     /* Disable promiscuous mode */
1799     xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1800     xge_enable_promisc(lldev);
1802     /* Device is initialized */
1803     lldev->initialized = 1;
1804     xge_os_mdelay(1000);
1812 * Timer timeout function to handle link status
1814 * @devp Per-adapter Data
1817 xge_timer(void *devp)
1819 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1820 xge_hal_device_t *hldev = lldev->devh;
1822 /* Poll for changes */
1823 xge_hal_device_poll(hldev);
1826 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1833 * De-activate the interface
1835 * @lldev Per-adater Data
1838 xge_stop(xge_lldev_t *lldev)
1840 mtx_lock(&lldev->mtx_drv);
1841 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1842 mtx_unlock(&lldev->mtx_drv);
1847  * ISR filter function - to filter interrupts from other devices (shared)
1849  * @handle Per-adapter Data
1852  * FILTER_STRAY if interrupt is from other device
1853  * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
/*
 * Fast interrupt filter for shared INTx lines: reads the
 * general_int_status register and claims the interrupt only when the
 * adapter reports a pending cause.
 */
1856 xge_isr_filter(void *handle)
1858     xge_lldev_t *lldev = (xge_lldev_t *)handle;
1859     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1860     u16 retValue = FILTER_STRAY;
/* NOTE(review): the u64 val64 declaration is not visible in this view */
1863     XGE_DRV_STATS(isr_filter);
1865     val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1866         &bar0->general_int_status);
/* Non-zero status => ours; hand off to the threaded handler */
1867     retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1874  * Interrupt service routine for Line interrupts
1876  * @plldev Per-adapter Data
/*
 * Threaded INTx handler: lets the HAL service the interrupt, then kicks
 * the transmit path if packets queued up while the ISR ran.
 */
1879 xge_isr_line(void *plldev)
1881     xge_hal_status_e status;
1882     xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1883     xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1884     struct ifnet *ifnetp = lldev->ifnetp;
1886     XGE_DRV_STATS(isr_line);
1888     if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1889         status = xge_hal_device_handle_irq(hldev);
/* Restart transmission for anything that arrived during IRQ handling */
1890         if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1897 * ISR for Message signaled interrupts
1900 xge_isr_msi(void *plldev)
1902 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1903 XGE_DRV_STATS(isr_msi);
1904 xge_hal_device_continue_irq(lldev->devh);
1909  * Initiate and open all Rx channels
1912  * @lldev Per-adapter Data
1913  * @rflag Channel open/close/reopen flag
1915  * Returns 0 or Error Number
/*
 * Opens the ring (Rx) channel for queue qid after verifying the adapter
 * is ready.  Completion, init and term callbacks are wired to the
 * driver's Rx handlers via the attr structure.
 */
1918 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1920     u64 adapter_status = 0x0;
1921     xge_hal_status_e status = XGE_HAL_FAIL;
1923     xge_hal_channel_attr_t attr = {
1926         .callback = xge_rx_compl,
1927         .per_dtr_space = sizeof(xge_rx_priv_t),
1929         .type = XGE_HAL_CHANNEL_TYPE_RING,
1931         .dtr_init = xge_rx_initial_replenish,
1932         .dtr_term = xge_rx_term
1935     /* If device is not ready, return */
1936     status = xge_hal_device_status(lldev->devh, &adapter_status);
1937     if(status != XGE_HAL_OK) {
1938         xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1939         XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1942     status = xge_hal_channel_open(lldev->devh, &attr,
1943         &lldev->ring_channel[qid], rflag);
1952  * Initialize and open all Tx channels
1954  * @lldev Per-adapter Data
1955  * @tflag Channel open/close/reopen flag
1957  * Returns 0 or Error Number
/*
 * Opens every FIFO (Tx) channel.  On a partial failure, all channels
 * opened so far are closed again so the caller sees all-or-nothing.
 */
1960 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1962     xge_hal_status_e status = XGE_HAL_FAIL;
1963     u64 adapter_status = 0x0;
1966     xge_hal_channel_attr_t attr = {
1968         .callback = xge_tx_compl,
1969         .per_dtr_space = sizeof(xge_tx_priv_t),
1971         .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1973         .dtr_init = xge_tx_initial_replenish,
1974         .dtr_term = xge_tx_term
1977     /* If device is not ready, return */
1978     status = xge_hal_device_status(lldev->devh, &adapter_status);
1979     if(status != XGE_HAL_OK) {
1980         xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1981         XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1984     for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
/* Comma operator here is intentional (attr reused per iteration) */
1985         attr.post_qid = qindex,
1986         status = xge_hal_channel_open(lldev->devh, &attr,
1987             &lldev->fifo_channel[qindex], tflag);
1988         if(status != XGE_HAL_OK) {
/* Roll back the channels opened before this failure */
1989             for(index = 0; index < qindex; index++)
1990                 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2002  * @lldev Per-adapter Data
/*
 * Enables MSI: reads msi_control from PCI config space, locates the MSI
 * data register (offset depends on the 64-bit address capability bit),
 * updates it, then programs every free channel's TxMAT/RxMAT entry with
 * the MSI vector data.
 */
2005 xge_enable_msi(xge_lldev_t *lldev)
2007     xge_list_t *item = NULL;
2008     xge_hal_device_t *hldev = lldev->devh;
2009     xge_hal_channel_t *channel = NULL;
2010     u16 offset = 0, val16 = 0;
2012     xge_os_pci_read16(lldev->pdev, NULL,
2013         xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2015     /* Update msi_data */
/* Bit 0x80 = 64-bit address capable: data register moves from 0x48 to 0x4c */
2016     offset = (val16 & 0x80) ? 0x4c : 0x48;
2017     xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2022     xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2024     /* Update msi_control */
2025     xge_os_pci_read16(lldev->pdev, NULL,
2026         xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2028     xge_os_pci_write16(lldev->pdev, NULL,
2029         xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2031     /* Set TxMAT and RxMAT registers with MSI */
2032     xge_list_for_each(item, &hldev->free_channels) {
2033         channel = xge_container_of(item, xge_hal_channel_t, item);
2034         xge_hal_channel_msi_set(channel, 1, (u32)val16);
2040  * Open both Tx and Rx channels
2042  * @lldev Per-adapter Data
2043  * @option Channel reopen option
/*
 * Full datapath bring-up: optional MSI programming, DMA tag creation,
 * all Rx rings (with an automatic 1-buffer -> 5-buffer fallback for
 * jumbo frames on kernels that cannot supply contiguous clusters), LRO
 * session pool initialization, then the Tx FIFOs.  The _exit1 error
 * path below unwinds whatever succeeded.
 */
2046 xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2048     xge_lro_entry_t *lro_session = NULL;
2049     xge_hal_status_e status = XGE_HAL_OK;
2050     int index = 0, index2 = 0;
2052     if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2053         xge_msi_info_restore(lldev);
2054         xge_enable_msi(lldev);
2058     status = xge_create_dma_tags(lldev->device);
2059     if(status != XGE_HAL_OK)
2060         XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2062     /* Open ring (Rx) channel */
2063     for(index = 0; index < XGE_RING_COUNT; index++) {
2064         status = xge_rx_open(index, lldev, option);
2065         if(status != XGE_HAL_OK) {
2067              * DMA mapping fails in the unpatched Kernel which can't
2068              * allocate contiguous memory for Jumbo frames.
2069              * Try using 5 buffer mode.
2071             if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2072                 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2074                 /* Close so far opened channels */
2075                 for(index2 = 0; index2 < index; index2++) {
2076                     xge_hal_channel_close(lldev->ring_channel[index2],
2080                 /* Destroy DMA tags intended to use for 1 buffer mode */
2081                 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2082                     lldev->extra_dma_map)) {
2083                     xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2085                 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2086                     xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2087                 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2088                     xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2090                 /* Switch to 5 buffer mode */
2091                 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2092                 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2098                 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
/* Pre-allocate the pool of LRO session descriptors */
2104     if(lldev->enabled_lro) {
2105         SLIST_INIT(&lldev->lro_free);
2106         SLIST_INIT(&lldev->lro_active);
2107         lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2109         for(index = 0; index < lldev->lro_num; index++) {
2110             lro_session = (xge_lro_entry_t *)
2111                 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2112             if(lro_session == NULL) {
/* Allocation fell short: shrink the pool to what we actually got */
2113                 lldev->lro_num = index;
2116             SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2120     /* Open FIFO (Tx) channel */
2121     status = xge_tx_open(lldev, option);
2122     if(status != XGE_HAL_OK)
2123         XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2129      * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2130      * Initialization of LRO failed (index is XGE_RING_COUNT)
2131      * Opening Tx channel failed (index is XGE_RING_COUNT)
2133     for(index2 = 0; index2 < index; index2++)
2134         xge_hal_channel_close(lldev->ring_channel[index2], option);
2142  * Close both Tx and Rx channels
2144  * @lldev Per-adapter Data
2145  * @option Channel reopen option
/*
 * Tears down the datapath in reverse order of xge_channel_open():
 * all Tx FIFOs, then all Rx rings, then the DMA maps/tags.  Failures
 * to destroy DMA resources are logged but not propagated.
 */
2149 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2155     /* Close FIFO (Tx) channel */
2156     for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2157         xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2159     /* Close Ring (Rx) channels */
2160     for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2161         xge_hal_channel_close(lldev->ring_channel[qindex], option);
2163     if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2164         xge_trace(XGE_ERR, "Rx extra map destroy failed");
2165     if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2166         xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2167     if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2168         xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2175  * @arg Parameter passed from dmamap
2177  * @nseg Number of segments
/*
 * bus_dmamap_load(9) callback: stores the first segment's bus address
 * into the caller-provided bus_addr_t.  NOTE(review): the customary
 * "if(!error)" guard is not visible in this view — confirm it precedes
 * the store in the full source.
 */
2181 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2184         *(bus_addr_t *) arg = segs->ds_addr;
2192 * @lldev Per-adapter Data
2195 xge_reset(xge_lldev_t *lldev)
2197 xge_trace(XGE_TRACE, "Reseting the chip");
2199 /* If the device is not initialized, return */
2200 if(lldev->initialized) {
2201 mtx_lock(&lldev->mtx_drv);
2202 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2203 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2204 mtx_unlock(&lldev->mtx_drv);
2212  * Set an address as a multicast address
2214  * @lldev Per-adapter Data
/*
 * Synchronizes the hardware multicast filter with the ifnet multicast
 * list: toggles all-multicast mode, then (when filtering) clears the
 * MAC address table slots and reprograms them from if_multiaddrs.
 */
2217 xge_setmulti(xge_lldev_t *lldev)
2219     struct ifmultiaddr *ifma;
2221     xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2222     struct ifnet *ifnetp = lldev->ifnetp;
2225     int table_size = 47;
2226     xge_hal_status_e status = XGE_HAL_OK;
2227     u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2229     if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2230         status = xge_hal_device_mcast_enable(hldev);
2231         lldev->all_multicast = 1;
2233     else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2234         status = xge_hal_device_mcast_disable(hldev);
2235         lldev->all_multicast = 0;
2238     if(status != XGE_HAL_OK) {
2239         xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2243     /* Updating address list */
2244     if_maddr_rlock(ifnetp);
2246     TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2247         if(ifma->ifma_addr->sa_family != AF_LINK) {
2250         lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2253     if_maddr_runlock(ifnetp);
2255     if((!lldev->all_multicast) && (index)) {
2256         lldev->macaddr_count = (index + 1);
/* More entries than the hardware table can hold — bail out */
2257         if(lldev->macaddr_count > table_size) {
2261         /* Clear old addresses */
/* Reset all 48 slots to the broadcast pattern before reprogramming */
2262         for(index = 0; index < 48; index++) {
2263             xge_hal_device_macaddr_set(hldev, (offset + index),
2268         /* Add new addresses */
2269         if_maddr_rlock(ifnetp);
2271         TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2272             if(ifma->ifma_addr->sa_family != AF_LINK) {
2275             lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2276             xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2279         if_maddr_runlock(ifnetp);
2286  * xge_enable_promisc
2287  * Enable Promiscuous Mode
2289  * @lldev Per-adapter Data
/*
 * Honors IFF_PROMISC: turns on HAL promiscuous mode and reprograms
 * rx_pa_cfg so the adapter stops stripping VLAN tags (the stack needs
 * to see them when sniffing).
 */
2292 xge_enable_promisc(xge_lldev_t *lldev)
2294     struct ifnet *ifnetp = lldev->ifnetp;
2295     xge_hal_device_t *hldev = lldev->devh;
2296     xge_hal_pci_bar0_t *bar0 = NULL;
2299     bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2301     if(ifnetp->if_flags & IFF_PROMISC) {
2302         xge_hal_device_promisc_enable(lldev->devh);
2305          * When operating in promiscuous mode, don't strip the VLAN tag
2307         val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear the strip-mode field, then set it to 0 (no stripping) */
2309         val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2310         val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2311         xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2314         xge_trace(XGE_TRACE, "Promiscuous mode ON");
2319  * xge_disable_promisc
2320  * Disable Promiscuous Mode
2322  * @lldev Per-adapter Data
/*
 * Counterpart of xge_enable_promisc(): turns HAL promiscuous mode off
 * and re-enables hardware VLAN tag stripping in rx_pa_cfg.
 */
2325 xge_disable_promisc(xge_lldev_t *lldev)
2327     xge_hal_device_t *hldev = lldev->devh;
2328     xge_hal_pci_bar0_t *bar0 = NULL;
2331     bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2333     xge_hal_device_promisc_disable(lldev->devh);
2336      * Strip VLAN tag when operating in non-promiscuous mode
2338     val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear the strip-mode field, then set mode 1 (strip VLAN tags) */
2340     val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2341     val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2342     xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2345     xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2350  * Change interface MTU to a requested valid size
2352  * @lldev Per-adapter Data
2353  * @NewMtu Requested MTU
2355  * Returns 0 or Error Number
/*
 * Validates the requested MTU against the HAL's supported range, stores
 * it in the softc and notifies the reconfiguration machinery
 * (xge_confirm_changes) to apply it.
 */
2358 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2360     int status = XGE_HAL_OK;
2362     /* Check requested MTU size for boundary */
2363     if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2364         XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2367     lldev->mtu = new_mtu;
2368     xge_confirm_changes(lldev, XGE_SET_MTU);
2377  * Common code for both stop and part of reset. Disables device, interrupts and
2380  * @dev Device Handle
2381  * @option Channel normal/reset option
/*
 * Core bring-down path (caller holds mtx_drv, asserted below): clears
 * RUNNING/OACTIVE, stops the poll timer, disables interrupts, flushes
 * the HAL event queue, disables and resets the device, and closes all
 * channels.
 */
2384 xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2386     xge_hal_device_t *hldev = lldev->devh;
2387     struct ifnet *ifnetp = lldev->ifnetp;
2390     mtx_assert((&lldev->mtx_drv), MA_OWNED);
2392     /* If device is not in "Running" state, return */
2393     if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2396     /* Set appropriate flags */
2397     ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2400     callout_stop(&lldev->timer);
2402     /* Disable interrupts */
2403     xge_hal_device_intr_disable(hldev);
/* Queue flush may sleep; drop the driver mutex around it */
2405     mtx_unlock(&lldev->mtx_drv);
2406     xge_queue_flush(xge_hal_device_queue(lldev->devh));
2407     mtx_lock(&lldev->mtx_drv);
2409     /* Disable HAL device */
2410     if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2411         xge_trace(XGE_ERR, "Disabling HAL device failed");
2412         xge_hal_device_status(hldev, &val64);
2413         xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2416     /* Close Tx and Rx channels */
2417     xge_channel_close(lldev, option);
2419     /* Reset HAL device */
2420     xge_hal_device_reset(hldev);
2422     xge_os_mdelay(1000);
2423     lldev->initialized = 0;
2425     if_link_state_change(ifnetp, LINK_STATE_DOWN);
2432 * xge_set_mbuf_cflags
2433 * set checksum flag for the mbuf
2438 xge_set_mbuf_cflags(mbuf_t pkt)
2440 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2441 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2442 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2443 pkt->m_pkthdr.csum_data = htons(0xffff);
2447 * xge_lro_flush_sessions
2448 * Flush LRO session and send accumulated LRO packet to upper layer
2450 * @lldev Per-adapter Data
2453 xge_lro_flush_sessions(xge_lldev_t *lldev)
2455 xge_lro_entry_t *lro_session = NULL;
2457 while(!SLIST_EMPTY(&lldev->lro_active)) {
2458 lro_session = SLIST_FIRST(&lldev->lro_active);
2459 SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2460 xge_lro_flush(lldev, lro_session);
2466  * Flush LRO session. Send accumulated LRO packet to upper layer
2468  * @lldev Per-adapter Data
2469  * @lro LRO session to be flushed
/*
 * Finalizes an aggregated packet: patches the IP total length and the
 * TCP ack/window (and timestamp option, when present) in the stored
 * headers, injects the chain into the stack via if_input, and returns
 * the session descriptor to the free list.
 */
2472 xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2474     struct ip *header_ip;
2475     struct tcphdr *header_tcp;
2478     if(lro_session->append_cnt) {
2479         header_ip = lro_session->lro_header_ip;
2480         header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2481         lro_session->m_head->m_pkthdr.len = lro_session->len;
2482         header_tcp = (struct tcphdr *)(header_ip + 1);
2483         header_tcp->th_ack = lro_session->ack_seq;
2484         header_tcp->th_win = lro_session->window;
2485         if(lro_session->timestamp) {
/* ptr[0] is the NOP/NOP/TIMESTAMP option header; values follow */
2486             ptr = (u32 *)(header_tcp + 1);
2487             ptr[1] = htonl(lro_session->tsval);
2488             ptr[2] = lro_session->tsecr;
2492     (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2493     lro_session->m_head = NULL;
2494     lro_session->timestamp = 0;
2495     lro_session->append_cnt = 0;
2496     SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2500 * xge_lro_accumulate
2501 * Accumulate packets to form a large LRO packet based on various conditions
2503 * @lldev Per-adapter Data
2504 * @m_head Current Packet
2506 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2509 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2511 struct ether_header *header_ethernet;
/*
 * NOTE(review): this is the body of the LRO accumulate routine; its function
 * header is above this block. Several original lines (early-exit returns and
 * closing braces) are not visible in this view, so comments below annotate
 * only the statements that are shown.
 */
struct ip *header_ip;
struct tcphdr *header_tcp;
struct mbuf *buffer_next, *buffer_tail;
xge_lro_entry_t *lro_session;
xge_hal_status_e status = XGE_HAL_FAIL;  /* default: packet not consumed by LRO */
int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
/* Get Ethernet header */
header_ethernet = mtod(m_head, struct ether_header *);
/* Return if it is not IP packet */
if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
/*
 * Locate the IP header: in 1-buffer mode it directly follows the Ethernet
 * header in the same mbuf; in multi-buffer modes it sits at the start of
 * the second mbuf in the chain.
 */
header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
(struct ip *)(header_ethernet + 1) :
mtod(m_head->m_next, struct ip *);
/* Return if it is not TCP packet */
if(header_ip->ip_p != IPPROTO_TCP)
/* Return if packet has options (ip_hl > 5 words means IP options present) */
if((header_ip->ip_hl << 2) != sizeof(*header_ip))
/* Return if packet is fragmented */
if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
/* Get TCP header (valid because header length was verified above) */
header_tcp = (struct tcphdr *)(header_ip + 1);
/* Return if any flag other than ACK or PUSH is set (SYN/FIN/RST/URG) */
if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
/* Only timestamp option is handled */
tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
ptr = (u32 *)(header_tcp + 1);
if(tcp_options != 0) {
/* Reject anything that is not exactly the NOP-NOP-timestamp layout */
if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
(*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
/* Total length of packet (IP) */
ip_len = ntohs(header_ip->ip_len);
/* TCP payload length = IP total - TCP header - IP header */
tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
/* If the frame is padded (e.g. to Ethernet minimum), trim the padding */
tot_len = m_head->m_pkthdr.len;
trim = tot_len - (ip_len + ETHER_HDR_LEN);
m_adj(m_head, -trim);
tot_len = m_head->m_pkthdr.len;
/* Walk the chain to find its last mbuf for later tail-append */
buffer_next = m_head;
while(buffer_next != NULL) {
buffer_tail = buffer_next;
buffer_next = buffer_tail->m_next;
/* Total size of only headers (everything before the TCP payload) */
hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
/* Get sequence number */
seq = ntohl(header_tcp->th_seq);
/* Look for an active session matching this 4-tuple (ports in network order) */
SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
if(lro_session->source_port == header_tcp->th_sport &&
lro_session->dest_port == header_tcp->th_dport &&
lro_session->source_ip == header_ip->ip_src.s_addr &&
lro_session->dest_ip == header_ip->ip_dst.s_addr) {
/* Unmatched sequence number (out-of-order/loss), flush LRO session */
if(__predict_false(seq != lro_session->next_seq)) {
SLIST_REMOVE(&lldev->lro_active, lro_session,
xge_lro_entry_t, next);
xge_lro_flush(lldev, lro_session);
/* Handle timestamp option: keep the newest tsval/tsecr for the merged segment */
u32 tsval = ntohl(*(ptr + 1));
if(__predict_false(lro_session->tsval > tsval ||
lro_session->tsval = tsval;
lro_session->tsecr = *(ptr + 2);
/* Advance session bookkeeping with this segment's payload and latest ACK/window */
lro_session->next_seq += tcp_data_len;
lro_session->ack_seq = header_tcp->th_ack;
lro_session->window = header_tcp->th_win;
/* If TCP data/payload is of 0 size (pure ACK), free mbuf */
if(tcp_data_len == 0) {
status = XGE_HAL_OK;
lro_session->append_cnt++;
lro_session->len += tcp_data_len;
/* Adjust mbuf so that m_data points to payload rather than headers */
m_adj(m_head, hlen);
/* Append this packet to LRO accumulated packet */
lro_session->m_tail->m_next = m_head;
lro_session->m_tail = buffer_tail;
/* Flush if LRO packet is exceeding maximum size */
if(lro_session->len >
(XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
SLIST_REMOVE(&lldev->lro_active, lro_session,
xge_lro_entry_t, next);
xge_lro_flush(lldev, lro_session);
status = XGE_HAL_OK;
/* No matching session: need a free session slot to start a new one */
if(SLIST_EMPTY(&lldev->lro_free))
/* Start a new LRO session seeded from this packet's headers */
lro_session = SLIST_FIRST(&lldev->lro_free);
SLIST_REMOVE_HEAD(&lldev->lro_free, next);
SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
lro_session->source_port = header_tcp->th_sport;
lro_session->dest_port = header_tcp->th_dport;
lro_session->source_ip = header_ip->ip_src.s_addr;
lro_session->dest_ip = header_ip->ip_dst.s_addr;
lro_session->next_seq = seq + tcp_data_len;
lro_session->mss = tcp_data_len;
lro_session->ack_seq = header_tcp->th_ack;
lro_session->window = header_tcp->th_win;
/* Keep a pointer to the IP header so the flush path can rewrite ip_len */
lro_session->lro_header_ip = header_ip;
/* Handle timestamp option (tsecr kept in network order, tsval in host order) */
lro_session->timestamp = 1;
lro_session->tsval = ntohl(*(ptr + 1));
lro_session->tsecr = *(ptr + 2);
lro_session->len = tot_len;
lro_session->m_head = m_head;
lro_session->m_tail = buffer_tail;
status = XGE_HAL_OK;
2686 * xge_accumulate_large_rx
2687 * Accumulate packets to form a large LRO packet based on various conditions
2689 * @lldev Per-adapter Data
2690 * @pkt Current packet
2691 * @pkt_length Packet Length
2692 * @rxd_priv Rx Descriptor Private Data
xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
xge_rx_priv_t *rxd_priv)
/*
 * Try to merge the packet into an LRO session; if LRO declines it
 * (non-TCP, out-of-order, no free session, ...), sync the buffer for
 * CPU access and hand the packet to the stack directly.
 */
if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
BUS_DMASYNC_POSTREAD);
(*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2707 * If the interrupt is due to received frame (Rx completion), send it up
2709 * @channelh Ring Channel Handle
2710 * @dtr Current Descriptor
2711 * @t_code Transfer Code indicating success or error
2712 * @userdata Per-adapter Data
2714 * Returns XGE_HAL_OK or HAL error enums
xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
/* Rx completion callback: for each completed descriptor, replenish its
 * buffer, then push the received mbuf up the stack (via LRO if enabled). */
struct ifnet *ifnetp;
xge_rx_priv_t *rxd_priv = NULL;
mbuf_t mbuf_up = NULL;
xge_hal_status_e status = XGE_HAL_OK;
xge_hal_dtr_info_t ext_info;
/* get the user data portion (per-adapter softc) */
xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
XGE_DRV_STATS(rx_completions);
/* get the interface pointer */
ifnetp = lldev->ifnetp;
XGE_DRV_STATS(rx_desc_compl);
/* Drop everything if the interface is no longer running */
if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
status = XGE_HAL_FAIL;
/* Hardware reported an error t_code: count it, let the HAL handle it,
 * and recycle the descriptor back to the ring */
xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
XGE_DRV_STATS(rx_tcode);
xge_hal_device_handle_tcode(channelh, dtr, t_code);
xge_hal_ring_dtr_post(channelh,dtr);
/* Get the private data for this descriptor */
rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
/*
 * Prepare one buffer to send it to upper layer -- since the upper
 * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
 * prepare a new buffer, do mapping, use it in the current
 * descriptor and post descriptor back to ring channel
 */
mbuf_up = rxd_priv->bufferArray[0];
/* Gets details of mbuf i.e., packet length */
xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
/* Allocate a replacement buffer appropriate for the ring's buffer mode */
(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
xge_get_buf(dtr, rxd_priv, lldev, 0) :
xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
if(status != XGE_HAL_OK) {
xge_trace(XGE_ERR, "No memory");
XGE_DRV_STATS(rx_no_buf);
/*
 * Unable to allocate buffer. Instead of discarding, post
 * descriptor back to channel for future processing of same
 * packet.
 */
xge_hal_ring_dtr_post(channelh, dtr);
/* Get the extended information (protocol flags, checksums, VLAN tag) */
xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
/*
 * As we have allocated a new mbuf for this descriptor, post this
 * descriptor with new mbuf back to ring channel
 */
vlan_tag = ext_info.vlan;
xge_hal_ring_dtr_post(channelh, dtr);
if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
(ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
(ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
(ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
/* Hardware verified both checksums: mark the mbuf accordingly */
xge_set_mbuf_cflags(mbuf_up);
if(lldev->enabled_lro) {
xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
/* Post-Read sync for buffers
 * NOTE(review): the loop iterates over rxd_mbuf_cnt but always syncs
 * dmainfo[0].dma_map -- presumably dmainfo[index] was intended for
 * multi-buffer modes; verify against the HAL's buffer-mode layout. */
for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
bus_dmamap_sync(lldev->dma_tag_rx,
rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
(*ifnetp->if_input)(ifnetp, mbuf_up);
/*
 * Packet with erroneous checksum, let the upper layer deal
 * with it.
 */
/* Post-Read sync for buffers (same dmainfo[0] concern as above) */
for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
bus_dmamap_sync(lldev->dma_tag_rx,
rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
/* Tag the mbuf with the hardware-extracted VLAN id */
mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
mbuf_up->m_flags |= M_VLANTAG;
if(lldev->enabled_lro)
xge_lro_flush_sessions(lldev);
(*ifnetp->if_input)(ifnetp, mbuf_up);
} while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
/* Flush any partially accumulated LRO sessions before returning */
if(lldev->enabled_lro)
xge_lro_flush_sessions(lldev);
2858 * @mbuf_up Packet to send up
2859 * @channelh Ring Channel Handle
2861 * @lldev Per-adapter Data
2862 * @rxd_priv Rx Descriptor Private Data
2864 * Returns XGE_HAL_OK or HAL error enums
xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
/* Extract per-buffer packet lengths from a completed Rx descriptor and
 * propagate them into the mbuf chain (m_len per buffer, pkthdr.len total). */
/* NOTE(review): `= {0,0}` already zero-fills the whole array in C, making
 * the xge_os_memzero below redundant (harmless). */
int pkt_length[5]={0,0}, pkt_len=0;
dma_addr_t dma_data[5];
if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
xge_os_memzero(pkt_length, sizeof(pkt_length));
/*
 * Retrieve data of interest from the completed descriptor -- This
 * returns the per-buffer packet lengths
 */
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
/* Walk the buffer chain, assigning each mbuf its buffer's length
 * (m is presumably initialized to the chain head in a line not visible
 * here -- confirm against the full source) */
for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
m->m_len = pkt_length[index];
if(index < (lldev->rxd_mbuf_cnt-1)) {
m->m_next = rxd_priv->bufferArray[index + 1];
pkt_len+=pkt_length[index];
/*
 * Since 2 buffer mode is an exceptional case where data is in 3rd
 * buffer but not in 2nd buffer
 */
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
m->m_len = pkt_length[2];
pkt_len+=pkt_length[2];
/*
 * Update length of newly created buffer to be sent up with packet
 */
mbuf_up->m_pkthdr.len = pkt_len;
/*
 * 1-buffer mode: retrieve the single buffer's length directly -- This
 * returns the packet length
 */
xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
/*
 * Update length of newly created buffer to be sent up with packet
 */
mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2939 * Flush Tx descriptors
2941 * @channelh Channel handle
xge_flush_txds(xge_hal_channel_h channelh)
/* Reclaim all completed Tx descriptors: unmap and free each sent mbuf,
 * then return the descriptor to the fifo's free pool. Called when a
 * descriptor reservation fails, to make room for new transmits. */
xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
xge_hal_dtr_h tx_dtr;
xge_tx_priv_t *tx_priv;
while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
XGE_DRV_STATS(tx_desc_compl);
/* Error t_code path: count and let the HAL process the error code */
xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
XGE_DRV_STATS(tx_tcode);
xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
/* Unmap, free the sent chain, and release the descriptor */
tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
m_freem(tx_priv->buffer);
tx_priv->buffer = NULL;
xge_hal_fifo_dtr_free(channelh, tx_dtr);
2972 * @ifnetp Interface Handle
xge_send(struct ifnet *ifnetp)
/* if_start entry point: try each Tx queue in turn; skip a queue whose
 * lock is contended (the holder will drain it) rather than blocking. */
xge_lldev_t *lldev = ifnetp->if_softc;
for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
XGE_DRV_STATS(tx_lock_fail);
xge_send_locked(ifnetp, qindex);
mtx_unlock(&lldev->mtx_tx[qindex]);
xge_send_locked(struct ifnet *ifnetp, int qindex)
/* Drain the if_snd queue onto fifo channel `qindex`. Caller must hold
 * lldev->mtx_tx[qindex]. For each packet: defragment if it has too many
 * segments, reserve a descriptor, DMA-map, program the descriptor
 * (VLAN/TSO/checksum offload), and post it to the hardware. */
/* NOTE(review): `static` makes this one segs[] array shared by all Tx
 * queues, yet each queue is guarded only by its own mutex -- concurrent
 * xge_send_locked calls on different queues could race on it. Verify
 * whether a higher-level serialization exists. */
static bus_dma_segment_t segs[XGE_MAX_SEGS];
xge_hal_status_e status;
unsigned int max_fragments;
xge_lldev_t *lldev = ifnetp->if_softc;
xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
mbuf_t m_head = NULL;
mbuf_t m_buf = NULL;
xge_tx_priv_t *ll_tx_priv = NULL;
register unsigned int count = 0;
unsigned int nsegs = 0;
max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
/* If device is not initialized, return */
if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
XGE_DRV_STATS(tx_calls);
/*
 * This loop will be executed for each packet in the kernel maintained
 * queue -- each packet can be with fragments as an mbuf chain
 */
IF_DEQUEUE(&ifnetp->if_snd, m_head);
if (m_head == NULL) {
ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
/* Count non-empty mbufs in the chain to see if it fits the fifo */
for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
if(m_buf->m_len) count += 1;
/* Too many fragments: collapse the chain into fewer clusters */
if(count >= max_fragments) {
m_buf = m_defrag(m_head, M_NOWAIT);
if(m_buf != NULL) m_head = m_buf;
XGE_DRV_STATS(tx_defrag);
/* Reserve descriptors */
status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
if(status != XGE_HAL_OK) {
XGE_DRV_STATS(tx_no_txd);
xge_flush_txds(channelh);
/* Program the hardware VLAN tag (0 when the mbuf carries none) */
(m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
/* Update Tx private structure for this descriptor */
ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
ll_tx_priv->buffer = m_head;
/*
 * Do mapping -- Required DMA tag has been created in xge_init
 * function and DMA maps have already been created in the
 * xgell_tx_replenish function.
 * Returns number of segments through nsegs
 */
if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
xge_trace(XGE_TRACE, "DMA map load failed");
XGE_DRV_STATS(tx_map_fail);
/* Track the high-water mark of segments per packet */
if(lldev->driver_stats.tx_max_frags < nsegs)
lldev->driver_stats.tx_max_frags = nsegs;
/* Set descriptor buffer for header and each fragment/segment */
xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
(dma_addr_t)htole64(segs[count].ds_addr),
segs[count].ds_len);
} while(count < nsegs);
/* Pre-write Sync of mapping */
bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
BUS_DMASYNC_PREWRITE);
/* TSO: pass the segment size so hardware can split the payload */
if((lldev->enabled_tso) &&
(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
XGE_DRV_STATS(tx_tso);
xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
/* Checksum offload enabled on the interface: offload IP/TCP/UDP */
if(ifnetp->if_hwassist > 0) {
xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
| XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
/* Post descriptor to FIFO channel */
xge_hal_fifo_dtr_post(channelh, dtr);
XGE_DRV_STATS(tx_posted);
/* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
 * listener so that we can use tools like tcpdump */
ETHER_BPF_MTAP(ifnetp, m_head);
/* Out of resources: prepend the packet back to queue, mark the
 * interface busy, and schedule a retry event */
IF_PREPEND(&ifnetp->if_snd, m_head);
ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
XGE_DRV_STATS(tx_again);
3112 * Allocates new mbufs to be placed into descriptors
3114 * @dtrh Descriptor Handle
3115 * @rxd_priv Rx Descriptor Private Data
3116 * @lldev Per-adapter Data
3117 * @index Buffer Index (if multi-buffer mode)
3119 * Returns XGE_HAL_OK or HAL error enums
xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
xge_lldev_t *lldev, int index)
/* Allocate and DMA-map a fresh receive mbuf for buffer slot `index` of a
 * descriptor, swap it in via the pre-allocated spare DMA map, and program
 * the descriptor's buffer pointer (1-buffer mode only; multi-buffer modes
 * set pointers in xge_get_buf_3b_5b). */
register mbuf_t mp = NULL;
struct ifnet *ifnetp = lldev->ifnetp;
int status = XGE_HAL_OK;
int buffer_size = 0, cluster_size = 0, count;
bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
bus_dma_segment_t segs[3];
/* 1-buffer mode sizes for MTU + max MAC header; other modes use the
 * precomputed per-slot length */
buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
lldev->rxd_mbuf_len[index];
/* Pick the smallest cluster size that fits the required buffer */
if(buffer_size <= MCLBYTES) {
cluster_size = MCLBYTES;
mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
cluster_size = MJUMPAGESIZE;
if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
(buffer_size > MJUMPAGESIZE)) {
cluster_size = MJUM9BYTES;
mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
status = XGE_HAL_FAIL;
/* Update mbuf's length, packet length and receive interface */
mp->m_len = mp->m_pkthdr.len = buffer_size;
mp->m_pkthdr.rcvif = ifnetp;
/* Map the new mbuf using the spare map (the old map stays live until
 * the swap below) */
if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
mp, segs, &count, BUS_DMA_NOWAIT)) {
XGE_DRV_STATS(rx_map_fail);
XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
/* Update descriptor private data: install the new mbuf/map and recycle
 * the descriptor's old map as the next spare */
rxd_priv->bufferArray[index] = mp;
rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
lldev->extra_dma_map = map;
/* CPU post-read sync of the old map before unloading it */
bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
/* Unload DMA map of mbuf in current descriptor */
bus_dmamap_unload(lldev->dma_tag_rx, map);
/* Set descriptor buffer */
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3190 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3192 * @dtrh Descriptor Handle
3193 * @rxd_priv Rx Descriptor Private Data
3194 * @lldev Per-adapter Data
3196 * Returns XGE_HAL_OK or HAL error enums
xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
/* Multi-buffer-mode replenish: allocate one mbuf per slot via xge_get_buf,
 * then program all buffer pointers/sizes into the descriptor at once.
 * On any allocation failure, frees the mbufs allocated so far. */
bus_addr_t dma_pointers[5];
int status = XGE_HAL_OK, index;
for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
status = xge_get_buf(dtrh, rxd_priv, lldev, index);
if(status != XGE_HAL_OK) {
/* Roll back the slots already filled before bailing out */
for(newindex = 0; newindex < index; newindex++) {
m_freem(rxd_priv->bufferArray[newindex]);
XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
/* Unused trailing slots get a 1-byte alias of the previous buffer so
 * the hardware always sees valid addresses */
for(index = 0; index < lldev->buffer_mode; index++) {
if(lldev->rxd_mbuf_len[index] != 0) {
dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
dma_sizes[index] = lldev->rxd_mbuf_len[index];
dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
dma_sizes[index] = 1;
/* Assigning second buffer to third pointer in 2 buffer mode */
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
dma_pointers[2] = dma_pointers[1];
dma_sizes[2] = dma_sizes[1];
/* Hand the pointer/size arrays to the mode-specific HAL setter */
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3248 * If the interrupt is due to Tx completion, free the sent buffer
3250 * @channelh Channel Handle
3252 * @t_code Transfer Code indicating success or error
3253 * @userdata Per-adapter Data
3255 * Returns XGE_HAL_OK or HAL error enum
xge_tx_compl(xge_hal_channel_h channelh,
xge_hal_dtr_h dtr, u8 t_code, void *userdata)
/* Tx completion callback: under the per-queue lock, reclaim every
 * completed descriptor (unmap, free mbuf, free descriptor), then kick
 * the transmit path again and clear OACTIVE. */
xge_tx_priv_t *ll_tx_priv = NULL;
xge_lldev_t *lldev = (xge_lldev_t *)userdata;
struct ifnet *ifnetp = lldev->ifnetp;
mbuf_t m_buffer = NULL;
int qindex = xge_hal_channel_id(channelh);
mtx_lock(&lldev->mtx_tx[qindex]);
XGE_DRV_STATS(tx_completions);
/*
 * For each completed descriptor: Get private structure, free buffer,
 * do unmapping, and free descriptor
 */
XGE_DRV_STATS(tx_desc_compl);
/* Error t_code: count it and let the HAL process the error */
XGE_DRV_STATS(tx_tcode);
xge_trace(XGE_TRACE, "t_code %d", t_code);
xge_hal_device_handle_tcode(channelh, dtr, t_code);
ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
m_buffer = ll_tx_priv->buffer;
bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
ll_tx_priv->buffer = NULL;
xge_hal_fifo_dtr_free(channelh, dtr);
} while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
/* Descriptors freed -- retry any queued transmits on this queue */
xge_send_locked(ifnetp, qindex);
ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
mtx_unlock(&lldev->mtx_tx[qindex]);
3301 * xge_tx_initial_replenish
3302 * Initially allocate buffers and set them into descriptors for later use
3304 * @channelh Tx Channel Handle
3305 * @dtrh Descriptor Handle
3307 * @userdata Per-adapter Data
3308 * @reopen Channel open/reopen option
3310 * Returns XGE_HAL_OK or HAL error enums
xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int index, void *userdata, xge_hal_channel_reopen_e reopen)
/* Per-descriptor init callback for the Tx fifo: create the DMA map that
 * xge_send_locked will later load mbuf chains into. */
xge_tx_priv_t *txd_priv = NULL;
int status = XGE_HAL_OK;
/* Get the user data portion from channel handle */
xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
/* Get the private data */
txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
if(txd_priv == NULL) {
XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
/* Create DMA map for this descriptor */
if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
&txd_priv->dma_map)) {
XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
txinit_out, XGE_HAL_FAIL);
3345 * xge_rx_initial_replenish
3346 * Initially allocate buffers and set them into descriptors for later use
3348 * @channelh Tx Channel Handle
3349 * @dtrh Descriptor Handle
3351 * @userdata Per-adapter Data
3352 * @reopen Channel open/reopen option
3354 * Returns XGE_HAL_OK or HAL error enums
xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int index, void *userdata, xge_hal_channel_reopen_e reopen)
/* Per-descriptor init callback for the Rx ring: allocate the per-descriptor
 * mbuf-pointer array, create DMA map(s) for the configured buffer mode, and
 * attach initial receive buffer(s). */
xge_rx_priv_t *rxd_priv = NULL;
int status = XGE_HAL_OK;
int index1 = 0, index2 = 0;
/* Get the user data portion from channel handle */
xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
/* Get the private data */
rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
if(rxd_priv == NULL) {
XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
/* NOTE(review): sizeof(rxd_priv->bufferArray) is the size of the pointer
 * member itself; this yields the right total only if each element is
 * pointer-sized. Confirm against the xge_rx_priv_t declaration --
 * sizeof(*rxd_priv->bufferArray) would state the intent directly. */
rxd_priv->bufferArray = xge_os_malloc(NULL,
(sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
if(rxd_priv->bufferArray == NULL) {
XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
/* Create DMA map for these descriptors */
if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
&rxd_priv->dmainfo[0].dma_map)) {
XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
rxinit_err_out, XGE_HAL_FAIL);
/* Get a buffer, attach it to this descriptor */
status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
/* Multi-buffer modes: one DMA map per buffer slot */
for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
/* Create DMA map for this descriptor */
if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
&rxd_priv->dmainfo[index1].dma_map)) {
/* Creation failed mid-way: destroy the maps made so far */
for(index2 = index1 - 1; index2 >= 0; index2--) {
bus_dmamap_destroy(lldev->dma_tag_rx,
rxd_priv->dmainfo[index2].dma_map);
"Jumbo DMA map creation for Rx descriptor failed",
rxinit_err_out, XGE_HAL_FAIL);
status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
/* Buffer attach failed: tear down every map before the error exit */
if(status != XGE_HAL_OK) {
for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
bus_dmamap_destroy(lldev->dma_tag_rx,
rxd_priv->dmainfo[index1].dma_map);
goto rxinit_err_out;
/* Error path: release the buffer-pointer array allocated above */
xge_os_free(NULL, rxd_priv->bufferArray,
(sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3433 * During unload terminate and free all descriptors
3435 * @channelh Rx Channel Handle
3436 * @dtrh Rx Descriptor Handle
3437 * @state Descriptor State
3438 * @userdata Per-adapter Data
3439 * @reopen Channel open/reopen option
xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_state_e state, void *userdata,
xge_hal_channel_reopen_e reopen)
/* Rx descriptor teardown callback (channel close/unload): for each posted
 * descriptor, sync/unload/destroy its DMA maps, free its mbufs and the
 * buffer-pointer array, then free the descriptor itself. */
xge_rx_priv_t *rxd_priv = NULL;
xge_lldev_t *lldev = NULL;
/* Descriptor state is not "Posted" -- nothing to release */
if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
/* Get the user data portion */
lldev = xge_hal_channel_userdata(channelh);
/* Get the private data */
rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
if(rxd_priv->dmainfo[index].dma_map != NULL) {
bus_dmamap_sync(lldev->dma_tag_rx,
rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(lldev->dma_tag_rx,
rxd_priv->dmainfo[index].dma_map);
if(rxd_priv->bufferArray[index] != NULL)
m_free(rxd_priv->bufferArray[index]);
bus_dmamap_destroy(lldev->dma_tag_rx,
rxd_priv->dmainfo[index].dma_map);
/* Size expression mirrors the allocation in xge_rx_initial_replenish */
xge_os_free(NULL, rxd_priv->bufferArray,
(sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
/* Free the descriptor */
xge_hal_ring_dtr_free(channelh, dtrh);
3483 * During unload terminate and free all descriptors
3485 * @channelh Rx Channel Handle
3486 * @dtrh Rx Descriptor Handle
3487 * @state Descriptor State
3488 * @userdata Per-adapter Data
3489 * @reopen Channel open/reopen option
xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
xge_hal_dtr_state_e state, void *userdata,
xge_hal_channel_reopen_e reopen)
/* Tx descriptor teardown callback (channel close/unload): release the DMA
 * map created in xge_tx_initial_replenish. */
xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
xge_lldev_t *lldev = (xge_lldev_t *)userdata;
/* Destroy DMA map */
bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3506 * FreeBSD device interface entry points
/* newbus device method dispatch table for the nxge driver */
static device_method_t xge_methods[] = {
DEVMETHOD(device_probe, xge_probe),
DEVMETHOD(device_attach, xge_attach),
DEVMETHOD(device_detach, xge_detach),
DEVMETHOD(device_shutdown, xge_shutdown),
/* Driver descriptor: methods table plus per-device softc size */
static driver_t xge_driver = {
sizeof(xge_lldev_t),
static devclass_t xge_devclass;
/* Register the driver on the PCI bus */
DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);