2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/if_nxge.h>
30 #include <dev/nxge/xge-osdep.h>
31 #include <net/if_arp.h>
32 #include <sys/types.h>
34 #include <net/if_var.h>
35 #include <net/if_vlan_var.h>
/* Nonzero once the copyright banner has been printed (set in xge_probe). */
37 int copyright_print = 0;
/* Reference count of HAL driver initializations across attached adapters. */
38 int hal_driver_init_count = 0;
/* NOTE(review): file-scope global named "size" is easy to shadow; verify users. */
39 size_t size = sizeof(int);
/* Forward declaration: reclaim completed Tx descriptors on a HAL channel. */
static inline void xge_flush_txds(xge_hal_channel_h);
45 * Probes for Xframe devices
50 * BUS_PROBE_DEFAULT if device is supported
51 * ENXIO if device is not supported
/*
 * xge_probe
 * Match Xframe I/II PCI vendor/device IDs, print the copyright banner once,
 * set the device description, and claim the device with BUS_PROBE_DEFAULT.
 */
54 xge_probe(device_t dev)
56 int devid = pci_get_device(dev);
57 int vendorid = pci_get_vendor(dev);
60 if(vendorid == XGE_PCI_VENDOR_ID) {
61 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
62 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
/* Print the banner only once per system, not once per adapter. */
63 if(!copyright_print) {
64 xge_os_printf(XGE_COPYRIGHT);
67 device_set_desc_copy(dev,
68 "Neterion Xframe 10 Gigabit Ethernet Adapter");
69 retValue = BUS_PROBE_DEFAULT;
78 * Sets HAL parameter values (from kenv).
80 * @dconfig Device Configuration
/*
 * xge_init_params
 * Populate the HAL device configuration from compiled-in defaults and
 * kenv(2) tunables (hw.xge.*), and record per-adapter options in the softc.
 *
 * @dconfig Device configuration structure to fill in
 * @dev     Device handle (its softc holds the per-adapter data)
 */
84 xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
86 int qindex, tindex, revision;
88 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
/* Hard defaults not overridable from kenv. */
90 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
91 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
92 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
93 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
94 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
95 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
/* Per-adapter feature toggles (TSO/LRO/MSI) from kenv. */
97 XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
98 XGE_DEFAULT_ENABLED_TSO);
99 XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
100 XGE_DEFAULT_ENABLED_LRO);
101 XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
102 XGE_DEFAULT_ENABLED_MSI);
/* PCI/PCI-X and statistics tunables. */
104 XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
105 XGE_DEFAULT_LATENCY_TIMER);
106 XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
107 XGE_DEFAULT_MAX_SPLITS_TRANS);
108 XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
109 XGE_DEFAULT_MMRB_COUNT);
110 XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
111 XGE_DEFAULT_SHARED_SPLITS);
112 XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
113 XGE_DEFAULT_ISR_POLLING_CNT);
114 XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
115 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
/* MAC utilization-period and pause-frame tunables. */
117 XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
118 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
119 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
120 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
121 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
122 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
123 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
124 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
125 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
126 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
127 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
128 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
129 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
130 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
/* Tx FIFO geometry tunables. */
132 XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
133 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
134 XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
135 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
136 XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
137 XGE_DEFAULT_FIFO_MAX_FRAGS);
/* Per-FIFO-queue and per-TTI (Tx interrupt moderation) parameters. */
139 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
140 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
141 XGE_DEFAULT_FIFO_QUEUE_INTR);
142 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
143 XGE_DEFAULT_FIFO_QUEUE_MAX);
144 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
145 qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
147 for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
148 dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
149 dconfig->fifo.queue[qindex].configured = 1;
151 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
152 urange_a, qindex, tindex,
153 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
154 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
155 urange_b, qindex, tindex,
156 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
157 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
158 urange_c, qindex, tindex,
159 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
160 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
161 ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
162 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
163 ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
164 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
165 ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
166 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
167 ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
168 XGE_GET_PARAM_FIFO_QUEUE_TTI(
169 "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
170 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
171 XGE_GET_PARAM_FIFO_QUEUE_TTI(
172 "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
173 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
174 XGE_GET_PARAM_FIFO_QUEUE_TTI(
175 "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
176 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
/* Rx ring tunables. */
180 XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
181 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
183 XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
184 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
/* Only 1- and 2-buffer Rx modes are accepted; anything else falls back to 1. */
186 XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
187 XGE_DEFAULT_BUFFER_MODE);
188 if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
189 (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
190 xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
191 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
/* Per-ring-queue and per-RTI (Rx interrupt moderation) parameters.
 * Requested mode 2 is programmed into hardware as mode 3. */
194 for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
195 dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
196 dconfig->ring.queue[qindex].priority = 0;
197 dconfig->ring.queue[qindex].configured = 1;
198 dconfig->ring.queue[qindex].buffer_mode =
199 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
200 XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
202 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
203 XGE_DEFAULT_RING_QUEUE_MAX);
204 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
205 qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
206 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
207 dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
208 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
209 indicate_max_pkts, qindex,
210 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
211 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
212 backoff_interval_us, qindex,
213 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
215 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
216 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
217 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
218 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
219 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
220 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
221 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
222 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
223 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
224 timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
225 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
226 timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
227 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
228 urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
229 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
230 urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
231 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
232 urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
/* Clamp max_frags to the HAL's per-page descriptor limit. */
235 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
236 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
237 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
238 (int)(PAGE_SIZE / 32))
239 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
240 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
/* Workaround: AMD 8131 PCI-X bridge revisions <= 0x12 need reduced
 * MMRB and split-transaction settings. */
243 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
244 if(checkdev != NULL) {
/* Bridge revisions at or below 0x12 are affected. */
246 revision = pci_read_config(checkdev,
247 xge_offsetof(xge_hal_pci_config_t, revision), 1);
248 if(revision <= 0x12) {
/* NOTE(review): original comment said "max splits = 2" but the constant
 * used is XGE_HAL_THREE_SPLIT_TRANSACTION — confirm intended value. */
250 dconfig->mmrb_count = 1;
251 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
257 * xge_rx_buffer_sizes_set
258 * Set buffer sizes based on Rx buffer mode
260 * @lldev Per-adapter Data
261 * @buffer_mode Rx Buffer Mode
/*
 * xge_rx_buffer_sizes_set
 * Compute per-descriptor mbuf sizes for the selected Rx buffer mode and
 * store them in lldev->rxd_mbuf_len[] (count in lldev->rxd_mbuf_cnt).
 *
 * @lldev       Per-adapter data
 * @buffer_mode Rx buffer mode (1, 2, or 5)
 * @mtu         Current interface MTU
 */
264 xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
267 int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
268 int buffer_size = mtu + frame_header;
270 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
/* Modes 1/2: the last buffer carries the full MTU payload. */
272 if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
273 lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
/* Buffer 0 holds the whole frame in 1-buffer mode, else just the header. */
275 lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
277 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
278 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
/* Mode 5: split remaining payload into MJUMPAGESIZE chunks plus a
 * 128-byte-aligned tail. */
280 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
282 buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
283 while(buffer_size > MJUMPAGESIZE) {
284 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
285 buffer_size -= MJUMPAGESIZE;
287 XGE_ALIGN_TO(buffer_size, 128);
288 lldev->rxd_mbuf_len[index] = buffer_size;
289 lldev->rxd_mbuf_cnt = index + 1;
/* NOTE(review): trace loop iterates buffer_mode entries, which may differ
 * from rxd_mbuf_cnt in mode 5 — confirm that is intended. */
292 for(index = 0; index < buffer_mode; index++)
293 xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
294 lldev->rxd_mbuf_len[index]);
298 * xge_buffer_mode_init
299 * Init Rx buffer mode
301 * @lldev Per-adapter Data
/*
 * xge_buffer_mode_init
 * Initialize the Rx buffer mode: sync LRO capability with the ifnet,
 * program ring scatter/buffer mode, and set per-buffer sizes.
 *
 * @lldev Per-adapter data
 * @mtu   Current interface MTU
 */
305 xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
307 int index = 0, buffer_size = 0;
308 xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
310 buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
/* Reflect the driver's LRO setting in the interface capabilities. */
312 if(lldev->enabled_lro)
313 (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
315 (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
317 lldev->rxd_mbuf_cnt = lldev->buffer_mode;
/* Requested mode 2 maps to hardware mode 3 with scatter mode B. */
318 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
319 XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
320 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
323 XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
324 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
326 xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
/* Log the effective feature set. */
328 xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
329 ((lldev->enabled_tso) ? "Enabled":"Disabled"));
330 xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
331 ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
332 xge_os_printf("%s: Rx %d Buffer Mode Enabled",
333 device_get_nameunit(lldev->device), lldev->buffer_mode);
337 * xge_driver_initialize
338 * Initializes HAL driver (common for all devices)
341 * XGE_HAL_OK if success
342 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
/*
 * xge_driver_initialize
 * One-time HAL driver initialization, shared by all adapters; registers
 * link/error/event callbacks and event-queue sizing. Reference-counted via
 * hal_driver_init_count.
 *
 * Returns XGE_HAL_OK on success, or the HAL error code on failure.
 */
345 xge_driver_initialize(void)
347 xge_hal_uld_cbs_t uld_callbacks;
348 xge_hal_driver_config_t driver_config;
349 xge_hal_status_e status = XGE_HAL_OK;
/* Initialize the HAL driver only on the first attach. */
352 if(!hal_driver_init_count) {
353 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
354 xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
/*
 * Initial and maximum size of the queue used to store events such as
 * link up/down (xge_hal_event_e).
 */
360 driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
361 driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;
/* Upper-layer-driver callbacks invoked by the HAL. */
363 uld_callbacks.link_up = xge_callback_link_up;
364 uld_callbacks.link_down = xge_callback_link_down;
365 uld_callbacks.crit_err = xge_callback_crit_err;
366 uld_callbacks.event = xge_callback_event;
368 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
369 if(status != XGE_HAL_OK) {
370 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
374 hal_driver_init_count = hal_driver_init_count + 1;
/* Enable all HAL debug modules at XGE_TRACE level. */
376 xge_hal_driver_debug_module_mask_set(0xffffffff);
377 xge_hal_driver_debug_level_set(XGE_TRACE);
385 * Initializes, adds and sets media
387 * @devc Device Handle
/*
 * xge_media_init
 * Initialize the ifmedia structure, register the supported media types,
 * and select autoselect as the default.
 *
 * @devc Device handle
 */
390 xge_media_init(device_t devc)
392 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
/* Initialize media with change/status callbacks. */
395 ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
/* Register each supported media type. */
399 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
400 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
401 ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
402 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
403 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
/* Default to autoselect. */
406 ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
411 * Save PCI configuration space
/*
 * xge_pci_space_save
 * Save the adapter's PCI configuration space via pci_cfg_save(9).
 *
 * @dev Device handle
 */
416 xge_pci_space_save(device_t dev)
418 struct pci_devinfo *dinfo = NULL;
420 dinfo = device_get_ivars(dev);
421 xge_trace(XGE_TRACE, "Saving PCI configuration space");
/* Final argument 0: save only, do not power down the device. */
422 pci_cfg_save(dev, dinfo, 0);
426 * xge_pci_space_restore
427 * Restore saved PCI configuration space
/*
 * xge_pci_space_restore
 * Restore the PCI configuration space previously saved by
 * xge_pci_space_save().
 *
 * @dev Device handle
 */
432 xge_pci_space_restore(device_t dev)
434 struct pci_devinfo *dinfo = NULL;
436 dinfo = device_get_ivars(dev);
437 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
438 pci_cfg_restore(dev, dinfo);
445 * @lldev Per-adapter Data
/*
 * xge_msi_info_save
 * Snapshot the MSI capability registers (control, address, data) from PCI
 * config space into lldev->msi_info so they can be restored later.
 *
 * @lldev Per-adapter data
 */
448 xge_msi_info_save(xge_lldev_t * lldev)
450 xge_os_pci_read16(lldev->pdev, NULL,
451 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
452 &lldev->msi_info.msi_control);
453 xge_os_pci_read32(lldev->pdev, NULL,
454 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
455 &lldev->msi_info.msi_lower_address);
456 xge_os_pci_read32(lldev->pdev, NULL,
457 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
458 &lldev->msi_info.msi_higher_address);
459 xge_os_pci_read16(lldev->pdev, NULL,
460 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
461 &lldev->msi_info.msi_data);
465 * xge_msi_info_restore
466 * Restore saved MSI info
/*
 * xge_msi_info_restore
 * Write the MSI capability registers saved by xge_msi_info_save() back to
 * PCI config space.
 *
 * @lldev Per-adapter data
 */
471 xge_msi_info_restore(xge_lldev_t *lldev)
/*
 * If the interface is brought down and up, traffic fails: the MSI
 * registers were observed to be reset on down, so restore them here.
 */
477 xge_os_pci_write16(lldev->pdev, NULL,
478 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
479 lldev->msi_info.msi_control);
481 xge_os_pci_write32(lldev->pdev, NULL,
482 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
483 lldev->msi_info.msi_lower_address);
485 xge_os_pci_write32(lldev->pdev, NULL,
486 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
487 lldev->msi_info.msi_higher_address);
489 xge_os_pci_write16(lldev->pdev, NULL,
490 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
491 lldev->msi_info.msi_data);
496 * Initializes mutexes used in driver
498 * @lldev Per-adapter Data
/*
 * xge_mutex_init
 * Create the driver-wide mutex and one Tx mutex per FIFO queue.
 *
 * @lldev Per-adapter data
 */
501 xge_mutex_init(xge_lldev_t *lldev)
/* NOTE(review): sprintf into fixed-size mtx_name buffers — consider
 * snprintf with the buffer size to guard against truncation/overflow. */
505 sprintf(lldev->mtx_name_drv, "%s_drv",
506 device_get_nameunit(lldev->device));
507 mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
/* One transmit lock per FIFO queue. */
510 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
511 sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
512 device_get_nameunit(lldev->device), qindex);
513 mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
520 * Destroys mutexes used in driver
522 * @lldev Per-adapter Data
/*
 * xge_mutex_destroy
 * Destroy the per-queue Tx mutexes and then the driver mutex
 * (reverse order of xge_mutex_init).
 *
 * @lldev Per-adapter data
 */
525 xge_mutex_destroy(xge_lldev_t *lldev)
529 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
530 mtx_destroy(&lldev->mtx_tx[qindex]);
531 mtx_destroy(&lldev->mtx_drv);
536 * Print device and driver information
538 * @lldev Per-adapter Data
/*
 * xge_print_info
 * Print device and driver information: adapter model/revision/serial,
 * PCI bus width and speed (Herc only), and interrupt mode.
 *
 * @lldev Per-adapter data
 */
541 xge_print_info(xge_lldev_t *lldev)
543 device_t dev = lldev->device;
544 xge_hal_device_t *hldev = lldev->devh;
545 xge_hal_status_e status = XGE_HAL_OK;
/* Bus speed strings indexed by the PCI_INFO field of the pci_info register. */
547 const char *xge_pci_bus_speeds[17] = {
550 "PCIX(M1) 66MHz Bus",
551 "PCIX(M1) 100MHz Bus",
552 "PCIX(M1) 133MHz Bus",
553 "PCIX(M2) 133MHz Bus",
554 "PCIX(M2) 200MHz Bus",
555 "PCIX(M2) 266MHz Bus",
557 "PCIX(M1) 66MHz Bus (Not Supported)",
558 "PCIX(M1) 100MHz Bus (Not Supported)",
559 "PCIX(M1) 133MHz Bus (Not Supported)",
567 xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
568 device_get_nameunit(dev),
569 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
570 hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
571 xge_os_printf("%s: Serial Number %s",
572 device_get_nameunit(dev), hldev->vpd_data.serial_num);
/* Bus width/speed come from the BAR0 pci_info register (Xframe II only). */
574 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
575 status = xge_hal_mgmt_reg_read(hldev, 0,
576 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
577 if(status != XGE_HAL_OK)
578 xge_trace(XGE_ERR, "Error for getting bus speed");
/* Bit 8 set means a 32-bit bus; the top nibble indexes the speed table. */
580 xge_os_printf("%s: Adapter is on %s bit %s",
581 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
582 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
585 xge_os_printf("%s: Using %s Interrupts",
586 device_get_nameunit(dev),
587 (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
591 * xge_create_dma_tags
592 * Creates DMA tags for both Tx and Rx
596 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
/*
 * xge_create_dma_tags
 * Create busdma tags for Tx (multi-segment) and Rx (single-segment, sized
 * by MTU/buffer mode), plus a spare Rx DMA map. On failure the tags are
 * torn down again.
 *
 * @dev Device handle
 * Returns XGE_HAL_OK on success, XGE_HAL_FAIL on error.
 */
599 xge_create_dma_tags(device_t dev)
601 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
602 xge_hal_status_e status = XGE_HAL_FAIL;
603 int mtu = (lldev->ifnetp)->if_mtu, maxsize;
/* Tx tag: up to XGE_MAX_SEGS segments of at most MCLBYTES each. */
606 status = bus_dma_tag_create(
607 bus_get_dma_tag(dev), /* Parent */
608 PAGE_SIZE, /* Alignment */
610 BUS_SPACE_MAXADDR, /* Low Address */
611 BUS_SPACE_MAXADDR, /* High Address */
612 NULL, /* Filter Function */
613 NULL, /* Filter Function Arguments */
614 MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */
615 XGE_MAX_SEGS, /* Number of Segments */
616 MCLBYTES, /* Maximum Segment Size */
617 BUS_DMA_ALLOCNOW, /* Flags */
618 NULL, /* Lock Function */
619 NULL, /* Lock Function Arguments */
620 (&lldev->dma_tag_tx)); /* DMA Tag */
/* Pick the Rx buffer size: cluster, jumbo page, or 9K jumbo. */
624 maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
625 if(maxsize <= MCLBYTES) {
/* Mode 5 always uses page-sized jumbo buffers. */
629 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
630 maxsize = MJUMPAGESIZE;
632 maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
/* Rx tag: a single segment of the chosen size. */
636 status = bus_dma_tag_create(
637 bus_get_dma_tag(dev), /* Parent */
638 PAGE_SIZE, /* Alignment */
640 BUS_SPACE_MAXADDR, /* Low Address */
641 BUS_SPACE_MAXADDR, /* High Address */
642 NULL, /* Filter Function */
643 NULL, /* Filter Function Arguments */
644 maxsize, /* Maximum Size */
645 1, /* Number of Segments */
646 maxsize, /* Maximum Segment Size */
647 BUS_DMA_ALLOCNOW, /* Flags */
648 NULL, /* Lock Function */
649 NULL, /* Lock Function Arguments */
650 (&lldev->dma_tag_rx)); /* DMA Tag */
/* Spare map used when replacing a loaded Rx buffer. */
654 status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
655 &lldev->extra_dma_map);
/* Error path: destroy whatever tags were created before failing. */
663 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
665 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
667 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
669 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
670 status = XGE_HAL_FAIL;
676 * xge_confirm_changes
677 * Disables and Enables interface to apply requested change
679 * @lldev Per-adapter Data
680 * @option Which change to apply (e.g. XGE_SET_MTU when changing the MTU)
682 * Returns 0 or Error Number
/*
 * xge_confirm_changes
 * Apply a requested configuration change (MTU or LRO) by stopping the
 * device, applying the change, and restarting it. If the device is not
 * initialized, an MTU change is applied directly without a restart.
 *
 * @lldev  Per-adapter data
 * @option Which change is being applied (e.g. XGE_SET_MTU, XGE_CHANGE_LRO)
 */
685 xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
687 if(lldev->initialized == 0) goto _exit1;
/* Quiesce the device under the driver lock before reconfiguring. */
689 mtx_lock(&lldev->mtx_drv);
690 if_down(lldev->ifnetp);
691 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
693 if(option == XGE_SET_MTU)
694 (lldev->ifnetp)->if_mtu = lldev->mtu;
/* Recompute Rx buffer layout for the (possibly new) MTU/LRO setting. */
696 xge_buffer_mode_init(lldev, lldev->mtu);
698 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
699 if_up(lldev->ifnetp);
700 mtx_unlock(&lldev->mtx_drv);
/* Request was to change MTU and device not initialized */
705 if(option == XGE_SET_MTU) {
706 (lldev->ifnetp)->if_mtu = lldev->mtu;
707 xge_buffer_mode_init(lldev, lldev->mtu);
714 * xge_change_lro_status
715 * Enable/Disable LRO feature
717 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
719 * Returns 0 or error number.
/*
 * xge_change_lro_status
 * Sysctl handler for hw.xge enable_lro: validates the requested value
 * (0 or 1), skips no-op changes, and restarts the device to apply it.
 *
 * @SYSCTL_HANDLER_ARGS oidp/arg1/arg2/req; arg1 is the per-adapter data
 * Returns 0 or an error number.
 */
722 xge_change_lro_status(SYSCTL_HANDLER_ARGS)
724 xge_lldev_t *lldev = (xge_lldev_t *)arg1;
725 int request = lldev->enabled_lro, status = XGE_HAL_OK;
727 status = sysctl_handle_int(oidp, &request, arg2, req);
/* No new value supplied (read-only access) or handler error: done. */
728 if((status != XGE_HAL_OK) || (!req->newptr))
/* Only 0 (disable) and 1 (enable) are valid. */
731 if((request < 0) || (request > 1)) {
/* Return if current and requested states are same */
737 if(request == lldev->enabled_lro){
738 xge_trace(XGE_ERR, "LRO is already %s",
739 ((request) ? "enabled" : "disabled"));
743 lldev->enabled_lro = request;
744 xge_confirm_changes(lldev, XGE_CHANGE_LRO);
/* NOTE(review): assignment to arg2 only changes the local copy of the
 * handler argument — confirm whether this was meant to have an effect. */
745 arg2 = lldev->enabled_lro;
752 * xge_add_sysctl_handlers
753 * Registers sysctl parameter value update handlers
755 * @lldev Per-adapter data
/*
 * xge_add_sysctl_handlers
 * Register per-device sysctl handlers (currently only enable_lro) under
 * the device's sysctl tree.
 *
 * @lldev Per-adapter data
 */
758 xge_add_sysctl_handlers(xge_lldev_t *lldev)
760 struct sysctl_ctx_list *context_list =
761 device_get_sysctl_ctx(lldev->device);
762 struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
764 SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
765 "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
766 xge_change_lro_status, "I", "Enable or disable LRO feature");
771 * Connects driver to the system if probe was success
/*
 * xge_attach
 * Attach the driver to a probed device: allocate configuration and HAL
 * structures, map BAR0/BAR1, set up MSI or line interrupts, initialize
 * the HAL device, create the network interface, and hook the ISR.
 * Failures unwind via xge_resources_free() with the appropriate label.
 *
 * @dev Device handle
 */
776 xge_attach(device_t dev)
778 xge_hal_device_config_t *device_config;
779 xge_hal_device_attr_t attr;
781 xge_hal_device_t *hldev;
782 xge_pci_info_t *pci_info;
783 struct ifnet *ifnetp;
784 int rid, rid0, rid1, error;
785 int msi_count = 0, status = XGE_HAL_OK;
786 int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
788 device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
790 XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
791 attach_out_config, ENOMEM);
794 lldev = (xge_lldev_t *) device_get_softc(dev);
796 XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
800 xge_mutex_init(lldev);
/* Reference-counted, once-per-system HAL driver initialization. */
802 error = xge_driver_initialize();
803 if(error != XGE_HAL_OK) {
804 xge_resources_free(dev, xge_free_mutex);
805 XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
810 (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
812 xge_resources_free(dev, xge_free_terminate_hal_driver);
813 XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
/* Our private structure */
820 (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
822 xge_resources_free(dev, xge_free_hal_device);
823 XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
826 lldev->pdev = pci_info;
827 pci_info->device = dev;
830 pci_enable_busmaster(dev);
/* Map BAR0 (device registers). */
834 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
836 if(pci_info->regmap0 == NULL) {
837 xge_resources_free(dev, xge_free_pci_info);
838 XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
841 attr.bar0 = (char *)pci_info->regmap0;
843 pci_info->bar0resource = (xge_bus_resource_t*)
844 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
845 if(pci_info->bar0resource == NULL) {
846 xge_resources_free(dev, xge_free_bar0);
847 XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
850 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
851 rman_get_bustag(pci_info->regmap0);
852 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
853 rman_get_bushandle(pci_info->regmap0);
854 ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
/* Map BAR1 (doorbells). */
859 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
861 if(pci_info->regmap1 == NULL) {
862 xge_resources_free(dev, xge_free_bar0_resource);
863 XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
866 attr.bar1 = (char *)pci_info->regmap1;
868 pci_info->bar1resource = (xge_bus_resource_t*)
869 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
870 if(pci_info->bar1resource == NULL) {
871 xge_resources_free(dev, xge_free_bar1);
872 XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
875 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
876 rman_get_bustag(pci_info->regmap1);
877 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
878 rman_get_bushandle(pci_info->regmap1);
879 ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
/* Save PCI config space */
883 xge_pci_space_save(dev);
885 attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
886 attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
887 attr.irqh = lldev->irqhandle;
888 attr.cfgh = pci_info;
889 attr.pdev = pci_info;
/* Fill the HAL device configuration from defaults and kenv tunables. */
892 xge_init_params(device_config, dev);
/* Try MSI if enabled by tunable; the driver uses a single message. */
895 if(lldev->enabled_msi) {
/* Number of MSI messages supported by device */
897 msi_count = pci_msi_count(dev);
/* Device supports MSI */
901 xge_trace(XGE_ERR, "MSI count: %d", msi_count);
902 xge_trace(XGE_ERR, "Now, driver supporting 1 message");
905 error = pci_alloc_msi(dev, &msi_count);
908 xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
909 enable_msi = XGE_HAL_INTR_MODE_MSI;
914 xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
918 lldev->enabled_msi = enable_msi;
/* Allocate the IRQ resource; on failure fall back to line interrupts. */
921 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
922 (RF_SHAREABLE | RF_ACTIVE));
923 if(lldev->irq == NULL) {
924 xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
925 ((rid == 0) ? "line interrupt" : "MSI"));
927 error = pci_release_msi(dev);
929 xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
931 xge_trace(XGE_ERR, "Requires reboot to use MSI again");
933 xge_trace(XGE_ERR, "Trying line interrupts");
935 lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
936 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
937 (RF_SHAREABLE | RF_ACTIVE));
939 if(lldev->irq == NULL) {
940 xge_trace(XGE_ERR, "Allocating irq resource failed");
941 xge_resources_free(dev, xge_free_bar1_resource);
947 device_config->intr_mode = lldev->enabled_msi;
949 xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
950 lldev->enabled_msi, msi_count);
/* Initialize HAL device */
954 error = xge_hal_device_initialize(hldev, &attr, device_config);
955 if(error != XGE_HAL_OK) {
956 xge_resources_free(dev, xge_free_irq_resource);
957 XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
/* Link the softc to the HAL device for callback dispatch. */
961 xge_hal_device_private_set(hldev, lldev);
963 error = xge_interface_setup(dev);
969 ifnetp = lldev->ifnetp;
970 ifnetp->if_mtu = device_config->mtu;
/* Hook the MSI or line-interrupt handler; save MSI registers for
 * restoration on re-init (see xge_msi_info_restore). */
975 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
976 error = bus_setup_intr(dev, lldev->irq,
977 (INTR_TYPE_NET | INTR_MPSAFE),
978 #if __FreeBSD_version > 700030
981 xge_isr_msi, lldev, &lldev->irqhandle);
982 xge_msi_info_save(lldev);
985 error = bus_setup_intr(dev, lldev->irq,
986 (INTR_TYPE_NET | INTR_MPSAFE),
987 #if __FreeBSD_version > 700030
990 xge_isr_line, lldev, &lldev->irqhandle);
993 xge_resources_free(dev, xge_free_media_interface);
994 XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
998 xge_print_info(lldev);
1000 xge_add_sysctl_handlers(lldev);
1002 xge_buffer_mode_init(lldev, device_config->mtu);
/* The config copy is only needed during attach; free it here. */
1005 xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1007 gone_in_dev(dev, 12, "nxge(4) driver");
1012 * xge_resources_free
1013 * Undo what-all we did during load/attach
1015 * @dev Device Handle
1016 * @error Identifies what-all to undo
/*
 * xge_resources_free
 * Undo attach/load work in reverse order. The switch deliberately falls
 * through: entering at a given label releases that resource and every
 * earlier-acquired one below it.
 *
 * @dev   Device handle
 * @error Label identifying how far attach progressed (what to undo)
 */
1019 xge_resources_free(device_t dev, xge_lables_e error)
1022 xge_pci_info_t *pci_info;
1023 xge_hal_device_t *hldev;
1027 lldev = (xge_lldev_t *) device_get_softc(dev);
1028 pci_info = lldev->pdev;
1031 hldev = lldev->devh;
/* Teardown interrupt handler - device association */
1036 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1038 case xge_free_media_interface:
1040 ifmedia_removeall(&lldev->media);
1043 ether_ifdetach(lldev->ifnetp);
1044 if_free(lldev->ifnetp);
1046 xge_hal_device_private_set(hldev, NULL);
1047 xge_hal_device_disable(hldev);
1049 case xge_free_terminate_hal_device:
1051 xge_hal_device_terminate(hldev);
1053 case xge_free_irq_resource:
/* Release IRQ resource: rid is 0 for line interrupts, 1 for MSI. */
1055 bus_release_resource(dev, SYS_RES_IRQ,
1056 ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1059 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1060 status = pci_release_msi(dev);
1064 "pci_release_msi returned %d", status);
1069 case xge_free_bar1_resource:
/* Restore PCI configuration space */
1071 xge_pci_space_restore(dev);
/* Free bar1resource */
1074 xge_os_free(NULL, pci_info->bar1resource,
1075 sizeof(xge_bus_resource_t));
1080 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1083 case xge_free_bar0_resource:
/* Free bar0resource */
1085 xge_os_free(NULL, pci_info->bar0resource,
1086 sizeof(xge_bus_resource_t));
1091 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1094 case xge_free_pci_info:
/* Disable Bus Master */
1096 pci_disable_busmaster(dev);
/* Free pci_info_t */
1100 xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1102 case xge_free_hal_device:
/* Free device configuration struct and HAL device */
1104 xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1106 case xge_free_terminate_hal_driver:
/* Terminate the HAL driver when the last adapter detaches. */
1108 hal_driver_init_count = hal_driver_init_count - 1;
1109 if(!hal_driver_init_count) {
1110 xge_hal_driver_terminate();
1113 case xge_free_mutex:
1114 xge_mutex_destroy(lldev);
1120 * Detaches driver from the Kernel subsystem
1122 * @dev Device Handle
/*
 * xge_detach
 * Detach the driver from the kernel: guard against re-entry with the
 * in_detach flag, then release everything via xge_resources_free.
 *
 * @dev Device handle
 */
1125 xge_detach(device_t dev)
1127 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1129 if(lldev->in_detach == 0) {
1130 lldev->in_detach = 1;
1132 xge_resources_free(dev, xge_free_all);
1140 * To shutdown device before system shutdown
1142 * @dev Device Handle
/*
 * xge_shutdown
 * Shut the device down before system shutdown.
 *
 * @dev Device handle
 */
1145 xge_shutdown(device_t dev)
1147 xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1154 * xge_interface_setup
1157 * @dev Device Handle
1159 * Returns 0 on success, ENXIO/ENOMEM on failure
/*
 * xge_interface_setup
 * Create and attach the network interface: read the MAC address from the
 * device, allocate the ifnet, set callbacks/capabilities, and attach it.
 *
 * @dev Device handle
 * Returns 0 on success, ENXIO/ENOMEM on failure.
 */
1162 xge_interface_setup(device_t dev)
1164 u8 mcaddr[ETHER_ADDR_LEN];
1165 xge_hal_status_e status;
1166 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1167 struct ifnet *ifnetp;
1168 xge_hal_device_t *hldev = lldev->devh;
/* Get the MAC address of the device */
1171 status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1172 if(status != XGE_HAL_OK) {
1173 xge_resources_free(dev, xge_free_terminate_hal_device);
1174 XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
/* Get interface ifnet structure for this Ether device */
1178 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1179 if(ifnetp == NULL) {
1180 xge_resources_free(dev, xge_free_terminate_hal_device);
1181 XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
/* Initialize interface ifnet structure */
1185 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1186 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1187 ifnetp->if_baudrate = XGE_BAUDRATE;
1188 ifnetp->if_init = xge_init;
1189 ifnetp->if_softc = lldev;
1190 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1191 ifnetp->if_ioctl = xge_ioctl;
1192 ifnetp->if_start = xge_send;
/* TODO: Check and assign optimal value */
1195 ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
/* Advertise VLAN/TSO/LRO capabilities per the driver tunables. */
1197 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1199 if(lldev->enabled_tso)
1200 ifnetp->if_capabilities |= IFCAP_TSO4;
1201 if(lldev->enabled_lro)
1202 ifnetp->if_capabilities |= IFCAP_LRO;
1204 ifnetp->if_capenable = ifnetp->if_capabilities;
/* Attach the interface */
1207 ether_ifattach(ifnetp, mcaddr);
1214 * xge_callback_link_up
1215 * Callback for Link-up indication from HAL
1217 * @userdata Per-adapter data
/*
 * HAL calls this when the link transitions to UP: re-enable transmit
 * and notify the network stack of the new link state.
 */
1220 xge_callback_link_up(void *userdata)
1222 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1223 struct ifnet *ifnetp = lldev->ifnetp;
/*
 * NOTE(review): IFF_DRV_OACTIVE is defined for if_drv_flags, not
 * if_flags (see ifnet(9)).  The driver uses if_flags for this bit at
 * every site, so whether it works depends on where the send path tests
 * it -- audit all occurrences together before changing any one of them.
 */
1225 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1226 if_link_state_change(ifnetp, LINK_STATE_UP);
1230 * xge_callback_link_down
1231 * Callback for Link-down indication from HAL
1233 * @userdata Per-adapter data
/*
 * HAL calls this when the link drops: mark transmit as blocked and
 * report the state change to the stack.
 */
1236 xge_callback_link_down(void *userdata)
1238 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1239 struct ifnet *ifnetp = lldev->ifnetp;
/*
 * NOTE(review): same if_flags vs if_drv_flags concern as in
 * xge_callback_link_up -- IFF_DRV_OACTIVE nominally lives in
 * if_drv_flags (ifnet(9)); all sites must be fixed consistently.
 */
1241 ifnetp->if_flags |= IFF_DRV_OACTIVE;
1242 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1246 * xge_callback_crit_err
1247 * Callback for Critical error indication from HAL
1249 * @userdata Per-adapter data
1250 * @type Event type (Enumerated hardware error)
1251 * @serr_data Hardware status
1254 xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1256 xge_trace(XGE_ERR, "Critical Error");
1257 xge_reset(userdata);
1261 * xge_callback_event
1262 * Callback from HAL indicating that some event has been queued
1264 * @item Queued event item
/*
 * Serialized event handler.  TRY_XMIT_AGAIN re-enables transmit once
 * descriptors are free (or re-queues itself otherwise); DEVICE_RESETTING
 * performs a full adapter reset.
 * NOTE(review): break statements and closing braces of the switch are
 * on lines missing from this extract.
 */
1267 xge_callback_event(xge_queue_item_t *item)
1269 xge_lldev_t *lldev = NULL;
1270 xge_hal_device_t *hldev = NULL;
1271 struct ifnet *ifnetp = NULL;
/* The queued item's context is the HAL device; recover our softc from it */
1273 hldev = item->context;
1274 lldev = xge_hal_device_private(hldev);
1275 ifnetp = lldev->ifnetp;
1277 switch((int)item->event_type) {
1278 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1279 if(lldev->initialized) {
/* Only resume when at least one Tx descriptor is available */
1280 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
/* NOTE(review): IFF_DRV_OACTIVE on if_flags -- see link callbacks */
1281 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
/* Still no descriptors: re-post the event and try again later */
1284 xge_queue_produce_context(
1285 xge_hal_device_queue(lldev->devh),
1286 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1291 case XGE_LL_EVENT_DEVICE_RESETTING:
1292 xge_reset(item->context);
1301 * xge_ifmedia_change
1302 * Media change driver callback
1304 * @ifnetp Interface Handle
1306 * Returns 0 if media is Ether else EINVAL
1309 xge_ifmedia_change(struct ifnet *ifnetp)
1311 xge_lldev_t *lldev = ifnetp->if_softc;
1312 struct ifmedia *ifmediap = &lldev->media;
1314 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1318 * xge_ifmedia_status
1319 * Media status driver callback
1321 * @ifnetp Interface Handle
1322 * @ifmr Interface Media Settings
/*
 * Reports media status by reading the adapter-status register: no
 * RMAC local/remote fault means the link is up (10G-SR, full duplex).
 * NOTE(review): "®value" below is extraction mojibake -- the argument
 * was "&regvalue" and the HTML entity &reg; got substituted; the real
 * source also declares a local u64 regvalue on a missing line.
 */
1325 xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1327 xge_hal_status_e status;
1329 xge_lldev_t *lldev = ifnetp->if_softc;
1330 xge_hal_device_t *hldev = lldev->devh;
/* Valid-status flag is always reported; ACTIVE added only on good link */
1332 ifmr->ifm_status = IFM_AVALID;
1333 ifmr->ifm_active = IFM_ETHER;
1335 status = xge_hal_mgmt_reg_read(hldev, 0,
1336 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), ®value);
1337 if(status != XGE_HAL_OK) {
1338 xge_trace(XGE_TRACE, "Getting adapter status failed");
/* Neither local nor remote RMAC fault => link is up */
1342 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1343 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1344 ifmr->ifm_status |= IFM_ACTIVE;
1345 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1346 if_link_state_change(ifnetp, LINK_STATE_UP);
1349 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1357 * IOCTL to get statistics
1359 * @lldev Per-adapter data
1360 * @ifreqp Interface request
/*
 * SIOCGPRIVATE_0 dispatcher: the first byte of the user buffer selects
 * which statistics/config blob to copy out.  Each HAL query is done
 * under mtx_drv; copyout happens outside any visible locking.
 * NOTE(review): this extract omits many lines -- local declarations
 * (cmd, retValue, info, mode), malloc NULL checks, retValue
 * assignments, break statements and the final return.  The use of
 * fubyte() to read the sub-command from user space is presumably
 * validated upstream; confirm ifr_data_get_ptr() is a user pointer
 * here.
 */
1363 xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1365 xge_hal_status_e status = XGE_HAL_OK;
/* First byte of the user buffer doubles as the sub-command code */
1370 cmd = retValue = fubyte(ifr_data_get_ptr(ifreqp));
1376 case XGE_QUERY_STATS:
1377 mtx_lock(&lldev->mtx_drv);
1378 status = xge_hal_stats_hw(lldev->devh,
1379 (xge_hal_stats_hw_info_t **)&info);
1380 mtx_unlock(&lldev->mtx_drv);
1381 if(status == XGE_HAL_OK) {
1382 if(copyout(info, ifr_data_get_ptr(ifreqp),
1383 sizeof(xge_hal_stats_hw_info_t)) == 0)
1387 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1392 case XGE_QUERY_PCICONF:
/* NOTE(review): malloc result checked on a missing line -- verify */
1393 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1395 mtx_lock(&lldev->mtx_drv);
1396 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1397 sizeof(xge_hal_pci_config_t));
1398 mtx_unlock(&lldev->mtx_drv);
1399 if(status == XGE_HAL_OK) {
1400 if(copyout(info, ifr_data_get_ptr(ifreqp),
1401 sizeof(xge_hal_pci_config_t)) == 0)
1406 "Getting PCI configuration failed (%d)", status);
1408 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1412 case XGE_QUERY_DEVSTATS:
1413 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1415 mtx_lock(&lldev->mtx_drv);
1416 status =xge_hal_mgmt_device_stats(lldev->devh, info,
1417 sizeof(xge_hal_stats_device_info_t));
1418 mtx_unlock(&lldev->mtx_drv);
1419 if(status == XGE_HAL_OK) {
1420 if(copyout(info, ifr_data_get_ptr(ifreqp),
1421 sizeof(xge_hal_stats_device_info_t)) == 0)
1425 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1428 xge_os_free(NULL, info,
1429 sizeof(xge_hal_stats_device_info_t));
1433 case XGE_QUERY_SWSTATS:
1434 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1436 mtx_lock(&lldev->mtx_drv);
1437 status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1438 sizeof(xge_hal_stats_sw_err_t));
1439 mtx_unlock(&lldev->mtx_drv);
1440 if(status == XGE_HAL_OK) {
1441 if(copyout(info, ifr_data_get_ptr(ifreqp),
1442 sizeof(xge_hal_stats_sw_err_t)) == 0)
1447 "Getting tcode statistics failed (%d)", status);
1449 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
/* Driver-maintained counters live in the softc; no HAL call needed */
1453 case XGE_QUERY_DRIVERSTATS:
1454 if(copyout(&lldev->driver_stats, ifr_data_get_ptr(ifreqp),
1455 sizeof(xge_driver_stats_t)) == 0) {
1460 "Copyout of driver statistics failed (%d)", status);
1464 case XGE_READ_VERSION:
1465 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
/* strcpy is bounded here only if XGE_DRIVER_VERSION < XGE_BUFFER_SIZE -- verify */
1467 strcpy(info, XGE_DRIVER_VERSION);
1468 if(copyout(info, ifr_data_get_ptr(ifreqp),
1469 XGE_BUFFER_SIZE) == 0)
1471 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1475 case XGE_QUERY_DEVCONF:
1476 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1478 mtx_lock(&lldev->mtx_drv);
1479 status = xge_hal_mgmt_device_config(lldev->devh, info,
1480 sizeof(xge_hal_device_config_t));
1481 mtx_unlock(&lldev->mtx_drv);
1482 if(status == XGE_HAL_OK) {
1483 if(copyout(info, ifr_data_get_ptr(ifreqp),
1484 sizeof(xge_hal_device_config_t)) == 0)
1488 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1491 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1495 case XGE_QUERY_BUFFER_MODE:
1496 if(copyout(&lldev->buffer_mode, ifr_data_get_ptr(ifreqp),
/* Buffer-mode changes are only acknowledged, not applied here */
1501 case XGE_SET_BUFFER_MODE_1:
1502 case XGE_SET_BUFFER_MODE_2:
1503 case XGE_SET_BUFFER_MODE_5:
1504 mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1505 if(copyout(&mode, ifr_data_get_ptr(ifreqp), sizeof(mode)) == 0)
1509 xge_trace(XGE_TRACE, "Nothing is matching");
1517 * xge_ioctl_registers
1518 * IOCTL to get registers
1520 * @lldev Per-adapter data
1521 * @ifreqp Interface request
/*
 * SIOCGPRIVATE_1 handler: copyin an xge_register_t describing the
 * operation ("-r" read one, "-w" write-and-verify one, otherwise dump
 * all BAR0 registers) and copyout the result.
 * NOTE(review): the assignment that points `data` at the copied-in
 * request (presumably data = &tmpdata) is on a missing line, as are
 * u64 val64 and several braces/returns -- verify against full source.
 */
1524 xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1526 xge_register_t tmpdata;
1527 xge_register_t *data;
1528 xge_hal_status_e status = XGE_HAL_OK;
1529 int retValue = EINVAL, offset = 0, index = 0;
1533 error = copyin(ifr_data_get_ptr(ifreqp), &tmpdata, sizeof(tmpdata));
1538 /* Reading a register */
1539 if(strcmp(data->option, "-r") == 0) {
1540 data->value = 0x0000;
1541 mtx_lock(&lldev->mtx_drv);
1542 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1544 mtx_unlock(&lldev->mtx_drv);
1545 if(status == XGE_HAL_OK) {
1546 if(copyout(data, ifr_data_get_ptr(ifreqp),
1547 sizeof(xge_register_t)) == 0)
1551 /* Writing to a register */
1552 else if(strcmp(data->option, "-w") == 0) {
1553 mtx_lock(&lldev->mtx_drv);
1554 status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
/* Write is verified by reading the register back and comparing */
1556 if(status == XGE_HAL_OK) {
1558 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1560 if(status != XGE_HAL_OK) {
1561 xge_trace(XGE_ERR, "Reading back updated register failed");
1564 if(val64 != data->value) {
1566 "Read and written register values mismatched");
1572 xge_trace(XGE_ERR, "Getting register value failed");
1574 mtx_unlock(&lldev->mtx_drv);
/* Neither -r nor -w: dump every BAR0 register, 8 bytes at a time */
1577 mtx_lock(&lldev->mtx_drv);
1578 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1579 index++, offset += 0x0008) {
1581 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1582 if(status != XGE_HAL_OK) {
1583 xge_trace(XGE_ERR, "Getting register value failed");
/* NOTE(review): outer (u64 *) cast is redundant -- (u64 *)data + index suffices */
1586 *((u64 *)((u64 *)data + index)) = val64;
1589 mtx_unlock(&lldev->mtx_drv);
1592 if(copyout(data, ifr_data_get_ptr(ifreqp),
1593 sizeof(xge_hal_pci_bar0_t)) != 0) {
1594 xge_trace(XGE_ERR, "Copyout of register values failed");
1599 xge_trace(XGE_ERR, "Getting register values failed");
1607 * Callback to control the device - Interface configuration
1609 * @ifnetp Interface Handle
1610 * @command Device control command
1611 * @data Parameters associated with command (if any)
/*
 * ifnet if_ioctl entry point.  Handles address/MTU/flags/multicast/
 * media/capability changes and dispatches the two private ioctls to
 * xge_ioctl_stats()/xge_ioctl_registers().
 * NOTE(review): the switch(command) line, most case labels (SIOCSIFMTU,
 * SIOCSIFFLAGS, ...), xge_init/xge_stop calls and break/return lines
 * are on missing lines in this extract.
 */
1614 xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1616 struct ifreq *ifreqp = (struct ifreq *)data;
1617 xge_lldev_t *lldev = ifnetp->if_softc;
1618 struct ifmedia *ifmediap = &lldev->media;
1619 int retValue = 0, mask = 0;
/* Refuse all configuration while detach is in progress */
1621 if(lldev->in_detach) {
1626 /* Set/Get ifnet address */
1629 ether_ioctl(ifnetp, command, data);
1634 retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1637 /* Set ifnet flags */
1639 if(ifnetp->if_flags & IFF_UP) {
1640 /* Link status is UP */
1641 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Re-sync promiscuous state with the current IFF_PROMISC flag */
1644 xge_disable_promisc(lldev);
1645 xge_enable_promisc(lldev);
1648 /* Link status is DOWN */
1649 /* If device is in running, make it down */
1650 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1656 /* Add/delete multicast address */
1659 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1660 xge_setmulti(lldev);
1664 /* Set/Get net media */
1667 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1670 /* Set capabilities */
1672 mtx_lock(&lldev->mtx_drv);
1673 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
/* Disabling TXCSUM also drops TSO4, which depends on it */
1674 if(mask & IFCAP_TXCSUM) {
1675 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1676 ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1677 ifnetp->if_hwassist &=
1678 ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1681 ifnetp->if_capenable |= IFCAP_TXCSUM;
1682 ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1685 if(mask & IFCAP_TSO4) {
1686 if(ifnetp->if_capenable & IFCAP_TSO4) {
1687 ifnetp->if_capenable &= ~IFCAP_TSO4;
1688 ifnetp->if_hwassist &= ~CSUM_TSO;
1690 xge_os_printf("%s: TSO Disabled",
1691 device_get_nameunit(lldev->device));
/* TSO4 may only be enabled while TXCSUM is on */
1693 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1694 ifnetp->if_capenable |= IFCAP_TSO4;
1695 ifnetp->if_hwassist |= CSUM_TSO;
1697 xge_os_printf("%s: TSO Enabled",
1698 device_get_nameunit(lldev->device));
1702 mtx_unlock(&lldev->mtx_drv);
1705 /* Custom IOCTL 0 */
1706 case SIOCGPRIVATE_0:
1707 retValue = xge_ioctl_stats(lldev, ifreqp);
1710 /* Custom IOCTL 1 */
1711 case SIOCGPRIVATE_1:
1712 retValue = xge_ioctl_registers(lldev, ifreqp);
1724 * Initialize the interface
1726 * @plldev Per-adapter Data
1729 xge_init(void *plldev)
1731 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1733 mtx_lock(&lldev->mtx_drv);
1734 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1735 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1736 mtx_unlock(&lldev->mtx_drv);
1741 * Initialize the interface (called by holding lock)
1743 * @pdevin Per-adapter Data
/*
 * Core bring-up path shared by xge_init() and xge_reset().  Caller must
 * hold mtx_drv (asserted below).  Sequence: set MTU, enable HAL device,
 * program the MAC address from the ifnet lladdr, open Tx/Rx channels,
 * set RUNNING, program hwassist from capenable, enable interrupts,
 * start the watchdog callout and honor IFF_PROMISC.
 * NOTE(review): error-exit labels and several braces are on missing
 * lines; also IFF_DRV_OACTIVE is cleared from if_flags (see the link
 * callbacks for the if_flags/if_drv_flags concern).
 */
1746 xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1748 struct ifnet *ifnetp = lldev->ifnetp;
1749 xge_hal_device_t *hldev = lldev->devh;
1750 struct ifaddr *ifaddrp;
1751 unsigned char *macaddr;
1752 struct sockaddr_dl *sockaddrp;
1753 int status = XGE_HAL_OK;
1755 mtx_assert((&lldev->mtx_drv), MA_OWNED);
1757 /* If device is in running state, initializing is not required */
1758 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1761 /* Initializing timer */
1762 callout_init(&lldev->timer, 1);
1764 xge_trace(XGE_TRACE, "Set MTU size");
1765 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1766 if(status != XGE_HAL_OK) {
1767 xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1771 /* Enable HAL device */
1772 xge_hal_device_enable(hldev);
1774 /* Get MAC address and update in HAL */
1775 ifaddrp = ifnetp->if_addr;
1776 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1777 sockaddrp->sdl_type = IFT_ETHER;
1778 sockaddrp->sdl_alen = ifnetp->if_addrlen;
1779 macaddr = LLADDR(sockaddrp);
1780 xge_trace(XGE_TRACE,
1781 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1782 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1783 *(macaddr + 4), *(macaddr + 5));
1784 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1785 if(status != XGE_HAL_OK)
1786 xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
/* Channel open may sleep/allocate, so the driver lock is dropped around it */
1788 /* Opening channels */
1789 mtx_unlock(&lldev->mtx_drv);
1790 status = xge_channel_open(lldev, option);
1791 mtx_lock(&lldev->mtx_drv);
1792 if(status != XGE_HAL_OK)
1795 /* Set appropriate flags */
1796 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
1797 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1799 /* Checksum capability */
1800 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1801 (CSUM_TCP | CSUM_UDP) : 0;
1803 if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1804 ifnetp->if_hwassist |= CSUM_TSO;
1806 /* Enable interrupts */
1807 xge_hal_device_intr_enable(hldev);
/* First watchdog fires after 10s; xge_timer re-arms itself at 1s */
1809 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1811 /* Disable promiscuous mode */
1812 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1813 xge_enable_promisc(lldev);
1815 /* Device is initialized */
1816 lldev->initialized = 1;
/* Settle time for the hardware after bring-up */
1817 xge_os_mdelay(1000);
1825 * Timer timeout function to handle link status
1827 * @devp Per-adapter Data
1830 xge_timer(void *devp)
1832 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1833 xge_hal_device_t *hldev = lldev->devh;
1835 /* Poll for changes */
1836 xge_hal_device_poll(hldev);
1839 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1846 * De-activate the interface
1848 * @lldev Per-adater Data
1851 xge_stop(xge_lldev_t *lldev)
1853 mtx_lock(&lldev->mtx_drv);
1854 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1855 mtx_unlock(&lldev->mtx_drv);
1860 * ISR filter function - to filter interrupts from other devices (shared)
1862 * @handle Per-adapter Data
1865 * FILTER_STRAY if interrupt is from other device
1866 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
/*
 * Fast interrupt filter for a shared line: a non-zero
 * general_int_status means the interrupt is ours and the threaded
 * handler should run.  NOTE(review): the `return retValue;` and the
 * u64 val64 declaration are on missing lines in this extract.
 */
1869 xge_isr_filter(void *handle)
1871 xge_lldev_t *lldev = (xge_lldev_t *)handle;
1872 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1873 u16 retValue = FILTER_STRAY;
1876 XGE_DRV_STATS(isr_filter);
1878 val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1879 &bar0->general_int_status);
1880 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1887 * Interrupt service routine for Line interrupts
1889 * @plldev Per-adapter Data
/*
 * Threaded handler for legacy (INTx) interrupts: let the HAL service
 * the device, then restart transmission if packets are queued.
 * NOTE(review): the call that drains if_snd (presumably xge_send) and
 * the closing braces are on missing lines.
 */
1892 xge_isr_line(void *plldev)
1894 xge_hal_status_e status;
1895 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1896 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1897 struct ifnet *ifnetp = lldev->ifnetp;
1899 XGE_DRV_STATS(isr_line);
1901 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1902 status = xge_hal_device_handle_irq(hldev);
1903 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1910 * ISR for Message signaled interrupts
1913 xge_isr_msi(void *plldev)
1915 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1916 XGE_DRV_STATS(isr_msi);
1917 xge_hal_device_continue_irq(lldev->devh);
1922 * Initiate and open all Rx channels
1925 * @lldev Per-adapter Data
1926 * @rflag Channel open/close/reopen flag
1928 * Returns 0 or Error Number
/*
 * Opens the ring (Rx) channel for queue `qid` with per-descriptor
 * private space for xge_rx_priv_t and the replenish/terminate hooks.
 * NOTE(review): some attr initializers (e.g. post_qid, userdata), the
 * _exit label and the return are on missing lines.
 */
1931 xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1933 u64 adapter_status = 0x0;
1934 xge_hal_status_e status = XGE_HAL_FAIL;
1936 xge_hal_channel_attr_t attr = {
1939 .callback = xge_rx_compl,
1940 .per_dtr_space = sizeof(xge_rx_priv_t),
1942 .type = XGE_HAL_CHANNEL_TYPE_RING,
1944 .dtr_init = xge_rx_initial_replenish,
1945 .dtr_term = xge_rx_term
1948 /* If device is not ready, return */
1949 status = xge_hal_device_status(lldev->devh, &adapter_status);
1950 if(status != XGE_HAL_OK) {
1951 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1952 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1955 status = xge_hal_channel_open(lldev->devh, &attr,
1956 &lldev->ring_channel[qid], rflag);
1965 * Initialize and open all Tx channels
1967 * @lldev Per-adapter Data
1968 * @tflag Channel open/close/reopen flag
1970 * Returns 0 or Error Number
/*
 * Opens one FIFO (Tx) channel per hardware queue; on any failure the
 * channels opened so far are closed again.
 * NOTE(review): local declarations (qindex, index), some attr
 * initializers, the _exit label and return are on missing lines.
 */
1973 xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1975 xge_hal_status_e status = XGE_HAL_FAIL;
1976 u64 adapter_status = 0x0;
1979 xge_hal_channel_attr_t attr = {
1981 .callback = xge_tx_compl,
1982 .per_dtr_space = sizeof(xge_tx_priv_t),
1984 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1986 .dtr_init = xge_tx_initial_replenish,
1987 .dtr_term = xge_tx_term
1990 /* If device is not ready, return */
1991 status = xge_hal_device_status(lldev->devh, &adapter_status);
1992 if(status != XGE_HAL_OK) {
1993 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1994 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1997 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
/* NOTE(review): trailing comma (not semicolon) -- comma operator chains
 * this with the next statement; behavior is identical but it should be ';' */
1998 attr.post_qid = qindex,
1999 status = xge_hal_channel_open(lldev->devh, &attr,
2000 &lldev->fifo_channel[qindex], tflag);
/* Roll back: close every FIFO channel opened before the failure */
2001 if(status != XGE_HAL_OK) {
2002 for(index = 0; index < qindex; index++)
2003 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2015 * @lldev Per-adapter Data
/*
 * Programs MSI for the adapter: reads msi_control from PCI config
 * space, locates msi_data (offset depends on the 64-bit-address
 * capability bit 0x80), then propagates the MSI data value into the
 * TxMAT/RxMAT steering registers of every free channel.
 * NOTE(review): the lines that modify val16 before the two writes are
 * missing from this extract -- the exact bit manipulation cannot be
 * confirmed here.
 */
2018 xge_enable_msi(xge_lldev_t *lldev)
2020 xge_list_t *item = NULL;
2021 xge_hal_device_t *hldev = lldev->devh;
2022 xge_hal_channel_t *channel = NULL;
2023 u16 offset = 0, val16 = 0;
2025 xge_os_pci_read16(lldev->pdev, NULL,
2026 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2028 /* Update msi_data */
/* Bit 0x80 (64-bit capable) shifts the data register from 0x48 to 0x4c */
2029 offset = (val16 & 0x80) ? 0x4c : 0x48;
2030 xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2035 xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2037 /* Update msi_control */
2038 xge_os_pci_read16(lldev->pdev, NULL,
2039 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2041 xge_os_pci_write16(lldev->pdev, NULL,
2042 xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2044 /* Set TxMAT and RxMAT registers with MSI */
2045 xge_list_for_each(item, &hldev->free_channels) {
2046 channel = xge_container_of(item, xge_hal_channel_t, item);
2047 xge_hal_channel_msi_set(channel, 1, (u32)val16);
2053 * Open both Tx and Rx channels
2055 * @lldev Per-adapter Data
2056 * @option Channel reopen option
/*
 * Bring-up of all data channels: optional MSI programming, DMA tag
 * creation, Rx ring channels (with automatic fallback from 1-buffer to
 * 5-buffer mode when jumbo-frame DMA allocation fails), LRO session
 * pool, then Tx FIFO channels.  On failure, channels opened so far are
 * closed via the cleanup path at the end.
 * NOTE(review): several lines are missing (the retry after switching
 * buffer mode, _exit/_exit1 labels, returns, some braces).
 */
2059 xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2061 xge_lro_entry_t *lro_session = NULL;
2062 xge_hal_status_e status = XGE_HAL_OK;
2063 int index = 0, index2 = 0;
2065 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2066 xge_msi_info_restore(lldev);
2067 xge_enable_msi(lldev);
2071 status = xge_create_dma_tags(lldev->device);
2072 if(status != XGE_HAL_OK)
2073 XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2075 /* Open ring (Rx) channel */
2076 for(index = 0; index < XGE_RING_COUNT; index++) {
2077 status = xge_rx_open(index, lldev, option);
2078 if(status != XGE_HAL_OK) {
2080 * DMA mapping fails in the unpatched Kernel which can't
2081 * allocate contiguous memory for Jumbo frames.
2082 * Try using 5 buffer mode.
2084 if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2085 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2087 /* Close so far opened channels */
2088 for(index2 = 0; index2 < index; index2++) {
2089 xge_hal_channel_close(lldev->ring_channel[index2],
2093 /* Destroy DMA tags intended to use for 1 buffer mode */
2094 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2095 lldev->extra_dma_map)) {
2096 xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2098 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2099 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2100 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2101 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2103 /* Switch to 5 buffer mode */
2104 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2105 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2111 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
/* Pre-allocate the LRO session pool; lro_num shrinks on alloc failure */
2117 if(lldev->enabled_lro) {
2118 SLIST_INIT(&lldev->lro_free);
2119 SLIST_INIT(&lldev->lro_active);
2120 lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2122 for(index = 0; index < lldev->lro_num; index++) {
2123 lro_session = (xge_lro_entry_t *)
2124 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2125 if(lro_session == NULL) {
2126 lldev->lro_num = index;
2129 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2133 /* Open FIFO (Tx) channel */
2134 status = xge_tx_open(lldev, option);
2135 if(status != XGE_HAL_OK)
2136 XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2142 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2143 * Initialization of LRO failed (index is XGE_RING_COUNT)
2144 * Opening Tx channel failed (index is XGE_RING_COUNT)
2146 for(index2 = 0; index2 < index; index2++)
2147 xge_hal_channel_close(lldev->ring_channel[index2], option);
2155 * Close both Tx and Rx channels
2157 * @lldev Per-adapter Data
2158 * @option Channel reopen option
/*
 * Tears down all FIFO (Tx) then ring (Rx) channels and destroys the
 * DMA maps/tags created by xge_create_dma_tags().
 * NOTE(review): the qindex declaration and closing brace are on
 * missing lines.
 */
2162 xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2168 /* Close FIFO (Tx) channel */
2169 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2170 xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2172 /* Close Ring (Rx) channels */
2173 for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2174 xge_hal_channel_close(lldev->ring_channel[qindex], option);
/* DMA teardown failures are logged but not propagated */
2176 if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2177 xge_trace(XGE_ERR, "Rx extra map destroy failed");
2178 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2179 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2180 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2181 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2188 * @arg Parameter passed from dmamap
2190 * @nseg Number of segments
/*
 * bus_dmamap_load() callback: stores the first segment's bus address
 * into the caller-supplied bus_addr_t.
 * NOTE(review): an error guard (presumably `if(!error)`) sits on a
 * missing line between the signature and the store -- do not assume
 * the assignment is unconditional.
 */
2194 dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2197 *(bus_addr_t *) arg = segs->ds_addr;
2205 * @lldev Per-adapter Data
2208 xge_reset(xge_lldev_t *lldev)
2210 xge_trace(XGE_TRACE, "Reseting the chip");
2212 /* If the device is not initialized, return */
2213 if(lldev->initialized) {
2214 mtx_lock(&lldev->mtx_drv);
2215 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2216 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2217 mtx_unlock(&lldev->mtx_drv);
2225 * Set an address as a multicast address
2227 * @lldev Per-adapter Data
/*
 * Synchronizes the hardware multicast filter with the ifnet multicast
 * list: toggles all-multicast mode, then rewrites the MAC address
 * table (clear old entries with ff:ff:ff:ff:ff:ff, re-add current
 * list entries at `offset`).
 * NOTE(review): several lines are missing (offset/index declarations,
 * index++ inside the first walk, the overflow warning, braces).  Also
 * note the asymmetry: table_size is 47 but the clearing loop runs 48
 * iterations -- verify intent against the full source.
 */
2230 xge_setmulti(xge_lldev_t *lldev)
2232 struct ifmultiaddr *ifma;
2234 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2235 struct ifnet *ifnetp = lldev->ifnetp;
2238 int table_size = 47;
2239 xge_hal_status_e status = XGE_HAL_OK;
2240 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* Toggle: enable all-multicast the first time, disable on the next call */
2242 if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2243 status = xge_hal_device_mcast_enable(hldev);
2244 lldev->all_multicast = 1;
2246 else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2247 status = xge_hal_device_mcast_disable(hldev);
2248 lldev->all_multicast = 0;
2251 if(status != XGE_HAL_OK) {
2252 xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2256 /* Updating address list */
2257 if_maddr_rlock(ifnetp);
/* First pass only counts link-layer entries (index++ on a missing line) */
2259 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2260 if(ifma->ifma_addr->sa_family != AF_LINK) {
2263 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2266 if_maddr_runlock(ifnetp);
2268 if((!lldev->all_multicast) && (index)) {
2269 lldev->macaddr_count = (index + 1);
2270 if(lldev->macaddr_count > table_size) {
2274 /* Clear old addresses */
2275 for(index = 0; index < 48; index++) {
2276 xge_hal_device_macaddr_set(hldev, (offset + index),
2281 /* Add new addresses */
2282 if_maddr_rlock(ifnetp);
2284 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2285 if(ifma->ifma_addr->sa_family != AF_LINK) {
2288 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2289 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2292 if_maddr_runlock(ifnetp);
2299 * xge_enable_promisc
2300 * Enable Promiscuous Mode
2302 * @lldev Per-adapter Data
/*
 * If the ifnet has IFF_PROMISC set, enables promiscuous receive in the
 * HAL and clears the VLAN-tag-strip mode in rx_pa_cfg so tagged frames
 * arrive intact.
 * NOTE(review): the u64 val64 declaration and the rx_pa_cfg register
 * operand of the read/write (second argument lines) are on missing
 * lines in this extract.
 */
2305 xge_enable_promisc(xge_lldev_t *lldev)
2307 struct ifnet *ifnetp = lldev->ifnetp;
2308 xge_hal_device_t *hldev = lldev->devh;
2309 xge_hal_pci_bar0_t *bar0 = NULL;
2312 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2314 if(ifnetp->if_flags & IFF_PROMISC) {
2315 xge_hal_device_promisc_enable(lldev->devh);
2318 * When operating in promiscuous mode, don't strip the VLAN tag
2320 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
/* Clear then set-to-0: strip-VLAN mode bit ends up cleared */
2322 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2323 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2324 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2327 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2332 * xge_disable_promisc
2333 * Disable Promiscuous Mode
2335 * @lldev Per-adapter Data
/*
 * Unconditionally disables promiscuous receive and re-enables VLAN tag
 * stripping in rx_pa_cfg.
 * NOTE(review): the "&= ~MODE(1)" directly followed by "|= MODE(1)"
 * makes the clear redundant (net effect: set the strip bit).  Harmless
 * but worth simplifying when the full source is at hand.
 */
2338 xge_disable_promisc(xge_lldev_t *lldev)
2340 xge_hal_device_t *hldev = lldev->devh;
2341 xge_hal_pci_bar0_t *bar0 = NULL;
2344 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2346 xge_hal_device_promisc_disable(lldev->devh);
2349 * Strip VLAN tag when operating in non-promiscuous mode
2351 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2353 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2354 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2355 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2358 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2363 * Change interface MTU to a requested valid size
2365 * @lldev Per-adapter Data
2366 * @NewMtu Requested MTU
2368 * Returns 0 or Error Number
/*
 * Validates the requested MTU against HAL limits, records it in the
 * softc and applies it through xge_confirm_changes().
 * NOTE(review): the _exit label and final return are on missing lines.
 */
2371 xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2373 int status = XGE_HAL_OK;
2375 /* Check requested MTU size for boundary */
2376 if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2377 XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2380 lldev->mtu = new_mtu;
/* xge_confirm_changes restarts the device so the new MTU takes effect */
2381 xge_confirm_changes(lldev, XGE_SET_MTU);
2390 * Common code for both stop and part of reset. Disables device, interrupts and
2393 * @dev Device Handle
2394 * @option Channel normal/reset option
/*
 * Quiesce path shared by xge_stop() and xge_reset(); caller holds
 * mtx_drv.  Clears RUNNING, stops the watchdog, disables interrupts,
 * flushes the HAL event queue (lock dropped around the flush, which
 * may invoke callbacks), disables and resets the HAL device, and
 * closes all channels.
 * NOTE(review): the u64 val64 declaration, an early return and closing
 * brace are on missing lines.
 */
2397 xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2399 xge_hal_device_t *hldev = lldev->devh;
2400 struct ifnet *ifnetp = lldev->ifnetp;
2403 mtx_assert((&lldev->mtx_drv), MA_OWNED);
2405 /* If device is not in "Running" state, return */
2406 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2409 /* Set appropriate flags */
2410 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2413 callout_stop(&lldev->timer);
2415 /* Disable interrupts */
2416 xge_hal_device_intr_disable(hldev);
/* Event-queue flush can run callbacks that take mtx_drv: drop it first */
2418 mtx_unlock(&lldev->mtx_drv);
2419 xge_queue_flush(xge_hal_device_queue(lldev->devh));
2420 mtx_lock(&lldev->mtx_drv);
2422 /* Disable HAL device */
2423 if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2424 xge_trace(XGE_ERR, "Disabling HAL device failed");
2425 xge_hal_device_status(hldev, &val64);
2426 xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2429 /* Close Tx and Rx channels */
2430 xge_channel_close(lldev, option);
2432 /* Reset HAL device */
2433 xge_hal_device_reset(hldev);
/* Settle time after reset before declaring the device uninitialized */
2435 xge_os_mdelay(1000);
2436 lldev->initialized = 0;
2438 if_link_state_change(ifnetp, LINK_STATE_DOWN);
2445 * xge_set_mbuf_cflags
2446 * set checksum flag for the mbuf
2451 xge_set_mbuf_cflags(mbuf_t pkt)
2453 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2454 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2455 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2456 pkt->m_pkthdr.csum_data = htons(0xffff);
2460 * xge_lro_flush_sessions
2461 * Flush LRO session and send accumulated LRO packet to upper layer
2463 * @lldev Per-adapter Data
2466 xge_lro_flush_sessions(xge_lldev_t *lldev)
2468 xge_lro_entry_t *lro_session = NULL;
2470 while(!SLIST_EMPTY(&lldev->lro_active)) {
2471 lro_session = SLIST_FIRST(&lldev->lro_active);
2472 SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2473 xge_lro_flush(lldev, lro_session);
2479 * Flush LRO session. Send accumulated LRO packet to upper layer
2481 * @lldev Per-adapter Data
2482 * @lro LRO session to be flushed
/*
 * Finalizes an aggregated LRO packet: rewrites the IP total length,
 * pkthdr length, TCP ack/window and (if present) the echoed timestamp
 * option, pushes the chain up via if_input, then recycles the session
 * entry onto the free list.
 * NOTE(review): the u32 *ptr declaration and closing braces are on
 * missing lines in this extract.
 */
2485 xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2487 struct ip *header_ip;
2488 struct tcphdr *header_tcp;
/* Only rewrite headers when something was actually appended */
2491 if(lro_session->append_cnt) {
2492 header_ip = lro_session->lro_header_ip;
2493 header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2494 lro_session->m_head->m_pkthdr.len = lro_session->len;
2495 header_tcp = (struct tcphdr *)(header_ip + 1);
2496 header_tcp->th_ack = lro_session->ack_seq;
2497 header_tcp->th_win = lro_session->window;
/* ptr[1]/ptr[2]: TSval/TSecr of the stored timestamp option */
2498 if(lro_session->timestamp) {
2499 ptr = (u32 *)(header_tcp + 1);
2500 ptr[1] = htonl(lro_session->tsval);
2501 ptr[2] = lro_session->tsecr;
/* Hand the completed chain to the stack and reset session state */
2505 (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2506 lro_session->m_head = NULL;
2507 lro_session->timestamp = 0;
2508 lro_session->append_cnt = 0;
2509 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2513 * xge_lro_accumulate
2514 * Accumulate packets to form a large LRO packet based on various conditions
2516 * @lldev Per-adapter Data
2517 * @m_head Current Packet
2519 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
/*
 * Large Receive Offload accumulator: attempt to merge one received TCP/IPv4
 * frame into an active LRO session, or start a new session for its flow.
 * Returns XGE_HAL_OK when the frame was consumed here; XGE_HAL_FAIL when
 * the caller must pass the frame to the stack unmodified (non-IP, non-TCP,
 * IP options/fragments, unexpected TCP flags or options, no free session).
 *
 * NOTE(review): this listing is incomplete -- the early-return statements,
 * several local declarations (ptr, seq, trim) and some closing braces from
 * the original file are not visible here.
 */
2522 xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2524 struct ether_header *header_ethernet;
2525 struct ip *header_ip;
2526 struct tcphdr *header_tcp;
2528 struct mbuf *buffer_next, *buffer_tail;
2529 xge_lro_entry_t *lro_session;
2530 xge_hal_status_e status = XGE_HAL_FAIL;
2531 int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2534 /* Get Ethernet header */
2535 header_ethernet = mtod(m_head, struct ether_header *);
2537 /* Return if it is not IP packet */
2538 if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
/*
 * In 1-buffer mode the IP header follows the Ethernet header in the same
 * mbuf; in multi-buffer modes the headers were split by the NIC, so the IP
 * header lives at the start of the second mbuf in the chain.
 */
2542 header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2543 (struct ip *)(header_ethernet + 1) :
2544 mtod(m_head->m_next, struct ip *);
2546 /* Return if it is not TCP packet */
2547 if(header_ip->ip_p != IPPROTO_TCP)
2550 /* Return if packet has options */
2551 if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2554 /* Return if packet is fragmented */
2555 if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2558 /* Get TCP header */
2559 header_tcp = (struct tcphdr *)(header_ip + 1);
2561 /* Return if not ACK or PUSH */
2562 if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2565 /* Only timestamp option is handled */
2566 tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2567 tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2568 ptr = (u32 *)(header_tcp + 1);
2569 if(tcp_options != 0) {
/*
 * Accept only the "appendix A" timestamp layout (NOP,NOP,TS,len=10);
 * any other option mix defeats merging, so bail out.
 */
2570 if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2571 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2572 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2577 /* Total length of packet (IP) */
2578 ip_len = ntohs(header_ip->ip_len);
/* Payload bytes = IP total length minus TCP header minus IP header */
2581 tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2583 /* If the frame is padded, trim it */
2584 tot_len = m_head->m_pkthdr.len;
2585 trim = tot_len - (ip_len + ETHER_HDR_LEN);
2589 m_adj(m_head, -trim);
2590 tot_len = m_head->m_pkthdr.len;
/* Walk the chain to find the last mbuf; needed to splice appends */
2593 buffer_next = m_head;
2595 while(buffer_next != NULL) {
2596 buffer_tail = buffer_next;
2597 buffer_next = buffer_tail->m_next;
2600 /* Total size of only headers */
2601 hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2603 /* Get sequence number */
2604 seq = ntohl(header_tcp->th_seq);
/* Match the frame's 4-tuple against every active LRO session */
2606 SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2607 if(lro_session->source_port == header_tcp->th_sport &&
2608 lro_session->dest_port == header_tcp->th_dport &&
2609 lro_session->source_ip == header_ip->ip_src.s_addr &&
2610 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2612 /* Unmatched sequence number, flush LRO session */
2613 if(__predict_false(seq != lro_session->next_seq)) {
2614 SLIST_REMOVE(&lldev->lro_active, lro_session,
2615 xge_lro_entry_t, next);
2616 xge_lro_flush(lldev, lro_session);
2620 /* Handle timestamp option */
2622 u32 tsval = ntohl(*(ptr + 1));
/* NOTE(review): condition continues on an elided line (tsval regression
 * check, presumably PAWS-style) -- confirm against full source. */
2623 if(__predict_false(lro_session->tsval > tsval ||
2627 lro_session->tsval = tsval;
2628 lro_session->tsecr = *(ptr + 2);
/* In-order segment: advance expected seq, track latest ack/window */
2631 lro_session->next_seq += tcp_data_len;
2632 lro_session->ack_seq = header_tcp->th_ack;
2633 lro_session->window = header_tcp->th_win;
2635 /* If TCP data/payload is of 0 size, free mbuf */
2636 if(tcp_data_len == 0) {
2638 status = XGE_HAL_OK;
2642 lro_session->append_cnt++;
2643 lro_session->len += tcp_data_len;
2645 /* Adjust mbuf so that m_data points to payload than headers */
2646 m_adj(m_head, hlen);
2648 /* Append this packet to LRO accumulated packet */
2649 lro_session->m_tail->m_next = m_head;
2650 lro_session->m_tail = buffer_tail;
2652 /* Flush if LRO packet is exceeding maximum size */
2653 if(lro_session->len >
2654 (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2655 SLIST_REMOVE(&lldev->lro_active, lro_session,
2656 xge_lro_entry_t, next);
2657 xge_lro_flush(lldev, lro_session);
2659 status = XGE_HAL_OK;
/* No matching session: start one if a free session entry is available */
2664 if(SLIST_EMPTY(&lldev->lro_free))
2667 /* Start a new LRO session */
2668 lro_session = SLIST_FIRST(&lldev->lro_free);
2669 SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2670 SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2671 lro_session->source_port = header_tcp->th_sport;
2672 lro_session->dest_port = header_tcp->th_dport;
2673 lro_session->source_ip = header_ip->ip_src.s_addr;
2674 lro_session->dest_ip = header_ip->ip_dst.s_addr;
2675 lro_session->next_seq = seq + tcp_data_len;
2676 lro_session->mss = tcp_data_len;
2677 lro_session->ack_seq = header_tcp->th_ack;
2678 lro_session->window = header_tcp->th_win;
/* Keep a pointer to the IP header so the flush path can rewrite ip_len */
2680 lro_session->lro_header_ip = header_ip;
2682 /* Handle timestamp option */
2684 lro_session->timestamp = 1;
2685 lro_session->tsval = ntohl(*(ptr + 1));
2686 lro_session->tsecr = *(ptr + 2);
2689 lro_session->len = tot_len;
2690 lro_session->m_head = m_head;
2691 lro_session->m_tail = buffer_tail;
2692 status = XGE_HAL_OK;
2699 * xge_accumulate_large_rx
2700 * Accumulate packets to form a large LRO packet based on various conditions
2702 * @lldev Per-adapter Data
2703 * @pkt Current packet
2704 * @pkt_length Packet Length
2705 * @rxd_priv Rx Descriptor Private Data
/*
 * Feed a received packet into the LRO engine; if LRO cannot consume it
 * (non-TCP, out of sessions, etc.), sync its DMA buffer and hand the
 * packet directly to the network stack via if_input.
 * NOTE(review): only dmainfo[0] is synced here even in multi-buffer
 * modes -- the remainder of the function is not visible in this listing.
 */
2708 xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2709 xge_rx_priv_t *rxd_priv)
2711 if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2712 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2713 BUS_DMASYNC_POSTREAD);
2714 (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2720 * If the interrupt is due to received frame (Rx completion), send it up
2722 * @channelh Ring Channel Handle
2723 * @dtr Current Descriptor
2724 * @t_code Transfer Code indicating success or error
2725 * @userdata Per-adapter Data
2727 * Returns XGE_HAL_OK or HAL error enums
/*
 * Rx completion callback (invoked by the HAL for each completed ring
 * descriptor): take the filled mbuf, attach a fresh buffer to the
 * descriptor, repost it, and deliver the packet up (through LRO when
 * enabled, or straight to if_input).
 * Returns XGE_HAL_OK, or XGE_HAL_FAIL when the interface is not running
 * or setup fails.
 * NOTE(review): the do/while loop braces and several declarations
 * (index, vlan_tag) are elided from this listing.
 */
2730 xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2733 struct ifnet *ifnetp;
2734 xge_rx_priv_t *rxd_priv = NULL;
2735 mbuf_t mbuf_up = NULL;
2736 xge_hal_status_e status = XGE_HAL_OK;
2737 xge_hal_dtr_info_t ext_info;
2741 /*get the user data portion*/
2742 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2744 XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2747 XGE_DRV_STATS(rx_completions);
2749 /* get the interface pointer */
2750 ifnetp = lldev->ifnetp;
2753 XGE_DRV_STATS(rx_desc_compl);
/* Drop everything if the interface has been brought down */
2755 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2756 status = XGE_HAL_FAIL;
/* Hardware reported an error t_code: let HAL handle it, repost dtr */
2761 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2762 XGE_DRV_STATS(rx_tcode);
2763 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2764 xge_hal_ring_dtr_post(channelh,dtr);
2768 /* Get the private data for this descriptor*/
2769 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2772 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2777 * Prepare one buffer to send it to upper layer -- since the upper
2778 * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2779 * prepare a new buffer, do mapping, use it in the current
2780 * descriptor and post descriptor back to ring channel
2782 mbuf_up = rxd_priv->bufferArray[0];
2784 /* Gets details of mbuf i.e., packet length */
2785 xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
/* Replenish: 1-buffer mode takes the single-buffer path, others 3b/5b */
2788 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2789 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2790 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2792 if(status != XGE_HAL_OK) {
2793 xge_trace(XGE_ERR, "No memory");
2794 XGE_DRV_STATS(rx_no_buf);
2797 * Unable to allocate buffer. Instead of discarding, post
2798 * descriptor back to channel for future processing of same
2801 xge_hal_ring_dtr_post(channelh, dtr);
2805 /* Get the extended information */
2806 xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2809 * As we have allocated a new mbuf for this descriptor, post this
2810 * descriptor with new mbuf back to ring channel
2812 vlan_tag = ext_info.vlan;
2813 xge_hal_ring_dtr_post(channelh, dtr);
/* Hardware checksum valid only for unfragmented TCP/UDP over IP with
 * both L3 and L4 checksums reported OK */
2814 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2815 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2816 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2817 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2819 /* set Checksum Flag */
2820 xge_set_mbuf_cflags(mbuf_up);
2822 if(lldev->enabled_lro) {
2823 xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2827 /* Post-Read sync for buffers*/
2828 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
/* NOTE(review): syncs dmainfo[0] on every iteration rather than
 * dmainfo[index] -- confirm against full source whether intended. */
2829 bus_dmamap_sync(lldev->dma_tag_rx,
2830 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2832 (*ifnetp->if_input)(ifnetp, mbuf_up);
2837 * Packet with erroneous checksum , let the upper layer deal
2841 /* Post-Read sync for buffers*/
2842 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2843 bus_dmamap_sync(lldev->dma_tag_rx,
2844 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
/* Tag the mbuf with the hardware-extracted VLAN id */
2848 mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2849 mbuf_up->m_flags |= M_VLANTAG;
2852 if(lldev->enabled_lro)
2853 xge_lro_flush_sessions(lldev);
2855 (*ifnetp->if_input)(ifnetp, mbuf_up);
2857 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
/* Flush any still-open LRO sessions before leaving the completion loop */
2860 if(lldev->enabled_lro)
2861 xge_lro_flush_sessions(lldev);
2871 * @mbuf_up Packet to send up
2872 * @channelh Ring Channel Handle
2874 * @lldev Per-adapter Data
2875 * @rxd_priv Rx Descriptor Private Data
2877 * Returns XGE_HAL_OK or HAL error enums
/*
 * Extract packet length information from a completed Rx descriptor and
 * propagate it into the mbuf (chain): per-buffer m_len in multi-buffer
 * modes, plus the total in mbuf_up->m_pkthdr.len.
 * NOTE(review): the chain-walking cursor 'm' and loop variable 'index'
 * are declared on lines not visible in this listing.
 */
2880 xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2881 xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
/* NOTE(review): initializer has 2 elements for a 5-slot array; the
 * remaining slots are zero-initialized by C aggregate rules. */
2884 int pkt_length[5]={0,0}, pkt_len=0;
2885 dma_addr_t dma_data[5];
2891 if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2892 xge_os_memzero(pkt_length, sizeof(pkt_length));
2895 * Retrieve data of interest from the completed descriptor -- This
2896 * returns the packet length
2898 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2899 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2902 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
/* Set each chained mbuf's length and link it to the next buffer */
2905 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2906 m->m_len = pkt_length[index];
2908 if(index < (lldev->rxd_mbuf_cnt-1)) {
2909 m->m_next = rxd_priv->bufferArray[index + 1];
2915 pkt_len+=pkt_length[index];
2919 * Since 2 buffer mode is an exceptional case where data is in 3rd
2920 * buffer but not in 2nd buffer
2922 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2923 m->m_len = pkt_length[2];
2924 pkt_len+=pkt_length[2];
2928 * Update length of newly created buffer to be sent up with packet
2931 mbuf_up->m_pkthdr.len = pkt_len;
2935 * Retrieve data of interest from the completed descriptor -- This
2936 * returns the packet length
2938 xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2941 * Update length of newly created buffer to be sent up with packet
2944 mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2952 * Flush Tx descriptors
2954 * @channelh Channel handle
/*
 * Reclaim completed Tx descriptors: for each one the HAL reports done,
 * unload its DMA map, free the transmitted mbuf, and return the
 * descriptor to the fifo. Called when descriptor reservation fails to
 * make room for new transmits.
 * NOTE(review): the declaration of t_code and some loop braces are
 * elided from this listing.
 */
2957 xge_flush_txds(xge_hal_channel_h channelh)
2959 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2960 xge_hal_dtr_h tx_dtr;
2961 xge_tx_priv_t *tx_priv;
2964 while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2966 XGE_DRV_STATS(tx_desc_compl);
/* Hardware error on this descriptor: count it and let the HAL react */
2968 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2969 XGE_DRV_STATS(tx_tcode);
2970 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2973 tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2974 bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2975 m_freem(tx_priv->buffer);
2976 tx_priv->buffer = NULL;
2977 xge_hal_fifo_dtr_free(channelh, tx_dtr);
2985 * @ifnetp Interface Handle
/*
 * if_start entry point: try each Tx fifo in turn, and for every queue
 * whose mutex can be acquired without blocking, drain pending packets
 * via xge_send_locked(). A contended lock is simply skipped (the holder
 * will transmit), counted in tx_lock_fail.
 * NOTE(review): the qindex declaration is on a line elided here.
 */
2988 xge_send(struct ifnet *ifnetp)
2991 xge_lldev_t *lldev = ifnetp->if_softc;
2993 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2994 if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2995 XGE_DRV_STATS(tx_lock_fail);
2998 xge_send_locked(ifnetp, qindex);
2999 mtx_unlock(&lldev->mtx_tx[qindex]);
/*
 * Transmit path for one fifo (caller holds mtx_tx[qindex]): dequeue
 * packets from if_snd, defragment chains that exceed the fifo's fragment
 * limit, reserve a descriptor, DMA-map the mbuf, program VLAN/TSO/
 * checksum-offload bits, and post the descriptor to hardware.
 * On descriptor exhaustion the packet is prepended back to if_snd,
 * IFF_DRV_OACTIVE is set, and a retry event is queued.
 * NOTE(review): the enclosing packet loop, the dtr/vlan_tag declarations
 * and several braces are elided from this listing.
 */
3004 xge_send_locked(struct ifnet *ifnetp, int qindex)
/* NOTE(review): 'static' segs[] is shared across calls -- safe only
 * because each call runs under the per-queue Tx mutex; confirm no two
 * queues can execute here concurrently in the full source. */
3007 static bus_dma_segment_t segs[XGE_MAX_SEGS];
3008 xge_hal_status_e status;
3009 unsigned int max_fragments;
3010 xge_lldev_t *lldev = ifnetp->if_softc;
3011 xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
3012 mbuf_t m_head = NULL;
3013 mbuf_t m_buf = NULL;
3014 xge_tx_priv_t *ll_tx_priv = NULL;
3015 register unsigned int count = 0;
3016 unsigned int nsegs = 0;
3019 max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3021 /* If device is not initialized, return */
3022 if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3025 XGE_DRV_STATS(tx_calls);
3028 * This loop will be executed for each packet in the kernel maintained
3029 * queue -- each packet can be with fragments as an mbuf chain
3032 IF_DEQUEUE(&ifnetp->if_snd, m_head);
3033 if (m_head == NULL) {
3034 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
/* Count non-empty mbufs in the chain to compare against max_frags */
3038 for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3039 if(m_buf->m_len) count += 1;
3042 if(count >= max_fragments) {
/* Collapse the chain; on m_defrag failure keep the original chain */
3043 m_buf = m_defrag(m_head, M_NOWAIT);
3044 if(m_buf != NULL) m_head = m_buf;
3045 XGE_DRV_STATS(tx_defrag);
3048 /* Reserve descriptors */
3049 status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3050 if(status != XGE_HAL_OK) {
3051 XGE_DRV_STATS(tx_no_txd);
/* Out of descriptors: reclaim completed ones before giving up */
3052 xge_flush_txds(channelh);
3057 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3058 xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3060 /* Update Tx private structure for this descriptor */
3061 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3062 ll_tx_priv->buffer = m_head;
3065 * Do mapping -- Required DMA tag has been created in xge_init
3066 * function and DMA maps have already been created in the
3067 * xgell_tx_replenish function.
3068 * Returns number of segments through nsegs
3070 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3071 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3072 xge_trace(XGE_TRACE, "DMA map load failed");
3073 XGE_DRV_STATS(tx_map_fail);
/* Track the high-water mark of segments per packet for diagnostics */
3077 if(lldev->driver_stats.tx_max_frags < nsegs)
3078 lldev->driver_stats.tx_max_frags = nsegs;
3080 /* Set descriptor buffer for header and each fragment/segment */
3083 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3084 (dma_addr_t)htole64(segs[count].ds_addr),
3085 segs[count].ds_len);
3087 } while(count < nsegs);
3089 /* Pre-write Sync of mapping */
3090 bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3091 BUS_DMASYNC_PREWRITE);
/* Program the segment size for TSO when the stack requested it */
3093 if((lldev->enabled_tso) &&
3094 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3095 XGE_DRV_STATS(tx_tso);
3096 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
/* Enable hardware IPv4/TCP/UDP checksum offload if the interface
 * advertises any hardware assistance */
3100 if(ifnetp->if_hwassist > 0) {
3101 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3102 | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3105 /* Post descriptor to FIFO channel */
3106 xge_hal_fifo_dtr_post(channelh, dtr);
3107 XGE_DRV_STATS(tx_posted);
3109 /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3110 * listener so that we can use tools like tcpdump */
3111 ETHER_BPF_MTAP(ifnetp, m_head);
3114 /* Prepend the packet back to queue */
3115 IF_PREPEND(&ifnetp->if_snd, m_head);
3116 ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
/* Schedule a deferred retry once descriptors become available */
3118 xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3119 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3120 XGE_DRV_STATS(tx_again);
3125 * Allocates new mbufs to be placed into descriptors
3127 * @dtrh Descriptor Handle
3128 * @rxd_priv Rx Descriptor Private Data
3129 * @lldev Per-adapter Data
3130 * @index Buffer Index (if multi-buffer mode)
3132 * Returns XGE_HAL_OK or HAL error enums
/*
 * Allocate a fresh mbuf cluster for one Rx descriptor buffer slot,
 * DMA-map it via the spare map (extra_dma_map), then swap maps so the
 * descriptor's old map becomes the new spare. Unloads the old mapping
 * and programs the descriptor in 1-buffer mode.
 * Returns XGE_HAL_OK on success, XGE_HAL_FAIL on allocation/mapping
 * failure.
 * NOTE(review): error-exit labels (getbuf_out) and some braces are
 * elided from this listing.
 */
3135 xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3136 xge_lldev_t *lldev, int index)
3138 register mbuf_t mp = NULL;
3139 struct ifnet *ifnetp = lldev->ifnetp;
3140 int status = XGE_HAL_OK;
3141 int buffer_size = 0, cluster_size = 0, count;
3142 bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
3143 bus_dma_segment_t segs[3];
/* 1-buffer mode sizes the cluster for MTU + max MAC header; otherwise
 * use the per-slot length table computed at init time */
3145 buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3146 ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3147 lldev->rxd_mbuf_len[index];
/* Pick the smallest cluster zone that fits: 2K, page-size jumbo, or 9K */
3149 if(buffer_size <= MCLBYTES) {
3150 cluster_size = MCLBYTES;
3151 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3154 cluster_size = MJUMPAGESIZE;
3155 if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3156 (buffer_size > MJUMPAGESIZE)) {
3157 cluster_size = MJUM9BYTES;
3159 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3162 xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3163 status = XGE_HAL_FAIL;
3167 /* Update mbuf's length, packet length and receive interface */
3168 mp->m_len = mp->m_pkthdr.len = buffer_size;
3169 mp->m_pkthdr.rcvif = ifnetp;
/* Map the new mbuf using the spare DMA map so the old mapping stays
 * intact until the swap below succeeds */
3172 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3173 mp, segs, &count, BUS_DMA_NOWAIT)) {
3174 XGE_DRV_STATS(rx_map_fail);
3176 XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3179 /* Update descriptor private data */
3180 rxd_priv->bufferArray[index] = mp;
3181 rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3182 rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
/* Old descriptor map becomes the new spare */
3183 lldev->extra_dma_map = map;
3185 /* Pre-Read/Write sync */
3186 bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3188 /* Unload DMA map of mbuf in current descriptor */
3189 bus_dmamap_unload(lldev->dma_tag_rx, map);
3191 /* Set descriptor buffer */
3192 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3193 xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3203 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3205 * @dtrh Descriptor Handle
3206 * @rxd_priv Rx Descriptor Private Data
3207 * @lldev Per-adapter Data
3209 * Returns XGE_HAL_OK or HAL error enums
/*
 * Multi-buffer-mode replenish: allocate/map one mbuf per buffer slot via
 * xge_get_buf(), rolling back earlier allocations on failure, then build
 * the DMA pointer/size arrays and program the descriptor with the 3b or
 * 5b HAL setter.
 * NOTE(review): the newindex/dma_sizes declarations and the lldev
 * parameter line are elided from this listing.
 */
3212 xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3215 bus_addr_t dma_pointers[5];
3217 int status = XGE_HAL_OK, index;
3220 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3221 status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3222 if(status != XGE_HAL_OK) {
/* Roll back: free every mbuf allocated so far for this descriptor */
3223 for(newindex = 0; newindex < index; newindex++) {
3224 m_freem(rxd_priv->bufferArray[newindex]);
3226 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
/* Slots with zero configured length still need a valid (dummy) pointer:
 * reuse the previous slot's address with a 1-byte size */
3230 for(index = 0; index < lldev->buffer_mode; index++) {
3231 if(lldev->rxd_mbuf_len[index] != 0) {
3232 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3233 dma_sizes[index] = lldev->rxd_mbuf_len[index];
3236 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3237 dma_sizes[index] = 1;
3241 /* Assigning second buffer to third pointer in 2 buffer mode */
3242 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3243 dma_pointers[2] = dma_pointers[1];
3244 dma_sizes[2] = dma_sizes[1];
3248 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3249 xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3252 xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3261 * If the interrupt is due to Tx completion, free the sent buffer
3263 * @channelh Channel Handle
3265 * @t_code Transfer Code indicating success or error
3266 * @userdata Per-adapter Data
3268 * Returns XGE_HAL_OK or HAL error enum
/*
 * Tx completion callback (invoked by the HAL): under the per-queue Tx
 * mutex, walk completed fifo descriptors, unmap and free each sent
 * mbuf, return the descriptors, then kick the transmit path again and
 * clear IFF_DRV_OACTIVE since descriptors are now available.
 * NOTE(review): the do-loop opening brace and mbuf free call are on
 * lines elided from this listing.
 */
3271 xge_tx_compl(xge_hal_channel_h channelh,
3272 xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3274 xge_tx_priv_t *ll_tx_priv = NULL;
3275 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3276 struct ifnet *ifnetp = lldev->ifnetp;
3277 mbuf_t m_buffer = NULL;
3278 int qindex = xge_hal_channel_id(channelh);
3280 mtx_lock(&lldev->mtx_tx[qindex]);
3282 XGE_DRV_STATS(tx_completions);
3285 * For each completed descriptor: Get private structure, free buffer,
3286 * do unmapping, and free descriptor
3289 XGE_DRV_STATS(tx_desc_compl);
/* Hardware error on this descriptor: count and hand to the HAL */
3292 XGE_DRV_STATS(tx_tcode);
3293 xge_trace(XGE_TRACE, "t_code %d", t_code);
3294 xge_hal_device_handle_tcode(channelh, dtr, t_code);
3297 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3298 m_buffer = ll_tx_priv->buffer;
3299 bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3301 ll_tx_priv->buffer = NULL;
3302 xge_hal_fifo_dtr_free(channelh, dtr);
3303 } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
/* Descriptors reclaimed: restart transmission on this queue */
3305 xge_send_locked(ifnetp, qindex);
3306 ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3308 mtx_unlock(&lldev->mtx_tx[qindex]);
3314 * xge_tx_initial_replenish
3315 * Initially allocate buffers and set them into descriptors for later use
3317 * @channelh Tx Channel Handle
3318 * @dtrh Descriptor Handle
3320 * @userdata Per-adapter Data
3321 * @reopen Channel open/reopen option
3323 * Returns XGE_HAL_OK or HAL error enums
/*
 * Per-descriptor Tx init callback (channel open): fetch the descriptor's
 * private area and create the DMA map that xge_send_locked will later
 * load mbufs into. Returns XGE_HAL_OK, or XGE_HAL_FAIL via
 * XGE_EXIT_ON_ERR on any failure.
 * NOTE(review): the txinit_out label/return lines are elided from this
 * listing.
 */
3326 xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3327 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3329 xge_tx_priv_t *txd_priv = NULL;
3330 int status = XGE_HAL_OK;
3332 /* Get the user data portion from channel handle */
3333 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3335 XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3339 /* Get the private data */
3340 txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3341 if(txd_priv == NULL) {
3342 XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3346 /* Create DMA map for this descriptor */
3347 if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3348 &txd_priv->dma_map)) {
3349 XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3350 txinit_out, XGE_HAL_FAIL);
3358 * xge_rx_initial_replenish
3359 * Initially allocate buffers and set them into descriptors for later use
3361 * @channelh Tx Channel Handle
3362 * @dtrh Descriptor Handle
3364 * @userdata Per-adapter Data
3365 * @reopen Channel open/reopen option
3367 * Returns XGE_HAL_OK or HAL error enums
/*
 * Per-descriptor Rx init callback (channel open): allocate the
 * per-descriptor mbuf-pointer array, create DMA map(s) -- one in
 * 1-buffer mode, one per slot otherwise -- and attach freshly allocated
 * buffer(s) to the descriptor. Cleans up maps/array on failure.
 * Returns XGE_HAL_OK or XGE_HAL_FAIL.
 * NOTE(review): the rxinit_out/rxinit_err_out labels and return lines
 * are elided from this listing.
 */
3370 xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3371 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3373 xge_rx_priv_t *rxd_priv = NULL;
3374 int status = XGE_HAL_OK;
3375 int index1 = 0, index2 = 0;
3377 /* Get the user data portion from channel handle */
3378 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3380 XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3384 /* Get the private data */
3385 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3386 if(rxd_priv == NULL) {
3387 XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
/* NOTE(review): sizeof(rxd_priv->bufferArray) is the size of the
 * pointer field itself, not of an element -- matches the free sites
 * below, but confirm the intended sizing in the full source. */
3391 rxd_priv->bufferArray = xge_os_malloc(NULL,
3392 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3394 if(rxd_priv->bufferArray == NULL) {
3395 XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3399 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3400 /* Create DMA map for these descriptors*/
3401 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3402 &rxd_priv->dmainfo[0].dma_map)) {
3403 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3404 rxinit_err_out, XGE_HAL_FAIL);
3406 /* Get a buffer, attach it to this descriptor */
3407 status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
/* Multi-buffer modes: one DMA map per buffer slot */
3410 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3411 /* Create DMA map for this descriptor */
3412 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3413 &rxd_priv->dmainfo[index1].dma_map)) {
/* Unwind previously created maps before bailing out */
3414 for(index2 = index1 - 1; index2 >= 0; index2--) {
3415 bus_dmamap_destroy(lldev->dma_tag_rx,
3416 rxd_priv->dmainfo[index2].dma_map);
3419 "Jumbo DMA map creation for Rx descriptor failed",
3420 rxinit_err_out, XGE_HAL_FAIL);
3423 status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3426 if(status != XGE_HAL_OK) {
3427 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3428 bus_dmamap_destroy(lldev->dma_tag_rx,
3429 rxd_priv->dmainfo[index1].dma_map);
3431 goto rxinit_err_out;
/* Error path: release the buffer-pointer array allocated above */
3438 xge_os_free(NULL, rxd_priv->bufferArray,
3439 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3446 * During unload terminate and free all descriptors
3448 * @channelh Rx Channel Handle
3449 * @dtrh Rx Descriptor Handle
3450 * @state Descriptor State
3451 * @userdata Per-adapter Data
3452 * @reopen Channel open/reopen option
/*
 * Rx descriptor terminate callback (channel close/unload): for each
 * still-posted descriptor, sync/unload/destroy its DMA map(s), free the
 * attached mbuf(s) and the buffer-pointer array, then return the
 * descriptor to the ring.
 * NOTE(review): the 'index' declaration and rxterm_out label are on
 * lines elided from this listing.
 */
3455 xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3456 xge_hal_dtr_state_e state, void *userdata,
3457 xge_hal_channel_reopen_e reopen)
3459 xge_rx_priv_t *rxd_priv = NULL;
3460 xge_lldev_t *lldev = NULL;
3463 /* Descriptor state is not "Posted" */
3464 if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3466 /* Get the user data portion */
3467 lldev = xge_hal_channel_userdata(channelh);
3469 /* Get the private data */
3470 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3472 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3473 if(rxd_priv->dmainfo[index].dma_map != NULL) {
3474 bus_dmamap_sync(lldev->dma_tag_rx,
3475 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3476 bus_dmamap_unload(lldev->dma_tag_rx,
3477 rxd_priv->dmainfo[index].dma_map);
3478 if(rxd_priv->bufferArray[index] != NULL)
3479 m_free(rxd_priv->bufferArray[index]);
3480 bus_dmamap_destroy(lldev->dma_tag_rx,
3481 rxd_priv->dmainfo[index].dma_map);
/* Matches the allocation sizing used in xge_rx_initial_replenish */
3484 xge_os_free(NULL, rxd_priv->bufferArray,
3485 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3487 /* Free the descriptor */
3488 xge_hal_ring_dtr_free(channelh, dtrh);
3496 * During unload terminate and free all descriptors
3498 * @channelh Rx Channel Handle
3499 * @dtrh Rx Descriptor Handle
3500 * @state Descriptor State
3501 * @userdata Per-adapter Data
3502 * @reopen Channel open/reopen option
/*
 * Tx descriptor terminate callback (channel close/unload): destroy the
 * DMA map created in xge_tx_initial_replenish. The in-flight mbuf, if
 * any, is handled elsewhere; only the map is torn down here.
 */
3505 xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3506 xge_hal_dtr_state_e state, void *userdata,
3507 xge_hal_channel_reopen_e reopen)
3509 xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3510 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3512 /* Destroy DMA map */
3513 bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3519 * FreeBSD device interface entry points
/* newbus glue: method table mapping device framework entry points to the
 * driver's probe/attach/detach/shutdown implementations */
3521 static device_method_t xge_methods[] = {
3522 DEVMETHOD(device_probe, xge_probe),
3523 DEVMETHOD(device_attach, xge_attach),
3524 DEVMETHOD(device_detach, xge_detach),
3525 DEVMETHOD(device_shutdown, xge_shutdown),
/* Driver descriptor: softc size is the per-adapter xge_lldev_t */
3530 static driver_t xge_driver = {
3533 sizeof(xge_lldev_t),
3535 static devclass_t xge_devclass;
/* Register the "nxge" driver on the PCI bus */
3536 DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);