1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "2.3.7";
51 /*********************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
61 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
83 /* required last entry */
/*********************************************************************
 *  Table of branding strings
 *  (indexed by the last field of ixgbe_vendor_info_array entries)
 *********************************************************************/

static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
95 /*********************************************************************
97 *********************************************************************/
98 static int ixgbe_probe(device_t);
99 static int ixgbe_attach(device_t);
100 static int ixgbe_detach(device_t);
101 static int ixgbe_shutdown(device_t);
102 static void ixgbe_start(struct ifnet *);
103 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
104 #if __FreeBSD_version >= 800000
105 static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
106 static int ixgbe_mq_start_locked(struct ifnet *,
107 struct tx_ring *, struct mbuf *);
108 static void ixgbe_qflush(struct ifnet *);
110 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
111 static void ixgbe_init(void *);
112 static void ixgbe_init_locked(struct adapter *);
113 static void ixgbe_stop(void *);
114 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
115 static int ixgbe_media_change(struct ifnet *);
116 static void ixgbe_identify_hardware(struct adapter *);
117 static int ixgbe_allocate_pci_resources(struct adapter *);
118 static int ixgbe_allocate_msix(struct adapter *);
119 static int ixgbe_allocate_legacy(struct adapter *);
120 static int ixgbe_allocate_queues(struct adapter *);
121 static int ixgbe_setup_msix(struct adapter *);
122 static void ixgbe_free_pci_resources(struct adapter *);
123 static void ixgbe_local_timer(void *);
124 static int ixgbe_setup_interface(device_t, struct adapter *);
125 static void ixgbe_config_link(struct adapter *);
127 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
128 static int ixgbe_setup_transmit_structures(struct adapter *);
129 static void ixgbe_setup_transmit_ring(struct tx_ring *);
130 static void ixgbe_initialize_transmit_units(struct adapter *);
131 static void ixgbe_free_transmit_structures(struct adapter *);
132 static void ixgbe_free_transmit_buffers(struct tx_ring *);
134 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
135 static int ixgbe_setup_receive_structures(struct adapter *);
136 static int ixgbe_setup_receive_ring(struct rx_ring *);
137 static void ixgbe_initialize_receive_units(struct adapter *);
138 static void ixgbe_free_receive_structures(struct adapter *);
139 static void ixgbe_free_receive_buffers(struct rx_ring *);
140 static void ixgbe_setup_hw_rsc(struct rx_ring *);
142 static void ixgbe_enable_intr(struct adapter *);
143 static void ixgbe_disable_intr(struct adapter *);
144 static void ixgbe_update_stats_counters(struct adapter *);
145 static bool ixgbe_txeof(struct tx_ring *);
146 static bool ixgbe_rxeof(struct ix_queue *, int);
147 static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
148 static void ixgbe_set_promisc(struct adapter *);
149 static void ixgbe_set_multi(struct adapter *);
150 static void ixgbe_update_link_status(struct adapter *);
151 static void ixgbe_refresh_mbufs(struct rx_ring *, int);
152 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
153 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
155 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
156 struct ixgbe_dma_alloc *, int);
157 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
158 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
159 const char *, int *, int);
160 static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
161 static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
162 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
163 static void ixgbe_configure_ivars(struct adapter *);
164 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
166 static void ixgbe_setup_vlan_hw_support(struct adapter *);
167 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
168 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
170 static void ixgbe_add_hw_stats(struct adapter *adapter);
172 static __inline void ixgbe_rx_discard(struct rx_ring *, int);
173 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
176 /* Support for pluggable optic modules */
177 static bool ixgbe_sfp_probe(struct adapter *);
179 /* Legacy (single vector interrupt handler */
180 static void ixgbe_legacy_irq(void *);
182 /* The MSI/X Interrupt handlers */
183 static void ixgbe_msix_que(void *);
184 static void ixgbe_msix_link(void *);
186 /* Deferred interrupt tasklets */
187 static void ixgbe_handle_que(void *, int);
188 static void ixgbe_handle_link(void *, int);
189 static void ixgbe_handle_msf(void *, int);
190 static void ixgbe_handle_mod(void *, int);
193 static void ixgbe_atr(struct tx_ring *, struct mbuf *);
194 static void ixgbe_reinit_fdir(void *, int);
197 /*********************************************************************
198 * FreeBSD Device Interface Entry Points
199 *********************************************************************/
201 static device_method_t ixgbe_methods[] = {
202 /* Device interface */
203 DEVMETHOD(device_probe, ixgbe_probe),
204 DEVMETHOD(device_attach, ixgbe_attach),
205 DEVMETHOD(device_detach, ixgbe_detach),
206 DEVMETHOD(device_shutdown, ixgbe_shutdown),
210 static driver_t ixgbe_driver = {
211 "ix", ixgbe_methods, sizeof(struct adapter),
214 devclass_t ixgbe_devclass;
215 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
217 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
218 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
221 ** TUNEABLE PARAMETERS:
225 ** AIM: Adaptive Interrupt Moderation
226 ** which means that the interrupt rate
227 ** is varied over time based on the
228 ** traffic for that interrupt vector
230 static int ixgbe_enable_aim = TRUE;
231 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
233 static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
234 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
236 /* How many packets rxeof tries to clean at a time */
237 static int ixgbe_rx_process_limit = 128;
238 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
240 /* Flow control setting, default to full */
241 static int ixgbe_flow_control = ixgbe_fc_full;
242 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
245 ** Smart speed setting, default to on
246 ** this only works as a compile option
247 ** right now as its during attach, set
248 ** this to 'ixgbe_smart_speed_off' to
251 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
254 * MSIX should be the default for best performance,
255 * but this allows it to be forced off for testing.
257 static int ixgbe_enable_msix = 1;
258 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
261 * Header split: this causes the hardware to DMA
262 * the header into a separate mbuf from the payload,
263 * it can be a performance win in some workloads, but
264 * in others it actually hurts, its off by default.
266 static bool ixgbe_header_split = FALSE;
267 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
270 * Number of Queues, can be set to 0,
271 * it then autoconfigures based on the
272 * number of cpus. Each queue is a pair
273 * of RX and TX rings with a msix vector
275 static int ixgbe_num_queues = 0;
276 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
279 ** Number of TX descriptors per ring,
280 ** setting higher than RX as this seems
281 ** the better performing choice.
283 static int ixgbe_txd = PERFORM_TXD;
284 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
286 /* Number of RX descriptors per ring */
287 static int ixgbe_rxd = PERFORM_RXD;
288 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
290 /* Keep running tab on them for sanity check */
291 static int ixgbe_total_ports;
294 ** The number of scatter-gather segments
295 ** differs for 82598 and 82599, default to
298 static int ixgbe_num_segs = IXGBE_82598_SCATTER;
302 ** For Flow Director: this is the
303 ** number of TX packets we sample
304 ** for the filter pool, this means
305 ** every 20th packet will be probed.
307 ** This feature can be disabled by
308 ** setting this to 0.
310 static int atr_sample_rate = 20;
312 ** Flow Director actually 'steals'
313 ** part of the packet buffer as its
314 ** filter pool, this variable controls
316 ** 0 = 64K, 1 = 128K, 2 = 256K
318 static int fdir_pballoc = 1;
321 /*********************************************************************
322 * Device identification routine
324 * ixgbe_probe determines if the driver should be loaded on
325 * adapter based on PCI vendor/device id of the adapter.
327 * return 0 on success, positive on failure
328 *********************************************************************/
331 ixgbe_probe(device_t dev)
333 ixgbe_vendor_info_t *ent;
335 u16 pci_vendor_id = 0;
336 u16 pci_device_id = 0;
337 u16 pci_subvendor_id = 0;
338 u16 pci_subdevice_id = 0;
339 char adapter_name[256];
341 INIT_DEBUGOUT("ixgbe_probe: begin");
343 pci_vendor_id = pci_get_vendor(dev);
344 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
347 pci_device_id = pci_get_device(dev);
348 pci_subvendor_id = pci_get_subvendor(dev);
349 pci_subdevice_id = pci_get_subdevice(dev);
351 ent = ixgbe_vendor_info_array;
352 while (ent->vendor_id != 0) {
353 if ((pci_vendor_id == ent->vendor_id) &&
354 (pci_device_id == ent->device_id) &&
356 ((pci_subvendor_id == ent->subvendor_id) ||
357 (ent->subvendor_id == 0)) &&
359 ((pci_subdevice_id == ent->subdevice_id) ||
360 (ent->subdevice_id == 0))) {
361 sprintf(adapter_name, "%s, Version - %s",
362 ixgbe_strings[ent->index],
363 ixgbe_driver_version);
364 device_set_desc_copy(dev, adapter_name);
373 /*********************************************************************
374 * Device initialization routine
376 * The attach entry point is called when the driver is being loaded.
377 * This routine identifies the type of hardware, allocates all resources
378 * and initializes the hardware.
380 * return 0 on success, positive on failure
381 *********************************************************************/
384 ixgbe_attach(device_t dev)
386 struct adapter *adapter;
389 u16 pci_device_id, csum;
392 INIT_DEBUGOUT("ixgbe_attach: begin");
394 /* Allocate, clear, and link in our adapter structure */
395 adapter = device_get_softc(dev);
396 adapter->dev = adapter->osdep.dev = dev;
400 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
402 /* Keep track of optics */
403 pci_device_id = pci_get_device(dev);
404 switch (pci_device_id) {
405 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
406 case IXGBE_DEV_ID_82598EB_CX4:
407 adapter->optics = IFM_10G_CX4;
409 case IXGBE_DEV_ID_82598:
410 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
411 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
412 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
413 case IXGBE_DEV_ID_82598EB_SFP_LOM:
414 case IXGBE_DEV_ID_82598AT:
415 adapter->optics = IFM_10G_SR;
417 case IXGBE_DEV_ID_82598AT2:
418 adapter->optics = IFM_10G_T;
420 case IXGBE_DEV_ID_82598EB_XF_LR:
421 adapter->optics = IFM_10G_LR;
423 case IXGBE_DEV_ID_82599_SFP:
424 adapter->optics = IFM_10G_SR;
425 ixgbe_num_segs = IXGBE_82599_SCATTER;
427 case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
428 adapter->optics = IFM_10G_TWINAX;
430 case IXGBE_DEV_ID_82599_KX4:
431 case IXGBE_DEV_ID_82599_KX4_MEZZ:
432 case IXGBE_DEV_ID_82599_CX4:
433 adapter->optics = IFM_10G_CX4;
434 ixgbe_num_segs = IXGBE_82599_SCATTER;
436 case IXGBE_DEV_ID_82599_XAUI_LOM:
437 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
438 ixgbe_num_segs = IXGBE_82599_SCATTER;
440 case IXGBE_DEV_ID_82599_T3_LOM:
441 ixgbe_num_segs = IXGBE_82599_SCATTER;
442 adapter->optics = IFM_10G_T;
444 ixgbe_num_segs = IXGBE_82599_SCATTER;
450 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
453 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
455 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457 OID_AUTO, "advertise_gig", CTLTYPE_INT | CTLFLAG_RW,
458 adapter, 0, ixgbe_set_advertise, "I", "1G Link");
460 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
461 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
463 &ixgbe_enable_aim, 1, "Interrupt Moderation");
465 /* Set up the timer callout */
466 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
468 /* Determine hardware revision */
469 ixgbe_identify_hardware(adapter);
471 /* Do base PCI setup - map BAR0 */
472 if (ixgbe_allocate_pci_resources(adapter)) {
473 device_printf(dev, "Allocation of PCI resources failed\n");
478 /* Do descriptor calc and sanity checks */
479 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
480 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
481 device_printf(dev, "TXD config issue, using default!\n");
482 adapter->num_tx_desc = DEFAULT_TXD;
484 adapter->num_tx_desc = ixgbe_txd;
487 ** With many RX rings it is easy to exceed the
488 ** system mbuf allocation. Tuning nmbclusters
489 ** can alleviate this.
491 if (nmbclusters > 0 ) {
493 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
494 if (s > nmbclusters) {
495 device_printf(dev, "RX Descriptors exceed "
496 "system mbuf max, using default instead!\n");
497 ixgbe_rxd = DEFAULT_RXD;
501 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
502 ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
503 device_printf(dev, "RXD config issue, using default!\n");
504 adapter->num_rx_desc = DEFAULT_RXD;
506 adapter->num_rx_desc = ixgbe_rxd;
508 /* Allocate our TX/RX Queues */
509 if (ixgbe_allocate_queues(adapter)) {
514 /* Allocate multicast array memory. */
515 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
516 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
517 if (adapter->mta == NULL) {
518 device_printf(dev, "Can not allocate multicast setup array\n");
523 /* Initialize the shared code */
524 error = ixgbe_init_shared_code(hw);
525 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
527 ** No optics in this port, set up
528 ** so the timer routine will probe
529 ** for later insertion.
531 adapter->sfp_probe = TRUE;
533 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
534 device_printf(dev,"Unsupported SFP+ module detected!\n");
538 device_printf(dev,"Unable to initialize the shared code\n");
543 /* Make sure we have a good EEPROM before we read from it */
544 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
545 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
550 /* Pick up the smart speed setting */
551 if (hw->mac.type == ixgbe_mac_82599EB)
552 hw->phy.smart_speed = ixgbe_smart_speed;
554 /* Get Hardware Flow Control setting */
555 hw->fc.requested_mode = ixgbe_fc_full;
556 hw->fc.pause_time = IXGBE_FC_PAUSE;
557 hw->fc.low_water = IXGBE_FC_LO;
558 hw->fc.high_water = IXGBE_FC_HI;
559 hw->fc.send_xon = TRUE;
561 error = ixgbe_init_hw(hw);
562 if (error == IXGBE_ERR_EEPROM_VERSION) {
563 device_printf(dev, "This device is a pre-production adapter/"
564 "LOM. Please be aware there may be issues associated "
565 "with your hardware.\n If you are experiencing problems "
566 "please contact your Intel or hardware representative "
567 "who provided you with this hardware.\n");
568 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
569 device_printf(dev,"Unsupported SFP+ Module\n");
573 device_printf(dev,"Hardware Initialization Failure\n");
577 if ((adapter->msix > 1) && (ixgbe_enable_msix))
578 error = ixgbe_allocate_msix(adapter);
580 error = ixgbe_allocate_legacy(adapter);
584 /* Setup OS specific network interface */
585 if (ixgbe_setup_interface(dev, adapter) != 0)
588 /* Sysctl for limiting the amount of work done in the taskqueue */
589 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
590 "max number of rx packets to process", &adapter->rx_process_limit,
591 ixgbe_rx_process_limit);
593 /* Initialize statistics */
594 ixgbe_update_stats_counters(adapter);
596 /* Register for VLAN events */
597 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
598 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
599 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
600 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
602 /* Print PCIE bus type/speed/width info */
603 ixgbe_get_bus_info(hw);
604 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
605 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
606 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
607 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
608 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
609 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
612 if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
613 (hw->bus.speed == ixgbe_bus_speed_2500)) {
614 device_printf(dev, "PCI-Express bandwidth available"
615 " for this card\n is not sufficient for"
616 " optimal performance.\n");
617 device_printf(dev, "For optimal performance a x8 "
618 "PCIE, or x4 PCIE 2 slot is required.\n");
621 /* let hardware know driver is loaded */
622 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
623 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
624 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
626 ixgbe_add_hw_stats(adapter);
628 INIT_DEBUGOUT("ixgbe_attach: end");
631 ixgbe_free_transmit_structures(adapter);
632 ixgbe_free_receive_structures(adapter);
634 if (adapter->ifp != NULL)
635 if_free(adapter->ifp);
636 ixgbe_free_pci_resources(adapter);
637 free(adapter->mta, M_DEVBUF);
642 /*********************************************************************
643 * Device removal routine
645 * The detach entry point is called when the driver is being removed.
646 * This routine stops the adapter and deallocates all the resources
647 * that were allocated for driver operation.
649 * return 0 on success, positive on failure
650 *********************************************************************/
653 ixgbe_detach(device_t dev)
655 struct adapter *adapter = device_get_softc(dev);
656 struct ix_queue *que = adapter->queues;
659 INIT_DEBUGOUT("ixgbe_detach: begin");
661 /* Make sure VLANS are not using driver */
662 if (adapter->ifp->if_vlantrunk != NULL) {
663 device_printf(dev,"Vlan in use, detach first\n");
667 IXGBE_CORE_LOCK(adapter);
669 IXGBE_CORE_UNLOCK(adapter);
671 for (int i = 0; i < adapter->num_queues; i++, que++) {
673 taskqueue_drain(que->tq, &que->que_task);
674 taskqueue_free(que->tq);
678 /* Drain the Link queue */
680 taskqueue_drain(adapter->tq, &adapter->link_task);
681 taskqueue_drain(adapter->tq, &adapter->mod_task);
682 taskqueue_drain(adapter->tq, &adapter->msf_task);
684 taskqueue_drain(adapter->tq, &adapter->fdir_task);
686 taskqueue_free(adapter->tq);
689 /* let hardware know driver is unloading */
690 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
691 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
692 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
694 /* Unregister VLAN events */
695 if (adapter->vlan_attach != NULL)
696 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
697 if (adapter->vlan_detach != NULL)
698 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
700 ether_ifdetach(adapter->ifp);
701 callout_drain(&adapter->timer);
702 ixgbe_free_pci_resources(adapter);
703 bus_generic_detach(dev);
704 if_free(adapter->ifp);
706 ixgbe_free_transmit_structures(adapter);
707 ixgbe_free_receive_structures(adapter);
708 free(adapter->mta, M_DEVBUF);
710 IXGBE_CORE_LOCK_DESTROY(adapter);
714 /*********************************************************************
716 * Shutdown entry point
718 **********************************************************************/
721 ixgbe_shutdown(device_t dev)
723 struct adapter *adapter = device_get_softc(dev);
724 IXGBE_CORE_LOCK(adapter);
726 IXGBE_CORE_UNLOCK(adapter);
731 /*********************************************************************
732 * Transmit entry point
734 * ixgbe_start is called by the stack to initiate a transmit.
735 * The driver will remain in this routine as long as there are
736 * packets to transmit and transmit resources are available.
737 * In case resources are not available stack is notified and
738 * the packet is requeued.
739 **********************************************************************/
742 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
745 struct adapter *adapter = txr->adapter;
747 IXGBE_TX_LOCK_ASSERT(txr);
749 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
752 if (!adapter->link_active)
755 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
757 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
761 if (ixgbe_xmit(txr, &m_head)) {
764 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
765 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
768 /* Send a copy of the frame to the BPF listener */
769 ETHER_BPF_MTAP(ifp, m_head);
771 /* Set watchdog on */
772 txr->watchdog_time = ticks;
773 txr->queue_status = IXGBE_QUEUE_WORKING;
780 * Legacy TX start - called by the stack, this
781 * always uses the first tx ring, and should
782 * not be used with multiqueue tx enabled.
785 ixgbe_start(struct ifnet *ifp)
787 struct adapter *adapter = ifp->if_softc;
788 struct tx_ring *txr = adapter->tx_rings;
790 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
792 ixgbe_start_locked(txr, ifp);
793 IXGBE_TX_UNLOCK(txr);
798 #if __FreeBSD_version >= 800000
800 ** Multiqueue Transmit driver
804 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
806 struct adapter *adapter = ifp->if_softc;
807 struct ix_queue *que;
811 /* Which queue to use */
812 if ((m->m_flags & M_FLOWID) != 0)
813 i = m->m_pkthdr.flowid % adapter->num_queues;
815 txr = &adapter->tx_rings[i];
816 que = &adapter->queues[i];
818 if (IXGBE_TX_TRYLOCK(txr)) {
819 err = ixgbe_mq_start_locked(ifp, txr, m);
820 IXGBE_TX_UNLOCK(txr);
822 err = drbr_enqueue(ifp, txr->br, m);
823 taskqueue_enqueue(que->tq, &que->que_task);
830 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
832 struct adapter *adapter = txr->adapter;
834 int enqueued, err = 0;
836 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
837 IFF_DRV_RUNNING || adapter->link_active == 0) {
839 err = drbr_enqueue(ifp, txr->br, m);
843 /* Call cleanup if number of TX descriptors low */
844 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
849 next = drbr_dequeue(ifp, txr->br);
850 } else if (drbr_needs_enqueue(ifp, txr->br)) {
851 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
853 next = drbr_dequeue(ifp, txr->br);
857 /* Process the queue */
858 while (next != NULL) {
859 if ((err = ixgbe_xmit(txr, &next)) != 0) {
861 err = drbr_enqueue(ifp, txr->br, next);
865 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
866 /* Send a copy of the frame to the BPF listener */
867 ETHER_BPF_MTAP(ifp, next);
868 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
870 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
871 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
874 next = drbr_dequeue(ifp, txr->br);
878 /* Set watchdog on */
879 txr->queue_status = IXGBE_QUEUE_WORKING;
880 txr->watchdog_time = ticks;
887 ** Flush all ring buffers
890 ixgbe_qflush(struct ifnet *ifp)
892 struct adapter *adapter = ifp->if_softc;
893 struct tx_ring *txr = adapter->tx_rings;
896 for (int i = 0; i < adapter->num_queues; i++, txr++) {
898 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
900 IXGBE_TX_UNLOCK(txr);
904 #endif /* __FreeBSD_version >= 800000 */
906 /*********************************************************************
909 * ixgbe_ioctl is called when the user wants to configure the
912 * return 0 on success, positive on failure
913 **********************************************************************/
916 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
918 struct adapter *adapter = ifp->if_softc;
919 struct ifreq *ifr = (struct ifreq *) data;
925 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
926 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
929 IXGBE_CORE_LOCK(adapter);
930 ifp->if_mtu = ifr->ifr_mtu;
931 adapter->max_frame_size =
932 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
933 ixgbe_init_locked(adapter);
934 IXGBE_CORE_UNLOCK(adapter);
938 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
939 IXGBE_CORE_LOCK(adapter);
940 if (ifp->if_flags & IFF_UP) {
941 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
942 if ((ifp->if_flags ^ adapter->if_flags) &
943 (IFF_PROMISC | IFF_ALLMULTI)) {
944 ixgbe_set_promisc(adapter);
947 ixgbe_init_locked(adapter);
949 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
951 adapter->if_flags = ifp->if_flags;
952 IXGBE_CORE_UNLOCK(adapter);
956 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
957 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
958 IXGBE_CORE_LOCK(adapter);
959 ixgbe_disable_intr(adapter);
960 ixgbe_set_multi(adapter);
961 ixgbe_enable_intr(adapter);
962 IXGBE_CORE_UNLOCK(adapter);
967 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
968 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
972 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
973 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
974 if (mask & IFCAP_HWCSUM)
975 ifp->if_capenable ^= IFCAP_HWCSUM;
976 if (mask & IFCAP_TSO4)
977 ifp->if_capenable ^= IFCAP_TSO4;
978 if (mask & IFCAP_LRO)
979 ifp->if_capenable ^= IFCAP_LRO;
980 if (mask & IFCAP_VLAN_HWTAGGING)
981 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
982 if (mask & IFCAP_VLAN_HWFILTER)
983 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
984 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
985 IXGBE_CORE_LOCK(adapter);
986 ixgbe_init_locked(adapter);
987 IXGBE_CORE_UNLOCK(adapter);
989 VLAN_CAPABILITIES(ifp);
994 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
995 error = ether_ioctl(ifp, command, data);
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
1015 ixgbe_init_locked(struct adapter *adapter)
1017 struct ifnet *ifp = adapter->ifp;
1018 device_t dev = adapter->dev;
1019 struct ixgbe_hw *hw = &adapter->hw;
1020 u32 k, txdctl, mhadd, gpie;
1023 mtx_assert(&adapter->core_mtx, MA_OWNED);
1024 INIT_DEBUGOUT("ixgbe_init: begin");
1025 hw->adapter_stopped = FALSE;
1026 ixgbe_stop_adapter(hw);
1027 callout_stop(&adapter->timer);
1029 /* reprogram the RAR[0] in case user changed it. */
1030 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1032 /* Get the latest mac address, User can use a LAA */
1033 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
1034 IXGBE_ETH_LENGTH_OF_ADDRESS);
1035 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
1036 hw->addr_ctrl.rar_used_count = 1;
1038 /* Set the various hardware offload abilities */
1039 ifp->if_hwassist = 0;
1040 if (ifp->if_capenable & IFCAP_TSO4)
1041 ifp->if_hwassist |= CSUM_TSO;
1042 if (ifp->if_capenable & IFCAP_TXCSUM) {
1043 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1044 #if __FreeBSD_version >= 800000
1045 if (hw->mac.type == ixgbe_mac_82599EB)
1046 ifp->if_hwassist |= CSUM_SCTP;
1050 /* Prepare transmit descriptors and buffers */
1051 if (ixgbe_setup_transmit_structures(adapter)) {
1052 device_printf(dev,"Could not setup transmit structures\n");
1053 ixgbe_stop(adapter);
1058 ixgbe_initialize_transmit_units(adapter);
1060 /* Setup Multicast table */
1061 ixgbe_set_multi(adapter);
1064 ** Determine the correct mbuf pool
1065 ** for doing jumbo/headersplit
1067 if (adapter->max_frame_size <= 2048)
1068 adapter->rx_mbuf_sz = MCLBYTES;
1069 else if (adapter->max_frame_size <= 4096)
1070 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1072 adapter->rx_mbuf_sz = MJUM9BYTES;
1074 /* Prepare receive descriptors and buffers */
1075 if (ixgbe_setup_receive_structures(adapter)) {
1076 device_printf(dev,"Could not setup receive structures\n");
1077 ixgbe_stop(adapter);
1081 /* Configure RX settings */
1082 ixgbe_initialize_receive_units(adapter);
1084 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
1086 if (hw->mac.type == ixgbe_mac_82599EB) {
1087 gpie |= IXGBE_SDP1_GPIEN;
1088 gpie |= IXGBE_SDP2_GPIEN;
1091 /* Enable Fan Failure Interrupt */
1092 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1093 gpie |= IXGBE_SDP1_GPIEN;
1095 if (adapter->msix > 1) {
1096 /* Enable Enhanced MSIX mode */
1097 gpie |= IXGBE_GPIE_MSIX_MODE;
1098 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1101 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1104 if (ifp->if_mtu > ETHERMTU) {
1105 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1106 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1107 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1108 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1111 /* Now enable all the queues */
1113 for (int i = 0; i < adapter->num_queues; i++) {
1114 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1115 txdctl |= IXGBE_TXDCTL_ENABLE;
1116 /* Set WTHRESH to 8, burst writeback */
1117 txdctl |= (8 << 16);
1118 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1121 for (int i = 0; i < adapter->num_queues; i++) {
1122 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1123 if (hw->mac.type == ixgbe_mac_82598EB) {
1129 rxdctl &= ~0x3FFFFF;
1132 rxdctl |= IXGBE_RXDCTL_ENABLE;
1133 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1134 for (k = 0; k < 10; k++) {
1135 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1136 IXGBE_RXDCTL_ENABLE)
1142 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1145 /* Set up VLAN support and filter */
1146 ixgbe_setup_vlan_hw_support(adapter);
1148 /* Enable Receive engine */
1149 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1150 if (hw->mac.type == ixgbe_mac_82598EB)
1151 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1152 rxctrl |= IXGBE_RXCTRL_RXEN;
1153 ixgbe_enable_rx_dma(hw, rxctrl);
1155 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1157 /* Set up MSI/X routing */
1158 if (ixgbe_enable_msix) {
1159 ixgbe_configure_ivars(adapter);
1160 /* Set up auto-mask */
1161 if (hw->mac.type == ixgbe_mac_82598EB)
1162 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1164 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1165 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1167 } else { /* Simple settings for Legacy/MSI */
1168 ixgbe_set_ivar(adapter, 0, 0, 0);
1169 ixgbe_set_ivar(adapter, 0, 0, 1);
1170 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1174 /* Init Flow director */
1175 if (hw->mac.type == ixgbe_mac_82599EB)
1176 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1180 ** Check on any SFP devices that
1181 ** need to be kick-started
1183 if (hw->phy.type == ixgbe_phy_none) {
1184 int err = hw->phy.ops.identify(hw);
1185 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1187 "Unsupported SFP+ module type was detected.\n");
1192 /* Set moderation on the Link interrupt */
1193 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1195 /* Config/Enable Link */
1196 ixgbe_config_link(adapter);
1198 /* And now turn on interrupts */
1199 ixgbe_enable_intr(adapter);
1201 /* Now inform the stack we're ready */
1202 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1203 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Unlocked wrapper installed as ifp->if_init: acquires the core lock
 * and runs ixgbe_init_locked().  (Closing brace elided from view.)
 */
1209 ixgbe_init(void *arg)
1211 struct adapter *adapter = arg;
1213 IXGBE_CORE_LOCK(adapter);
1214 ixgbe_init_locked(adapter);
1215 IXGBE_CORE_UNLOCK(adapter);
1222 ** MSIX Interrupt Handlers and Tasklets
/*
 * Unmask the interrupt for one queue vector.  82598 uses the single
 * 32-bit EIMS register; the (elided) else branch uses the EIMS_EX
 * pair to cover 64 queue bits.
 */
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1229 struct ixgbe_hw *hw = &adapter->hw;
/*
 * NOTE(review): "1 << vector" is a 32-bit shift done before the u64
 * cast, so vector >= 31 is undefined behavior.  Should likely be
 * (u64)(1ULL << vector) -- confirm against upstream.
 */
1230 u64 queue = (u64)(1 << vector);
1233 if (hw->mac.type == ixgbe_mac_82598EB) {
1234 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
/* low then high dword of the 64-bit enable-mask registers */
1237 mask = (queue & 0xFFFFFFFF);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240 mask = (queue >> 32);
1242 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * Mask (disable) the interrupt for one queue vector; mirror of
 * ixgbe_enable_queue() but writing the EIMC clear registers.
 */
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1249 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): same 32-bit "1 << vector" shift hazard as in
 * ixgbe_enable_queue() -- UB for vector >= 31; verify upstream fix. */
1250 u64 queue = (u64)(1 << vector);
1253 if (hw->mac.type == ixgbe_mac_82598EB) {
1254 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1257 mask = (queue & 0xFFFFFFFF);
1259 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260 mask = (queue >> 32);
1262 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Software-trigger interrupts for the given 64-bit queue mask by
 * writing the interrupt-cause-set (EICS) registers; used by the
 * watchdog to re-kick queues.
 */
1267 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
1271 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1272 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1275 mask = (queues & 0xFFFFFFFF);
1276 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1277 mask = (queues >> 32);
1278 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * Deferred (taskqueue) per-queue handler: drains RX, services the TX
 * ring and restarts transmission, re-queueing itself while work
 * remains; finally re-enables the queue's interrupt.  Several lines
 * (TX lock acquisition, the "more" test) are elided from this view.
 */
1284 ixgbe_handle_que(void *context, int pending)
1286 struct ix_queue *que = context;
1287 struct adapter *adapter = que->adapter;
1288 struct tx_ring *txr = que->txr;
1289 struct ifnet *ifp = adapter->ifp;
1292 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1293 more = ixgbe_rxeof(que, adapter->rx_process_limit);
/* multiqueue (drbr) start on 8.x, classic if_snd start otherwise */
1296 #if __FreeBSD_version >= 800000
1297 if (!drbr_empty(ifp, txr->br))
1298 ixgbe_mq_start_locked(ifp, txr, NULL);
1300 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1301 ixgbe_start_locked(txr, ifp);
1303 IXGBE_TX_UNLOCK(txr);
/* still work pending: reschedule ourselves instead of re-enabling */
1305 taskqueue_enqueue(que->tq, &que->que_task);
1310 /* Reenable this interrupt */
1311 ixgbe_enable_queue(adapter, que->msix);
1316 /*********************************************************************
1318 * Legacy Interrupt Service routine
1320 **********************************************************************/
/*
 * INTx/MSI interrupt handler: reads and dispatches EICR causes --
 * RX/TX work, fan failure and link change -- then re-enables
 * interrupts.  TX lock acquisition and parts of the txeof loop are
 * elided from this view.
 */
1323 ixgbe_legacy_irq(void *arg)
1325 struct ix_queue *que = arg;
1326 struct adapter *adapter = que->adapter;
1327 struct ixgbe_hw *hw = &adapter->hw;
1328 struct tx_ring *txr = adapter->tx_rings;
1329 bool more_tx, more_rx;
1330 u32 reg_eicr, loop = MAX_LOOP;
/* Reading EICR also serves to identify/ack the interrupt source */
1333 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1336 if (reg_eicr == 0) {
/* Not ours (shared line): re-enable and bail */
1337 ixgbe_enable_intr(adapter);
1341 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
/* Bounded TX clean loop: at most MAX_LOOP passes */
1345 more_tx = ixgbe_txeof(txr);
1346 } while (loop-- && more_tx);
1347 IXGBE_TX_UNLOCK(txr);
1349 if (more_rx || more_tx)
1350 taskqueue_enqueue(que->tq, &que->que_task);
1352 /* Check for fan failure */
1353 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1354 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1355 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1356 "REPLACE IMMEDIATELY!!\n");
/* NOTE(review): writes EIMS here, while ixgbe_msix_link() writes
 * EICR for the same cause -- asymmetry looks intentional upstream
 * but is worth confirming. */
1357 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1360 /* Link status change */
1361 if (reg_eicr & IXGBE_EICR_LSC)
1362 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1364 ixgbe_enable_intr(adapter);
1369 /*********************************************************************
1371 * MSI Queue Interrupt Service routine
1373 **********************************************************************/
/*
 * MSI-X per-queue interrupt handler: disables the queue's vector
 * (elided here), services TX and RX, performs adaptive interrupt
 * moderation (AIM), then either defers remaining work to the
 * taskqueue or re-enables the vector.
 */
1375 ixgbe_msix_que(void *arg)
1377 struct ix_queue *que = arg;
1378 struct adapter *adapter = que->adapter;
1379 struct tx_ring *txr = que->txr;
1380 struct rx_ring *rxr = que->rxr;
1381 bool more_tx, more_rx;
1386 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1389 more_tx = ixgbe_txeof(txr);
1390 IXGBE_TX_UNLOCK(txr);
/* NOTE(review): second rxeof call overwriting more_rx -- in the
 * upstream source this sits under a refresh-needed condition that is
 * elided here; confirm the guard exists in the real file. */
1392 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
/* AIM disabled: skip straight to the reschedule/re-enable decision */
1396 if (ixgbe_enable_aim == FALSE)
1399 ** Do Adaptive Interrupt Moderation:
1400 ** - Write out last calculated setting
1401 ** - Calculate based on average size over
1402 ** the last interval.
1404 if (que->eitr_setting)
1405 IXGBE_WRITE_REG(&adapter->hw,
1406 IXGBE_EITR(que->msix), que->eitr_setting);
1408 que->eitr_setting = 0;
1410 /* Idle, do nothing */
1411 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr driven by average frame size of whichever direction is larger */
1414 if ((txr->bytes) && (txr->packets))
1415 newitr = txr->bytes/txr->packets;
1416 if ((rxr->bytes) && (rxr->packets))
1417 newitr = max(newitr,
1418 (rxr->bytes / rxr->packets));
1419 newitr += 24; /* account for hardware frame, crc */
1421 /* set an upper boundary */
1422 newitr = min(newitr, 3000);
1424 /* Be nice to the mid range */
1425 if ((newitr > 300) && (newitr < 1200))
1426 newitr = (newitr / 3);
1428 newitr = (newitr / 2);
/* 82598 wants the value mirrored into the high half-word */
1430 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1431 newitr |= newitr << 16;
1433 newitr |= IXGBE_EITR_CNT_WDIS;
1435 /* save for next interrupt */
1436 que->eitr_setting = newitr;
1445 if (more_tx || more_rx)
1446 taskqueue_enqueue(que->tq, &que->que_task);
1447 else /* Reenable this interrupt */
1448 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSI-X "other causes" vector handler: link change, flow director
 * reinit, ECC error, SFP module/multispeed-fiber events and fan
 * failure.  Each cause is acked by writing it back to EICR and its
 * slow-path work is deferred to the adapter taskqueue.
 */
1454 ixgbe_msix_link(void *arg)
1456 struct adapter *adapter = arg;
1457 struct ixgbe_hw *hw = &adapter->hw;
1460 ++adapter->link_irq;
1462 /* First get the cause */
1463 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1464 /* Clear interrupt with write */
1465 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1467 /* Link status change */
1468 if (reg_eicr & IXGBE_EICR_LSC)
1469 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* 82599-only causes: flow director, ECC, SFP hot-plug pins */
1471 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1473 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1474 /* This is probably overkill :) */
1475 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1477 /* Clear the interrupt */
1478 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1479 /* Turn off the interface */
1480 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1481 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1484 if (reg_eicr & IXGBE_EICR_ECC) {
1485 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1486 "Please Reboot!!\n");
1487 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
/* SDP1 = multispeed fiber event, SDP2 = module insertion */
1490 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1491 /* Clear the interrupt */
1492 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1493 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1494 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1495 /* Clear the interrupt */
1496 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1497 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1501 /* Check for fan failure */
1502 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1503 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1504 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1505 "REPLACE IMMEDIATELY!!\n");
1506 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
/* Re-enable only the "other" cause; queue vectors self-manage */
1509 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1513 /*********************************************************************
1515 * Media Ioctl callback
1517 * This routine is called whenever the user queries the status of
1518 * the interface using ifconfig.
1520 **********************************************************************/
/*
 * ifmedia status callback (ifconfig queries): refreshes link state
 * under the core lock and reports active media/speed.  Early return
 * when link is down; break statements in the switch are elided.
 */
1522 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1524 struct adapter *adapter = ifp->if_softc;
1526 INIT_DEBUGOUT("ixgbe_media_status: begin");
1527 IXGBE_CORE_LOCK(adapter);
1528 ixgbe_update_link_status(adapter);
1530 ifmr->ifm_status = IFM_AVALID;
1531 ifmr->ifm_active = IFM_ETHER;
1533 if (!adapter->link_active) {
1534 IXGBE_CORE_UNLOCK(adapter);
1538 ifmr->ifm_status |= IFM_ACTIVE;
1540 switch (adapter->link_speed) {
1541 case IXGBE_LINK_SPEED_1GB_FULL:
1542 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1544 case IXGBE_LINK_SPEED_10GB_FULL:
/* 10G media type depends on the detected optics */
1545 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1549 IXGBE_CORE_UNLOCK(adapter);
1554 /*********************************************************************
1556 * Media Ioctl callback
1558 * This routine is called when the user changes speed/duplex using
1559 * media/mediopt option with ifconfig.
1561 **********************************************************************/
/*
 * ifmedia change callback: only IFM_AUTO is supported; advertises
 * 1G + 10G full duplex.  Case labels/returns are elided from view.
 */
1563 ixgbe_media_change(struct ifnet * ifp)
1565 struct adapter *adapter = ifp->if_softc;
1566 struct ifmedia *ifm = &adapter->media;
1568 INIT_DEBUGOUT("ixgbe_media_change: begin");
1570 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1573 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1575 adapter->hw.phy.autoneg_advertised =
1576 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1579 device_printf(adapter->dev, "Only auto media type\n");
1586 /*********************************************************************
1588 * This routine maps the mbufs to tx descriptors, allowing the
1589 * TX engine to transmit the packets.
1590 * - return 0 on success, positive on failure
1592 **********************************************************************/
/*
 * Map one mbuf chain onto advanced TX descriptors and hand it to
 * hardware via the TDT tail register.  Handles EFBIG by defragging
 * the chain once, sets up TSO / checksum offload context, samples
 * packets for flow director, and records the EOP index on the first
 * buffer for completion tracking.  Return statements, some error
 * labels and declarations are elided from this view.
 */
1595 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1597 struct adapter *adapter = txr->adapter;
1598 u32 olinfo_status = 0, cmd_type_len;
1600 int i, j, error, nsegs;
1601 int first, last = 0;
1602 struct mbuf *m_head;
1603 bus_dma_segment_t segs[ixgbe_num_segs];
1605 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1606 union ixgbe_adv_tx_desc *txd = NULL;
1610 /* Basic descriptor defines */
1611 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1612 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1614 if (m_head->m_flags & M_VLANTAG)
1615 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1618 * Important to capture the first descriptor
1619 * used because it will contain the index of
1620 * the one we tell the hardware to report back
1622 first = txr->next_avail_desc;
1623 txbuf = &txr->tx_buffers[first];
1624 txbuf_mapped = txbuf;
1628 * Map the packet for DMA.
1630 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1631 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* Too many segments: defrag once and retry the DMA load */
1633 if (error == EFBIG) {
1636 m = m_defrag(*m_headp, M_DONTWAIT);
1638 adapter->mbuf_defrag_failed++;
1646 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1647 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1649 if (error == ENOMEM) {
1650 adapter->no_tx_dma_setup++;
1652 } else if (error != 0) {
1653 adapter->no_tx_dma_setup++;
1658 } else if (error == ENOMEM) {
1659 adapter->no_tx_dma_setup++;
1661 } else if (error != 0) {
1662 adapter->no_tx_dma_setup++;
/* Need nsegs + slack so the clean routine never wraps onto us */
1668 /* Make certain there are enough descriptors */
1669 if (nsegs > txr->tx_avail - 2) {
1670 txr->no_desc_avail++;
1677 ** Set up the appropriate offload context
1678 ** this becomes the first descriptor of
1681 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1682 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1683 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1684 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1685 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1686 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1690 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1691 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
/* Placeholder: never compiles (IXGBE_IEEE1588 is not defined) */
1693 #ifdef IXGBE_IEEE1588
1694 /* This is changing soon to an mtag detection */
1695 if (we detect this mbuf has a TSTAMP mtag)
1696 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1700 /* Do the flow director magic */
1701 if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
1703 if (txr->atr_count >= atr_sample_rate) {
1704 ixgbe_atr(txr, m_head);
1709 /* Record payload length */
/* non-TSO case: olinfo carries the full packet length */
1711 olinfo_status |= m_head->m_pkthdr.len <<
1712 IXGBE_ADVTXD_PAYLEN_SHIFT;
/* Fill one descriptor per DMA segment, wrapping at ring end */
1714 i = txr->next_avail_desc;
1715 for (j = 0; j < nsegs; j++) {
1719 txbuf = &txr->tx_buffers[i];
1720 txd = &txr->tx_base[i];
1721 seglen = segs[j].ds_len;
1722 segaddr = htole64(segs[j].ds_addr);
1724 txd->read.buffer_addr = segaddr;
1725 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1726 cmd_type_len |seglen);
1727 txd->read.olinfo_status = htole32(olinfo_status);
1728 last = i; /* descriptor that will get completion IRQ */
1730 if (++i == adapter->num_tx_desc)
1733 txbuf->m_head = NULL;
1734 txbuf->eop_index = -1;
/* Last descriptor gets EOP + report-status */
1737 txd->read.cmd_type_len |=
1738 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1739 txr->tx_avail -= nsegs;
1740 txr->next_avail_desc = i;
1742 txbuf->m_head = m_head;
1744 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1746 /* Set the index of the descriptor that will be marked done */
1747 txbuf = &txr->tx_buffers[first];
1748 txbuf->eop_index = last;
1750 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1753 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1754 * hardware that this frame is available to transmit.
1756 ++txr->total_packets;
1757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
/* error-path cleanup label body (label itself elided) */
1762 bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * Sync FCTRL's unicast/multicast-promiscuous bits with the
 * interface's IFF_PROMISC / IFF_ALLMULTI flags: clear both first,
 * then set according to the current flags.
 */
1768 ixgbe_set_promisc(struct adapter *adapter)
1771 struct ifnet *ifp = adapter->ifp;
1773 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1774 reg_rctl &= (~IXGBE_FCTRL_UPE);
1775 reg_rctl &= (~IXGBE_FCTRL_MPE);
1776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1778 if (ifp->if_flags & IFF_PROMISC) {
1779 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1781 } else if (ifp->if_flags & IFF_ALLMULTI) {
1782 reg_rctl |= IXGBE_FCTRL_MPE;
1783 reg_rctl &= ~IXGBE_FCTRL_UPE;
1784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1790 /*********************************************************************
1793 * This routine is called whenever multicast address list is updated.
1795 **********************************************************************/
1796 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast address list.  The mta array, mcnt counter, update_ptr
 * assignment and the 8.x-vs-older lock macro selection are partly
 * elided from this view.
 */
1799 ixgbe_set_multi(struct adapter *adapter)
1804 struct ifmultiaddr *ifma;
1806 struct ifnet *ifp = adapter->ifp;
1808 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1811 bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1812 MAX_NUM_MULTICAST_ADDRESSES);
1814 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1815 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1816 if (ifp->if_flags & IFF_PROMISC)
1817 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1818 else if (ifp->if_flags & IFF_ALLMULTI) {
1819 fctrl |= IXGBE_FCTRL_MPE;
1820 fctrl &= ~IXGBE_FCTRL_UPE;
1822 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* lock the multicast list (IF_ADDR_LOCK pre-8.0, maddr_rlock after) */
1826 #if __FreeBSD_version < 800000
1829 if_maddr_rlock(ifp);
1831 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* skip non-link-layer entries */
1832 if (ifma->ifma_addr->sa_family != AF_LINK)
1834 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1835 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1836 IXGBE_ETH_LENGTH_OF_ADDRESS);
1839 #if __FreeBSD_version < 800000
1840 IF_ADDR_UNLOCK(ifp);
1842 if_maddr_runlock(ifp);
/* Hand the packed array to shared code via the iterator callback */
1846 ixgbe_update_mc_addr_list(&adapter->hw,
1847 update_ptr, mcnt, ixgbe_mc_array_itr);
1853 * This is an iterator function now needed by the multicast
1854 * shared code. It simply feeds the shared code routine the
1855 * addresses in the array of ixgbe_set_multi() one by one.
/* Advances *update_ptr by one MAC address per call; the return of
 * the current address (and vmdq handling) is elided from this view. */
1858 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1860 u8 *addr = *update_ptr;
1864 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1865 *update_ptr = newptr;
1870 /*********************************************************************
1873 * This routine checks for link status,updates statistics,
1874 * and runs the watchdog check.
1876 **********************************************************************/
/*
 * 1Hz callout (armed in ixgbe_init_locked): probes optional SFP,
 * refreshes link/stats, checks TX queues for hangs, rearms queue
 * interrupts, and reschedules itself.  The hung/out labels are
 * elided; the trailing printf/reset lines below are the watchdog
 * path reached when a queue is hung.
 */
1879 ixgbe_local_timer(void *arg)
1881 struct adapter *adapter = arg;
1882 device_t dev = adapter->dev;
1883 struct tx_ring *txr = adapter->tx_rings;
1885 mtx_assert(&adapter->core_mtx, MA_OWNED);
1887 /* Check for pluggable optics */
1888 if (adapter->sfp_probe)
1889 if (!ixgbe_sfp_probe(adapter))
1890 goto out; /* Nothing to do */
1892 ixgbe_update_link_status(adapter);
1893 ixgbe_update_stats_counters(adapter);
1896 * If the interface has been paused
1897 * then don't do the watchdog check
1899 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1903 ** Check status on the TX queues for a hang
1905 for (int i = 0; i < adapter->num_queues; i++, txr++)
1906 if (txr->queue_status == IXGBE_QUEUE_HUNG)
1910 ixgbe_rearm_queues(adapter, adapter->que_mask)#;
1911 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* --- watchdog path: dump ring state and reinitialize the device --- */
1915 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1916 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1917 IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
1918 IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
1919 device_printf(dev,"TX(%d) desc avail = %d,"
1920 "Next TX to Clean = %d\n",
1921 txr->me, txr->tx_avail, txr->next_to_clean);
1922 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1923 adapter->watchdog_events++;
1924 IXGBE_TX_UNLOCK(txr);
1925 ixgbe_init_locked(adapter);
1929 ** Note: this routine updates the OS on the link state
1930 ** the real check of the hardware only happens with
1931 ** a link interrupt.
/*
 * Propagate the cached adapter->link_up into the ifnet layer,
 * announcing transitions only (link_active tracks what the OS was
 * last told).  On link-down, all TX queues are marked idle.
 */
1934 ixgbe_update_link_status(struct adapter *adapter)
1936 struct ifnet *ifp = adapter->ifp;
1937 struct tx_ring *txr = adapter->tx_rings;
1938 device_t dev = adapter->dev;
1941 if (adapter->link_up){
1942 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G encoding; anything else prints 1G */
1944 device_printf(dev,"Link is up %d Gbps %s \n",
1945 ((adapter->link_speed == 128)? 10:1),
1947 adapter->link_active = TRUE;
1948 if_link_state_change(ifp, LINK_STATE_UP);
1950 } else { /* Link down */
1951 if (adapter->link_active == TRUE) {
1953 device_printf(dev,"Link is Down\n");
1954 if_link_state_change(ifp, LINK_STATE_DOWN);
1955 adapter->link_active = FALSE;
1956 for (int i = 0; i < adapter->num_queues;
1958 txr->queue_status = IXGBE_QUEUE_IDLE;
1966 /*********************************************************************
1968 * This routine disables all traffic on the adapter by issuing a
1969 * global reset on the MAC and deallocates TX/RX buffers.
1971 **********************************************************************/
/*
 * Quiesce the adapter: disable interrupts, clear RUNNING/OACTIVE,
 * reset the MAC, turn off the laser on multispeed fiber, stop the
 * timer, and restore RAR[0].  Requires the core mutex (asserted).
 * The ifp declaration is elided from this view.
 */
1974 ixgbe_stop(void *arg)
1977 struct adapter *adapter = arg;
1978 struct ixgbe_hw *hw = &adapter->hw;
1981 mtx_assert(&adapter->core_mtx, MA_OWNED);
1983 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1984 ixgbe_disable_intr(adapter);
1986 /* Tell the stack that the interface is no longer active */
1987 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Clear the stopped flag so stop_adapter's guard doesn't skip it */
1990 hw->adapter_stopped = FALSE;
1991 ixgbe_stop_adapter(hw);
1992 /* Turn off the laser */
1993 if (hw->phy.multispeed_fiber)
1994 ixgbe_disable_tx_laser(hw);
1995 callout_stop(&adapter->timer);
1997 /* reprogram the RAR[0] in case user changed it. */
1998 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2004 /*********************************************************************
2006 * Determine hardware revision.
2008 **********************************************************************/
/*
 * Cache PCI identification (vendor/device/revision/subsystem IDs)
 * from config space into the shared-code hw struct.
 */
2010 ixgbe_identify_hardware(struct adapter *adapter)
2012 device_t dev = adapter->dev;
2014 /* Save off the information about this board */
2015 adapter->hw.vendor_id = pci_get_vendor(dev);
2016 adapter->hw.device_id = pci_get_device(dev);
2017 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2018 adapter->hw.subsystem_vendor_id =
2019 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2020 adapter->hw.subsystem_device_id =
2021 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2026 /*********************************************************************
2028 * Setup the Legacy or MSI Interrupt handler
2030 **********************************************************************/
/*
 * Allocate the single IRQ resource, create the per-queue and
 * link/SFP taskqueues, and register ixgbe_legacy_irq as the fast
 * handler.  rid initialization, error returns and some cleanup are
 * elided from this view.
 */
2032 ixgbe_allocate_legacy(struct adapter *adapter)
2034 device_t dev = adapter->dev;
2035 struct ix_queue *que = adapter->queues;
/* msix == 1 means MSI was granted; rid selection line is elided */
2039 if (adapter->msix == 1)
2042 /* We allocate a single interrupt resource */
2043 adapter->res = bus_alloc_resource_any(dev,
2044 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2045 if (adapter->res == NULL) {
2046 device_printf(dev, "Unable to allocate bus resource: "
2052 * Try allocating a fast interrupt and the associated deferred
2053 * processing contexts.
2055 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2056 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2057 taskqueue_thread_enqueue, &que->tq);
2058 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2059 device_get_nameunit(adapter->dev));
2061 /* Tasklets for Link, SFP and Multispeed Fiber */
2062 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2063 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2064 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
/* fdir task is conditional on IXGBE_FDIR (the #ifdef is elided) */
2066 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2068 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2069 taskqueue_thread_enqueue, &adapter->tq);
2070 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2071 device_get_nameunit(adapter->dev));
2073 if ((error = bus_setup_intr(dev, adapter->res,
2074 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2075 que, &adapter->tag)) != 0) {
2076 device_printf(dev, "Failed to register fast interrupt "
2077 "handler: %d\n", error);
2078 taskqueue_free(que->tq);
2079 taskqueue_free(adapter->tq);
2084 /* For simplicity in the handlers */
2085 adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2091 /*********************************************************************
2093 * Setup MSIX Interrupt resources and handlers
2095 **********************************************************************/
/*
 * Allocate and wire one MSI-X vector per queue (resource + handler
 * + CPU binding + taskqueue), then one more vector for link/other
 * causes.  rid computation, error returns and several #endif lines
 * are elided from this view.
 */
2097 ixgbe_allocate_msix(struct adapter *adapter)
2099 device_t dev = adapter->dev;
2100 struct ix_queue *que = adapter->queues;
2101 int error, rid, vector = 0;
2103 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2105 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2106 RF_SHAREABLE | RF_ACTIVE);
2107 if (que->res == NULL) {
2108 device_printf(dev,"Unable to allocate"
2109 " bus resource: que interrupt [%d]\n", vector);
2112 /* Set the handler function */
2113 error = bus_setup_intr(dev, que->res,
2114 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2115 ixgbe_msix_que, que, &que->tag);
2118 device_printf(dev, "Failed to register QUE handler");
2121 #if __FreeBSD_version >= 800504
2122 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* NOTE(review): (u64)(1 << que->msix) has the same 32-bit shift
 * hazard flagged in ixgbe_enable_queue(). */
2125 adapter->que_mask |= (u64)(1 << que->msix);
2127 ** Bind the msix vector, and thus the
2128 ** ring to the corresponding cpu.
2130 if (adapter->num_queues > 1)
2131 bus_bind_intr(dev, que->res, i);
2133 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2134 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2135 taskqueue_thread_enqueue, &que->tq);
2136 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2137 device_get_nameunit(adapter->dev));
/* --- link/other-causes vector --- */
2142 adapter->res = bus_alloc_resource_any(dev,
2143 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2144 if (!adapter->res) {
2145 device_printf(dev,"Unable to allocate"
2146 " bus resource: Link interrupt [%d]\n", rid);
2149 /* Set the link handler function */
2150 error = bus_setup_intr(dev, adapter->res,
2151 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2152 ixgbe_msix_link, adapter, &adapter->tag);
2154 adapter->res = NULL;
2155 device_printf(dev, "Failed to register LINK handler");
2158 #if __FreeBSD_version >= 800504
2159 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2161 adapter->linkvec = vector;
2162 /* Tasklets for Link, SFP and Multispeed Fiber */
2163 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2164 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2165 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2167 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2169 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2170 taskqueue_thread_enqueue, &adapter->tq);
2171 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2172 device_get_nameunit(adapter->dev));
2178 * Setup Either MSI/X or MSI
/*
 * Probe and allocate MSI-X (preferred) or plain MSI.  Maps the MSI-X
 * table BAR (82598 vs 82599 location), sizes the queue count from
 * ncpus/messages/tunable, and returns the number of vectors granted
 * (return statements are elided from this view; 0 means fall back
 * to legacy INTx).
 */
2181 ixgbe_setup_msix(struct adapter *adapter)
2183 device_t dev = adapter->dev;
2184 int rid, want, queues, msgs;
2186 /* Override by tuneable */
2187 if (ixgbe_enable_msix == 0)
2190 /* First try MSI/X */
2191 rid = PCIR_BAR(MSIX_82598_BAR);
2192 adapter->msix_mem = bus_alloc_resource_any(dev,
2193 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2194 if (!adapter->msix_mem) {
2195 rid += 4; /* 82599 maps in higher BAR */
2196 adapter->msix_mem = bus_alloc_resource_any(dev,
2197 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2199 if (!adapter->msix_mem) {
2200 /* May not be enabled */
2201 device_printf(adapter->dev,
2202 "Unable to map MSIX table \n");
2206 msgs = pci_msix_count(dev);
2207 if (msgs == 0) { /* system has msix disabled */
2208 bus_release_resource(dev, SYS_RES_MEMORY,
2209 rid, adapter->msix_mem);
2210 adapter->msix_mem = NULL;
2214 /* Figure out a reasonable auto config value */
2215 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
/* explicit tunable wins over the auto value */
2217 if (ixgbe_num_queues != 0)
2218 queues = ixgbe_num_queues;
2219 /* Set max queues to 8 */
2220 else if (queues > 8)
2224 ** Want one vector (RX/TX pair) per queue
2225 ** plus an additional for Link.
2231 device_printf(adapter->dev,
2232 "MSIX Configuration Problem, "
2233 "%d vectors but %d queues wanted!\n",
2235 return (0); /* Will go to Legacy setup */
2237 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2238 device_printf(adapter->dev,
2239 "Using MSIX interrupts with %d vectors\n", msgs);
2240 adapter->num_queues = queues;
/* MSI-X unavailable: fall back to a single MSI message */
2244 msgs = pci_msi_count(dev);
2245 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2246 device_printf(adapter->dev,"Using MSI interrupt\n");
/*
 * Map BAR(0) register space into the osdep bus-space handles, set
 * legacy defaults, then delegate vector discovery to
 * ixgbe_setup_msix().  The rid variable and error return are elided
 * from this view.
 */
2252 ixgbe_allocate_pci_resources(struct adapter *adapter)
2255 device_t dev = adapter->dev;
2258 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2261 if (!(adapter->pci_mem)) {
2262 device_printf(dev,"Unable to allocate bus resource: memory\n");
2266 adapter->osdep.mem_bus_space_tag =
2267 rman_get_bustag(adapter->pci_mem);
2268 adapter->osdep.mem_bus_space_handle =
2269 rman_get_bushandle(adapter->pci_mem);
/* shared code reads registers through this opaque handle pointer */
2270 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2272 /* Legacy defaults */
2273 adapter->num_queues = 1;
2274 adapter->hw.back = &adapter->osdep;
2277 ** Now setup MSI or MSI/X, should
2278 ** return us the number of supported
2279 ** vectors. (Will be 1 for MSI)
2281 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * Release everything ixgbe_allocate_pci_resources and the interrupt
 * setup routines acquired: per-queue IRQs/tags, the link or legacy
 * IRQ, MSI state, the MSI-X table BAR and BAR(0).  Safe to call
 * from a partially-failed attach (see the res == NULL early-out).
 */
2286 ixgbe_free_pci_resources(struct adapter * adapter)
2288 struct ix_queue *que = adapter->queues;
2289 device_t dev = adapter->dev;
/* MSI-X table lives in a different BAR on 82598 vs 82599 */
2292 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2293 memrid = PCIR_BAR(MSIX_82598_BAR);
2295 memrid = PCIR_BAR(MSIX_82599_BAR);
2298 ** There is a slight possibility of a failure mode
2299 ** in attach that will result in entering this function
2300 ** before interrupt resources have been initialized, and
2301 ** in that case we do not want to execute the loops below
2302 ** We can detect this reliably by the state of the adapter
2305 if (adapter->res == NULL)
2309 ** Release all msix queue resources:
2311 for (int i = 0; i < adapter->num_queues; i++, que++) {
2312 rid = que->msix + 1;
2313 if (que->tag != NULL) {
2314 bus_teardown_intr(dev, que->res, que->tag);
2317 if (que->res != NULL)
2318 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2322 /* Clean the Legacy or Link interrupt last */
2323 if (adapter->linkvec) /* we are doing MSIX */
2324 rid = adapter->linkvec + 1;
2326 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2328 if (adapter->tag != NULL) {
2329 bus_teardown_intr(dev, adapter->res, adapter->tag);
2330 adapter->tag = NULL;
2332 if (adapter->res != NULL)
2333 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2337 pci_release_msi(dev);
2339 if (adapter->msix_mem != NULL)
2340 bus_release_resource(dev, SYS_RES_MEMORY,
2341 memrid, adapter->msix_mem);
2343 if (adapter->pci_mem != NULL)
2344 bus_release_resource(dev, SYS_RES_MEMORY,
2345 PCIR_BAR(0), adapter->pci_mem);
2350 /*********************************************************************
2352 * Setup networking device structure and register an interface.
2354 **********************************************************************/
/*
 * Allocate and populate the ifnet: entry points, capabilities,
 * ether_ifattach, and the ifmedia list.  The ifp declaration, some
 * #endif lines and the return are elided from this view.
 */
2356 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2358 struct ixgbe_hw *hw = &adapter->hw;
2361 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2363 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2365 device_printf(dev, "can not allocate ifnet structure\n");
2368 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2369 ifp->if_mtu = ETHERMTU;
2370 ifp->if_baudrate = 1000000000;
2371 ifp->if_init = ixgbe_init;
2372 ifp->if_softc = adapter;
2373 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2374 ifp->if_ioctl = ixgbe_ioctl;
2375 ifp->if_start = ixgbe_start;
/* 8.x adds the multiqueue transmit path */
2376 #if __FreeBSD_version >= 800000
2377 ifp->if_transmit = ixgbe_mq_start;
2378 ifp->if_qflush = ixgbe_qflush;
2380 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2382 ether_ifattach(ifp, adapter->hw.mac.addr);
2384 adapter->max_frame_size =
2385 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2388 * Tell the upper layer(s) we support long frames.
2390 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2392 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2393 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2394 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
/* capenable snapshot taken BEFORE LRO/HWFILTER are added below, so
 * those two are advertised but start disabled */
2395 ifp->if_capenable = ifp->if_capabilities;
2397 /* Don't enable LRO by default */
2398 ifp->if_capabilities |= IFCAP_LRO;
2401 ** Dont turn this on by default, if vlans are
2402 ** created on another pseudo device (eg. lagg)
2403 ** then vlan events are not passed thru, breaking
2404 ** operation, but with HW FILTER off it works. If
2405 ** using vlans directly on the em driver you can
2406 ** enable this and get full hardware tag filtering.
2408 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2411 * Specify the media types supported by this adapter and register
2412 * callbacks to update media and link information
2414 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2415 ixgbe_media_status);
2416 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
/* 82598AT also supports 1000BASE-T fallback entries */
2418 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2419 ifmedia_add(&adapter->media,
2420 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2421 ifmedia_add(&adapter->media,
2422 IFM_ETHER | IFM_1000_T, 0, NULL);
2424 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2425 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_config_link: bring up the link. For SFP/multispeed-fiber parts the
 * heavy lifting (module setup, multispeed negotiation) is deferred to the
 * mod/msf taskqueue tasks; otherwise query current link state, determine
 * the advertised autoneg capabilities and program the link via mac.ops.
 * NOTE(review): listing has elided lines (braces/else paths); code kept
 * byte-identical.
 */
2431 ixgbe_config_link(struct adapter *adapter)
2433 struct ixgbe_hw *hw = &adapter->hw;
2434 u32 autoneg, err = 0;
2435 bool sfp, negotiate;
2437 sfp = ixgbe_is_sfp(hw);
/* SFP path: defer to taskqueue; multispeed fiber also enables TX laser. */
2440 if (hw->phy.multispeed_fiber) {
2441 hw->mac.ops.setup_sfp(hw);
2442 ixgbe_enable_tx_laser(hw);
2443 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2445 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
/* Non-SFP path: read current link, then set up autonegotiation. */
2447 if (hw->mac.ops.check_link)
2448 err = ixgbe_check_link(hw, &autoneg,
2449 &adapter->link_up, FALSE);
2452 autoneg = hw->phy.autoneg_advertised;
/* If nothing explicitly advertised, fall back to hardware capabilities. */
2453 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2454 err = hw->mac.ops.get_link_capabilities(hw,
2455 &autoneg, &negotiate);
2458 if (hw->mac.ops.setup_link)
2459 err = hw->mac.ops.setup_link(hw, autoneg,
2460 negotiate, adapter->link_up);
2466 /********************************************************************
2467 * Manage DMA'able memory.
2468 *******************************************************************/
/*
 * ixgbe_dmamap_cb: bus_dmamap_load() callback — stores the physical
 * (bus) address of the first segment into the caller-supplied bus_addr_t.
 * The error/nseg guard lines are elided in this listing.
 */
2470 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2474 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgbe_dma_malloc: allocate a DBA_ALIGN-aligned, single-segment DMA area
 * of 'size' bytes for descriptor rings. Creates a tag, allocates memory,
 * and loads the map (recording the bus address via ixgbe_dmamap_cb).
 * On failure each step unwinds the previous ones (error labels elided in
 * this listing). Returns 0 on success; nonzero bus_dma error otherwise.
 */
2479 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2480 struct ixgbe_dma_alloc *dma, int mapflags)
2482 device_t dev = adapter->dev;
2485 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2486 DBA_ALIGN, 0, /* alignment, bounds */
2487 BUS_SPACE_MAXADDR, /* lowaddr */
2488 BUS_SPACE_MAXADDR, /* highaddr */
2489 NULL, NULL, /* filter, filterarg */
2492 size, /* maxsegsize */
2493 BUS_DMA_ALLOCNOW, /* flags */
2494 NULL, /* lockfunc */
2495 NULL, /* lockfuncarg */
2498 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2502 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2503 BUS_DMA_NOWAIT, &dma->dma_map);
2505 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2509 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2513 mapflags | BUS_DMA_NOWAIT);
2515 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2519 dma->dma_size = size;
/* Error unwind: free DMA memory, destroy the tag, clear the handles. */
2522 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2524 bus_dma_tag_destroy(dma->dma_tag);
2526 dma->dma_map = NULL;
2527 dma->dma_tag = NULL;
/*
 * ixgbe_dma_free: tear down an area created by ixgbe_dma_malloc —
 * sync for CPU, unload the map, free the memory, destroy the tag.
 */
2532 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2534 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2535 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2536 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2537 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2538 bus_dma_tag_destroy(dma->dma_tag);
2542 /*********************************************************************
2544 * Allocate memory for the transmit and receive rings, and then
2545 * the descriptors associated with each, called only once at attach.
2547 **********************************************************************/
/*
 * ixgbe_allocate_queues: one-time attach-path allocation of the per-queue
 * structures (ix_queue, tx_ring, rx_ring arrays), each ring's descriptor
 * DMA area, its buffer arrays, and (FreeBSD >= 8) a buf_ring per TX queue.
 * txconf/rxconf count completed rings so the fail path (bottom) can unwind
 * exactly what was set up. Returns IXGBE_SUCCESS or an errno-style code
 * (goto targets elided in this listing).
 */
2549 ixgbe_allocate_queues(struct adapter *adapter)
2551 device_t dev = adapter->dev;
2552 struct ix_queue *que;
2553 struct tx_ring *txr;
2554 struct rx_ring *rxr;
2555 int rsize, tsize, error = IXGBE_SUCCESS;
2556 int txconf = 0, rxconf = 0;
2558 /* First allocate the top level queue structs */
2559 if (!(adapter->queues =
2560 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2561 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2562 device_printf(dev, "Unable to allocate queue memory\n");
2567 /* First allocate the TX ring struct memory */
2568 if (!(adapter->tx_rings =
2569 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2570 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2571 device_printf(dev, "Unable to allocate TX ring memory\n");
2576 /* Next allocate the RX */
2577 if (!(adapter->rx_rings =
2578 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2579 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2580 device_printf(dev, "Unable to allocate RX ring memory\n");
2585 /* For the ring itself */
2586 tsize = roundup2(adapter->num_tx_desc *
2587 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2590 * Now set up the TX queues, txconf is needed to handle the
2591 * possibility that things fail midcourse and we need to
2592 * undo memory gracefully
2594 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2595 /* Set up some basics */
2596 txr = &adapter->tx_rings[i];
2597 txr->adapter = adapter;
2600 /* Initialize the TX side lock */
2601 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2602 device_get_nameunit(dev), txr->me);
2603 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2605 if (ixgbe_dma_malloc(adapter, tsize,
2606 &txr->txdma, BUS_DMA_NOWAIT)) {
2608 "Unable to allocate TX Descriptor memory\n");
2612 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2613 bzero((void *)txr->tx_base, tsize);
2615 /* Now allocate transmit buffers for the ring */
2616 if (ixgbe_allocate_transmit_buffers(txr)) {
2618 "Critical Failure setting up transmit buffers\n");
2622 #if __FreeBSD_version >= 800000
2623 /* Allocate a buf ring */
2624 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2625 M_WAITOK, &txr->tx_mtx);
2626 if (txr->br == NULL) {
2628 "Critical Failure setting up buf ring\n");
2636 * Next the RX queues...
2638 rsize = roundup2(adapter->num_rx_desc *
2639 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2640 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2641 rxr = &adapter->rx_rings[i];
2642 /* Set up some basics */
2643 rxr->adapter = adapter;
2646 /* Initialize the RX side lock */
2647 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2648 device_get_nameunit(dev), rxr->me);
2649 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2651 if (ixgbe_dma_malloc(adapter, rsize,
2652 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2654 "Unable to allocate RxDescriptor memory\n");
2658 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2659 bzero((void *)rxr->rx_base, rsize);
2661 /* Allocate receive buffers for the ring*/
2662 if (ixgbe_allocate_receive_buffers(rxr)) {
2664 "Critical Failure setting up receive buffers\n");
2671 ** Finally set up the queue holding structs
2673 for (int i = 0; i < adapter->num_queues; i++) {
2674 que = &adapter->queues[i];
2675 que->adapter = adapter;
2676 que->txr = &adapter->tx_rings[i];
2677 que->rxr = &adapter->rx_rings[i];
/* Failure unwind: free only the rings completed (rxconf/txconf deep). */
2683 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2684 ixgbe_dma_free(adapter, &rxr->rxdma);
2686 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2687 ixgbe_dma_free(adapter, &txr->txdma);
2688 free(adapter->rx_rings, M_DEVBUF);
2690 free(adapter->tx_rings, M_DEVBUF);
2692 free(adapter->queues, M_DEVBUF);
2697 /*********************************************************************
2699 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2700 * the information needed to transmit a packet on the wire. This is
2701 * called only once at attach, setup is done every reset.
2703 **********************************************************************/
/*
 * ixgbe_allocate_transmit_buffers: create the per-ring TX mbuf DMA tag
 * (up to IXGBE_TSO_SIZE across ixgbe_num_segs segments), the tx_buffers
 * array, and one DMA map per descriptor. On any failure jumps to the
 * common fail path which frees everything via
 * ixgbe_free_transmit_structures. Returns 0 or an error code.
 * NOTE(review): listing has elided lines; code kept byte-identical.
 */
2705 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2707 struct adapter *adapter = txr->adapter;
2708 device_t dev = adapter->dev;
2709 struct ixgbe_tx_buf *txbuf;
2713 * Setup DMA descriptor areas.
2715 if ((error = bus_dma_tag_create(NULL, /* parent */
2716 1, 0, /* alignment, bounds */
2717 BUS_SPACE_MAXADDR, /* lowaddr */
2718 BUS_SPACE_MAXADDR, /* highaddr */
2719 NULL, NULL, /* filter, filterarg */
2720 IXGBE_TSO_SIZE, /* maxsize */
2721 ixgbe_num_segs, /* nsegments */
2722 PAGE_SIZE, /* maxsegsize */
2724 NULL, /* lockfunc */
2725 NULL, /* lockfuncarg */
2727 device_printf(dev,"Unable to allocate TX DMA tag\n");
2731 if (!(txr->tx_buffers =
2732 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2733 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2734 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2739 /* Create the descriptor buffer dma maps */
2740 txbuf = txr->tx_buffers;
2741 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2742 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2744 device_printf(dev, "Unable to create TX DMA map\n");
2751 /* We free all, it handles case where we are in the middle */
2752 ixgbe_free_transmit_structures(adapter);
2756 /*********************************************************************
2758 * Initialize a transmit ring.
2760 **********************************************************************/
/*
 * ixgbe_setup_transmit_ring: (re)initialize one TX ring for use —
 * zero the descriptor area, reset ring indices, free any mbufs left from
 * a previous run, arm ATR sampling on 82599, restore the available-
 * descriptor count, and sync the descriptor DMA map for the device.
 * Runs under the TX lock (the matching IXGBE_TX_LOCK is elided in this
 * listing; the UNLOCK is visible at the bottom).
 */
2762 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2764 struct adapter *adapter = txr->adapter;
2765 struct ixgbe_tx_buf *txbuf;
2768 /* Clear the old ring contents */
2770 bzero((void *)txr->tx_base,
2771 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2773 txr->next_avail_desc = 0;
2774 txr->next_to_clean = 0;
2776 /* Free any existing tx buffers. */
2777 txbuf = txr->tx_buffers;
2778 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2779 if (txbuf->m_head != NULL) {
2780 bus_dmamap_sync(txr->txtag, txbuf->map,
2781 BUS_DMASYNC_POSTWRITE);
2782 bus_dmamap_unload(txr->txtag, txbuf->map);
2783 m_freem(txbuf->m_head);
2784 txbuf->m_head = NULL;
2786 /* Clear the EOP index */
2787 txbuf->eop_index = -1;
2791 /* Set the rate at which we sample packets */
2792 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
2793 txr->atr_sample = atr_sample_rate;
2796 /* Set number of descriptors available */
2797 txr->tx_avail = adapter->num_tx_desc;
2799 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2800 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2801 IXGBE_TX_UNLOCK(txr);
2804 /*********************************************************************
2806 * Initialize all transmit rings.
2808 **********************************************************************/
/* ixgbe_setup_transmit_structures: run ixgbe_setup_transmit_ring on
 * every TX ring of the adapter. */
2810 ixgbe_setup_transmit_structures(struct adapter *adapter)
2812 struct tx_ring *txr = adapter->tx_rings;
2814 for (int i = 0; i < adapter->num_queues; i++, txr++)
2815 ixgbe_setup_transmit_ring(txr);
2820 /*********************************************************************
2822 * Enable transmit unit.
2824 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units: program the hardware TX side — per
 * ring: descriptor base/length registers, head/tail pointers, command
 * defaults, and head-writeback disable (register offset differs between
 * 82598 and 82599). For 82599 also enable the DMA TX engine and set MTQC,
 * briefly disabling the TX arbiter as the datasheet requires.
 * NOTE(review): break statements between switch cases are elided in this
 * listing; code kept byte-identical.
 */
2826 ixgbe_initialize_transmit_units(struct adapter *adapter)
2828 struct tx_ring *txr = adapter->tx_rings;
2829 struct ixgbe_hw *hw = &adapter->hw;
2831 /* Setup the Base and Length of the Tx Descriptor Ring */
2833 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2834 u64 tdba = txr->txdma.dma_paddr;
2837 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2838 (tdba & 0x00000000ffffffffULL));
2839 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2840 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2841 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2843 /* Setup the HW Tx Head and Tail descriptor pointers */
2844 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2845 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2847 /* Setup Transmit Descriptor Cmd Settings */
2848 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2849 txr->queue_status = IXGBE_QUEUE_IDLE;
2851 /* Disable Head Writeback */
2852 switch (hw->mac.type) {
2853 case ixgbe_mac_82598EB:
2854 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2856 case ixgbe_mac_82599EB:
2858 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2861 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2862 switch (hw->mac.type) {
2863 case ixgbe_mac_82598EB:
2864 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2866 case ixgbe_mac_82599EB:
2868 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
/* 82599-only: turn on the TX DMA engine and select 64Q/1PB queueing. */
2874 if (hw->mac.type == ixgbe_mac_82599EB) {
2875 u32 dmatxctl, rttdcs;
2876 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2877 dmatxctl |= IXGBE_DMATXCTL_TE;
2878 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2879 /* Disable arbiter to set MTQC */
2880 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2881 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2882 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2883 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2884 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2885 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2891 /*********************************************************************
2893 * Free all transmit rings.
2895 **********************************************************************/
/*
 * ixgbe_free_transmit_structures: for each TX ring, free its buffers and
 * descriptor DMA area (under the TX lock — the matching LOCK line is
 * elided in this listing), destroy the lock, then free the ring array.
 */
2897 ixgbe_free_transmit_structures(struct adapter *adapter)
2899 struct tx_ring *txr = adapter->tx_rings;
2901 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2903 ixgbe_free_transmit_buffers(txr);
2904 ixgbe_dma_free(adapter, &txr->txdma);
2905 IXGBE_TX_UNLOCK(txr);
2906 IXGBE_TX_LOCK_DESTROY(txr);
2908 free(adapter->tx_rings, M_DEVBUF);
2911 /*********************************************************************
2913 * Free transmit ring related data structures.
2915 **********************************************************************/
/*
 * ixgbe_free_transmit_buffers: release everything hanging off one TX
 * ring — per-descriptor mbufs and their DMA maps (unload/destroy),
 * the buf_ring (FreeBSD >= 8), the tx_buffers array, and the TX DMA tag.
 * Safe to call on a partially initialized ring (NULL checks throughout).
 */
2917 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2919 struct adapter *adapter = txr->adapter;
2920 struct ixgbe_tx_buf *tx_buffer;
2923 INIT_DEBUGOUT("free_transmit_ring: begin");
2925 if (txr->tx_buffers == NULL)
2928 tx_buffer = txr->tx_buffers;
2929 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2930 if (tx_buffer->m_head != NULL) {
2931 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2932 BUS_DMASYNC_POSTWRITE);
2933 bus_dmamap_unload(txr->txtag,
2935 m_freem(tx_buffer->m_head);
2936 tx_buffer->m_head = NULL;
2937 if (tx_buffer->map != NULL) {
2938 bus_dmamap_destroy(txr->txtag,
2940 tx_buffer->map = NULL;
/* Map exists but no mbuf attached: still unload and destroy the map. */
2942 } else if (tx_buffer->map != NULL) {
2943 bus_dmamap_unload(txr->txtag,
2945 bus_dmamap_destroy(txr->txtag,
2947 tx_buffer->map = NULL;
2950 #if __FreeBSD_version >= 800000
2951 if (txr->br != NULL)
2952 buf_ring_free(txr->br, M_DEVBUF);
2954 if (txr->tx_buffers != NULL) {
2955 free(txr->tx_buffers, M_DEVBUF);
2956 txr->tx_buffers = NULL;
2958 if (txr->txtag != NULL) {
2959 bus_dma_tag_destroy(txr->txtag);
2965 /*********************************************************************
2967 * Advanced Context Descriptor setup for VLAN or CSUM
2969 **********************************************************************/
/*
 * ixgbe_tx_ctx_setup: populate one advanced context descriptor describing
 * the VLAN tag and L3/L4 checksum offload parameters for the mbuf 'mp'.
 * Parses the Ethernet (and optional 802.1Q) header, then IPv4/IPv6, and
 * sets TCP/UDP/SCTP offload bits based on csum_flags. Consumes one
 * descriptor from the ring (indices adjusted at the bottom).
 * NOTE(review): the switch(etype)/switch(ipproto) opener lines and break
 * statements are elided in this listing; code kept byte-identical.
 */
2972 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2974 struct adapter *adapter = txr->adapter;
2975 struct ixgbe_adv_tx_context_desc *TXD;
2976 struct ixgbe_tx_buf *tx_buffer;
2977 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2978 struct ether_vlan_header *eh;
2980 struct ip6_hdr *ip6;
2981 int ehdrlen, ip_hlen = 0;
2984 bool offload = TRUE;
2985 int ctxd = txr->next_avail_desc;
/* If no offload flags are set there is nothing a context desc can add. */
2989 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2992 tx_buffer = &txr->tx_buffers[ctxd];
2993 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2996 ** In advanced descriptors the vlan tag must
2997 ** be placed into the descriptor itself.
2999 if (mp->m_flags & M_VLANTAG) {
3000 vtag = htole16(mp->m_pkthdr.ether_vtag);
3001 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3002 } else if (offload == FALSE)
3006 * Determine where frame payload starts.
3007 * Jump over vlan headers if already present,
3008 * helpful for QinQ too.
3010 eh = mtod(mp, struct ether_vlan_header *);
3011 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3012 etype = ntohs(eh->evl_proto);
3013 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3015 etype = ntohs(eh->evl_encap_proto);
3016 ehdrlen = ETHER_HDR_LEN;
3019 /* Set the ether header length */
3020 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* L3 dispatch (switch on etype — opener elided): IPv4 vs IPv6. */
3024 ip = (struct ip *)(mp->m_data + ehdrlen);
3025 ip_hlen = ip->ip_hl << 2;
3027 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3029 case ETHERTYPE_IPV6:
3030 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3031 ip_hlen = sizeof(struct ip6_hdr);
3032 ipproto = ip6->ip6_nxt;
3033 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3040 vlan_macip_lens |= ip_hlen;
3041 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* L4 dispatch (switch on ipproto — opener elided): TCP/UDP/SCTP csum. */
3045 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3046 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3050 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3051 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3054 #if __FreeBSD_version >= 800000
3056 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3057 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3065 /* Now copy bits into descriptor */
3066 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3067 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3068 TXD->seqnum_seed = htole32(0);
3069 TXD->mss_l4len_idx = htole32(0);
3071 tx_buffer->m_head = NULL;
3072 tx_buffer->eop_index = -1;
3074 /* We've consumed the first desc, adjust counters */
3075 if (++ctxd == adapter->num_tx_desc)
3077 txr->next_avail_desc = ctxd;
3083 /**********************************************************************
3085 * Setup work for hardware segmentation offload (TSO) on
3086 * adapters using advanced tx descriptors
3088 **********************************************************************/
/*
 * ixgbe_tso_setup: build the advanced context descriptor for a TSO (IPv4
 * TCP only) transmit. Locates the IP/TCP headers, seeds the TCP pseudo-
 * header checksum, fills VLAN/MACLEN/IPLEN and MSS/L4LEN fields, and
 * returns the payload length (total minus headers) via *paylen.
 * Returns FALSE for non-TCP or short first mbufs; TRUE return and the
 * wrap-around of ctxd are elided in this listing.
 */
3090 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3092 struct adapter *adapter = txr->adapter;
3093 struct ixgbe_adv_tx_context_desc *TXD;
3094 struct ixgbe_tx_buf *tx_buffer;
3095 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3096 u32 mss_l4len_idx = 0;
3098 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3099 struct ether_vlan_header *eh;
3105 * Determine where frame payload starts.
3106 * Jump over vlan headers if already present
3108 eh = mtod(mp, struct ether_vlan_header *);
3109 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3110 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3112 ehdrlen = ETHER_HDR_LEN;
3114 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3115 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3118 ctxd = txr->next_avail_desc;
3119 tx_buffer = &txr->tx_buffers[ctxd];
3120 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3122 ip = (struct ip *)(mp->m_data + ehdrlen);
3123 if (ip->ip_p != IPPROTO_TCP)
3124 return FALSE; /* 0 */
3126 ip_hlen = ip->ip_hl << 2;
3127 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-compute the pseudo-header checksum the hardware expects. */
3128 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3129 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3130 tcp_hlen = th->th_off << 2;
3131 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3133 /* This is used in the transmit desc in encap */
3134 *paylen = mp->m_pkthdr.len - hdrlen;
3136 /* VLAN MACLEN IPLEN */
3137 if (mp->m_flags & M_VLANTAG) {
3138 vtag = htole16(mp->m_pkthdr.ether_vtag);
3139 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3142 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3143 vlan_macip_lens |= ip_hlen;
3144 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3146 /* ADV DTYPE TUCMD */
3147 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3148 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3149 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3150 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
/* MSS and L4 header length for the segmentation engine. */
3154 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3155 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3156 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3158 TXD->seqnum_seed = htole32(0);
3159 tx_buffer->m_head = NULL;
3160 tx_buffer->eop_index = -1;
3162 if (++ctxd == adapter->num_tx_desc)
3166 txr->next_avail_desc = ctxd;
3172 ** This routine parses packet headers so that Flow
3173 ** Director can make a hashed filter table entry
3174 ** allowing traffic flows to be identified and kept
3175 ** on the same cpu. This would be a performance
3176 ** hit, but we only do it at IXGBE_FDIR_RATE of
/*
 * ixgbe_atr: extract the IPv4 5-tuple (plus VLAN id) from an outgoing
 * packet and install a Flow Director signature filter steering the
 * matching *receive* flow to this queue's MSI-X vector. Non-IPv4 and
 * non-TCP/UDP packets are ignored (early returns elided in listing).
 * Note src/dst are deliberately swapped — the filter matches the frames
 * as the receiver will see them.
 */
3180 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3182 struct adapter *adapter = txr->adapter;
3183 struct ix_queue *que;
3184 union ixgbe_atr_input atr_input;
3188 struct ether_vlan_header *eh;
3189 int ehdrlen, ip_hlen;
3190 u16 etype, vlan_id, src_port, dst_port, flex_bytes;
3191 u32 src_ipv4_addr, dst_ipv4_addr;
3192 u8 l4type = 0, ipproto = 0;
3194 eh = mtod(mp, struct ether_vlan_header *);
3195 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3196 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3198 ehdrlen = ETHER_HDR_LEN;
3199 etype = ntohs(eh->evl_proto);
3201 /* Only handling IPv4 */
3202 if (etype != ETHERTYPE_IP)
3205 ip = (struct ip *)(mp->m_data + ehdrlen);
3207 ip_hlen = ip->ip_hl << 2;
3208 src_port = dst_port = 0;
3210 /* check if we're UDP or TCP */
/* (switch on ip->ip_p — opener and breaks elided in this listing) */
3213 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3214 src_port = th->th_sport;
3215 dst_port = th->th_dport;
3216 l4type |= IXGBE_ATR_L4TYPE_TCP;
3219 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3220 src_port = uh->uh_sport;
3221 dst_port = uh->uh_dport;
3222 l4type |= IXGBE_ATR_L4TYPE_UDP;
3228 memset(&atr_input, 0, sizeof(union ixgbe_atr_input));
3230 vlan_id = htole16(mp->m_pkthdr.ether_vtag);
3231 src_ipv4_addr = ip->ip_src.s_addr;
3232 dst_ipv4_addr = ip->ip_dst.s_addr;
3234 que = &adapter->queues[txr->me];
3236 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
3237 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
3238 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
3239 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
3240 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
3241 /* src and dst are inverted, think how the receiver sees them */
3242 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
3243 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
3245 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
3246 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3247 &atr_input, que->msix);
3251 /**********************************************************************
3253 * Examine each tx_buffer in the used queue. If the hardware is done
3254 * processing the packet then free associated resources. The
3255 * tx_buffer is put back on the free queue.
3257 **********************************************************************/
/*
 * ixgbe_txeof: TX completion cleanup. Walks descriptors from
 * next_to_clean, and for every packet whose EOP descriptor has the DD
 * (done) bit set, unmaps and frees its mbufs and reclaims the
 * descriptors. Also drives the per-queue watchdog: progress refreshes
 * watchdog_time, lack of progress past IXGBE_WATCHDOG marks the queue
 * HUNG. Caller must hold the TX lock (asserted below).
 * NOTE(review): listing has elided lines (loop counters, returns);
 * code kept byte-identical.
 */
3259 ixgbe_txeof(struct tx_ring *txr)
3261 struct adapter *adapter = txr->adapter;
3262 struct ifnet *ifp = adapter->ifp;
3263 u32 first, last, done, processed;
3264 struct ixgbe_tx_buf *tx_buffer;
3265 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3267 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Fast path: ring fully free, nothing to clean. */
3269 if (txr->tx_avail == adapter->num_tx_desc) {
3270 txr->queue_status = IXGBE_QUEUE_IDLE;
3275 first = txr->next_to_clean;
3276 tx_buffer = &txr->tx_buffers[first];
3277 /* For cleanup we just use legacy struct */
3278 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3279 last = tx_buffer->eop_index;
3282 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3285 ** Get the index of the first descriptor
3286 ** BEYOND the EOP and call that 'done'.
3287 ** I do this so the comparison in the
3288 ** inner while loop below can be simple
3290 if (++last == adapter->num_tx_desc) last = 0;
3293 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3294 BUS_DMASYNC_POSTREAD);
3296 ** Only the EOP descriptor of a packet now has the DD
3297 ** bit set, this is what we look for...
3299 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3300 /* We clean the range of the packet */
3301 while (first != done) {
3302 tx_desc->upper.data = 0;
3303 tx_desc->lower.data = 0;
3304 tx_desc->buffer_addr = 0;
3308 if (tx_buffer->m_head) {
3310 tx_buffer->m_head->m_pkthdr.len;
3311 bus_dmamap_sync(txr->txtag,
3313 BUS_DMASYNC_POSTWRITE);
3314 bus_dmamap_unload(txr->txtag,
3316 m_freem(tx_buffer->m_head);
3317 tx_buffer->m_head = NULL;
3318 tx_buffer->map = NULL;
3320 tx_buffer->eop_index = -1;
/* Record progress so the watchdog below sees activity. */
3321 txr->watchdog_time = ticks;
3323 if (++first == adapter->num_tx_desc)
3326 tx_buffer = &txr->tx_buffers[first];
3328 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3332 /* See if there is more work now */
3333 last = tx_buffer->eop_index;
3336 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3337 /* Get next done point */
3338 if (++last == adapter->num_tx_desc) last = 0;
3343 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3344 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3346 txr->next_to_clean = first;
3349 ** Watchdog calculation, we know there's
3350 ** work outstanding or the first return
3351 ** would have been taken, so none processed
3352 ** for too long indicates a hang.
3354 if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
3355 txr->queue_status = IXGBE_QUEUE_HUNG;
3358 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3359 * it is OK to send packets. If there are no pending descriptors,
3360 * clear the timeout. Otherwise, if some descriptors have been freed,
3361 * restart the timeout.
3363 if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3364 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3365 if (txr->tx_avail == adapter->num_tx_desc) {
3366 txr->queue_status = IXGBE_QUEUE_IDLE;
3374 /*********************************************************************
3376 * Refresh mbuf buffers for RX descriptor rings
3377 * - now keeps its own state so discards due to resource
3378 * exhaustion are unnecessary, if an mbuf cannot be obtained
3379 * it just returns, keeping its placeholder, thus it can simply
3380 * be recalled to try again.
3382 **********************************************************************/
/*
 * ixgbe_refresh_mbufs: replenish RX descriptors from next_to_refresh up
 * to (but not including) 'limit'. When header split is on, each slot gets
 * a small header mbuf (hmap) and a jumbo-cluster payload mbuf (pmap);
 * otherwise only the payload side is refreshed. An allocation failure
 * leaves the slot as a placeholder for a later retry. If anything was
 * refreshed, bump the ring tail (RDT) so hardware can use it.
 */
3384 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3386 struct adapter *adapter = rxr->adapter;
3387 bus_dma_segment_t hseg[1];
3388 bus_dma_segment_t pseg[1];
3389 struct ixgbe_rx_buf *rxbuf;
3390 struct mbuf *mh, *mp;
3391 int i, nsegs, error, cleaned;
3393 i = rxr->next_to_refresh;
3394 cleaned = -1; /* Signify no completions */
3395 while (i != limit) {
3396 rxbuf = &rxr->rx_buffers[i];
/* No header split: skip the header-mbuf work (goto target elided). */
3397 if (rxr->hdr_split == FALSE)
3400 if (rxbuf->m_head == NULL) {
3401 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3407 mh->m_pkthdr.len = mh->m_len = MHLEN;
3409 mh->m_flags |= M_PKTHDR;
3410 /* Get the memory mapping */
3411 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3412 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
3414 printf("Refresh mbufs: hdr dmamap load"
3415 " failure - %d\n", error);
3417 rxbuf->m_head = NULL;
3421 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3422 BUS_DMASYNC_PREREAD);
3423 rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
3426 if (rxbuf->m_pack == NULL) {
3427 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3428 M_PKTHDR, adapter->rx_mbuf_sz);
3434 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3435 /* Get the memory mapping */
3436 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3437 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
3439 printf("Refresh mbufs: payload dmamap load"
3440 " failure - %d\n", error);
3442 rxbuf->m_pack = NULL;
3446 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3447 BUS_DMASYNC_PREREAD);
3448 rxr->rx_base[i].read.pkt_addr =
3449 htole64(pseg[0].ds_addr);
3452 /* Calculate next index */
3453 if (++i == adapter->num_rx_desc)
3455 /* This is the work marker for refresh */
3456 rxr->next_to_refresh = i;
3459 if (cleaned != -1) /* If we refreshed some, bump tail */
3460 IXGBE_WRITE_REG(&adapter->hw,
3461 IXGBE_RDT(rxr->me), cleaned);
3465 /*********************************************************************
3467 * Allocate memory for rx_buffer structures. Since we use one
3468 * rx_buffer per received packet, the maximum number of rx_buffer's
3469 * that we'll need is equal to the number of receive descriptors
3470 * that we've allocated.
3472 **********************************************************************/
/*
 * ixgbe_allocate_receive_buffers: attach-time RX buffer setup — allocate
 * the rx_buffers array, create two DMA tags (MSIZE-bounded for headers,
 * MJUM9BYTES-bounded for payloads), and one head + one packet map per
 * descriptor. On failure unwinds via ixgbe_free_receive_structures.
 */
3474 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3476 struct adapter *adapter = rxr->adapter;
3477 device_t dev = adapter->dev;
3478 struct ixgbe_rx_buf *rxbuf;
3479 int i, bsize, error;
3481 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3482 if (!(rxr->rx_buffers =
3483 (struct ixgbe_rx_buf *) malloc(bsize,
3484 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3485 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Header tag: small (MSIZE) buffers for the split-header path. */
3490 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3491 1, 0, /* alignment, bounds */
3492 BUS_SPACE_MAXADDR, /* lowaddr */
3493 BUS_SPACE_MAXADDR, /* highaddr */
3494 NULL, NULL, /* filter, filterarg */
3495 MSIZE, /* maxsize */
3497 MSIZE, /* maxsegsize */
3499 NULL, /* lockfunc */
3500 NULL, /* lockfuncarg */
3502 device_printf(dev, "Unable to create RX DMA tag\n");
/* Payload tag: up to 9KB jumbo clusters. */
3506 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3507 1, 0, /* alignment, bounds */
3508 BUS_SPACE_MAXADDR, /* lowaddr */
3509 BUS_SPACE_MAXADDR, /* highaddr */
3510 NULL, NULL, /* filter, filterarg */
3511 MJUM9BYTES, /* maxsize */
3513 MJUM9BYTES, /* maxsegsize */
3515 NULL, /* lockfunc */
3516 NULL, /* lockfuncarg */
3518 device_printf(dev, "Unable to create RX DMA tag\n");
3522 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3523 rxbuf = &rxr->rx_buffers[i];
3524 error = bus_dmamap_create(rxr->htag,
3525 BUS_DMA_NOWAIT, &rxbuf->hmap);
3527 device_printf(dev, "Unable to create RX head map\n");
3530 error = bus_dmamap_create(rxr->ptag,
3531 BUS_DMA_NOWAIT, &rxbuf->pmap);
3533 device_printf(dev, "Unable to create RX pkt map\n");
3541 /* Frees all, but can handle partial completion */
3542 ixgbe_free_receive_structures(adapter);
3547 ** Used to detect a descriptor that has
3548 ** been merged by Hardware RSC.
/* ixgbe_rsc_count: extract the RSC coalesce count field from an RX
 * descriptor's writeback area (little-endian converted). */
3551 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3553 return (le32toh(rx->wb.lower.lo_dword.data) &
3554 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
3557 /*********************************************************************
3559 * Initialize Hardware RSC (LRO) feature on 82599
3560 * for an RX ring, this is toggled by the LRO capability
3561 * even though it is transparent to the stack.
3563 **********************************************************************/
/*
 * ixgbe_setup_hw_rsc: enable Receive Side Coalescing on this ring —
 * program RDRXCTL, enable RSCCTL with a max-descriptor cap sized so a
 * coalesced frame never exceeds 64KB, turn on TCP header recognition,
 * and disable coalescing of pure ACKs.
 */
3565 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3567 struct adapter *adapter = rxr->adapter;
3568 struct ixgbe_hw *hw = &adapter->hw;
3569 u32 rscctrl, rdrxctl;
3571 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3572 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3573 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3574 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3575 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3577 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3578 rscctrl |= IXGBE_RSCCTL_RSCEN;
3580 ** Limit the total number of descriptors that
3581 ** can be combined, so it does not exceed 64K
3583 if (adapter->rx_mbuf_sz == MCLBYTES)
3584 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3585 else /* using 4K clusters */
3586 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3587 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3589 /* Enable TCP header recognition */
3590 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3591 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3592 IXGBE_PSRTYPE_TCPHDR));
3594 /* Disable RSC for ACK packets */
3595 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3596 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
/*
 * ixgbe_free_receive_ring: release every mbuf attached to this RX ring —
 * sync/unload each header and payload DMA map, free the mbufs (restoring
 * M_PKTHDR so m_freem handles them correctly), and NULL the slots so the
 * ring can be repopulated.
 */
3603 ixgbe_free_receive_ring(struct rx_ring *rxr)
3605 struct adapter *adapter;
3606 struct ixgbe_rx_buf *rxbuf;
3609 adapter = rxr->adapter;
3610 for (i = 0; i < adapter->num_rx_desc; i++) {
3611 rxbuf = &rxr->rx_buffers[i];
3612 if (rxbuf->m_head != NULL) {
3613 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3614 BUS_DMASYNC_POSTREAD);
3615 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3616 rxbuf->m_head->m_flags |= M_PKTHDR;
3617 m_freem(rxbuf->m_head);
3619 if (rxbuf->m_pack != NULL) {
3620 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3621 BUS_DMASYNC_POSTREAD);
3622 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3623 rxbuf->m_pack->m_flags |= M_PKTHDR;
3624 m_freem(rxbuf->m_pack);
3626 rxbuf->m_head = NULL;
3627 rxbuf->m_pack = NULL;
3632 /*********************************************************************
3634 * Initialize a receive ring and its buffers.
3636 **********************************************************************/
3638 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3640 struct adapter *adapter;
3643 struct ixgbe_rx_buf *rxbuf;
3644 bus_dma_segment_t pseg[1], hseg[1];
3645 struct lro_ctrl *lro = &rxr->lro;
3646 int rsize, nsegs, error = 0;
3648 adapter = rxr->adapter;
3652 /* Clear the ring contents */
3654 rsize = roundup2(adapter->num_rx_desc *
3655 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3656 bzero((void *)rxr->rx_base, rsize);
3658 /* Free current RX buffer structs and their mbufs */
3659 ixgbe_free_receive_ring(rxr);
3661 /* Configure header split? */
3662 if (ixgbe_header_split)
3663 rxr->hdr_split = TRUE;
3665 /* Now replenish the mbufs */
3666 for (int j = 0; j != adapter->num_rx_desc; ++j) {
3667 struct mbuf *mh, *mp;
3669 rxbuf = &rxr->rx_buffers[j];
3671 ** Don't allocate mbufs if not
3672 ** doing header split, it's wasteful
3674 if (rxr->hdr_split == FALSE)
3677 /* First the header */
3678 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
3679 if (rxbuf->m_head == NULL) {
/* ETHER_ALIGN keeps the IP header 4-byte aligned after the 14-byte
 * Ethernet header. */
3683 m_adj(rxbuf->m_head, ETHER_ALIGN);
3685 mh->m_len = mh->m_pkthdr.len = MHLEN;
3686 mh->m_flags |= M_PKTHDR;
3687 /* Get the memory mapping */
3688 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3689 rxbuf->hmap, rxbuf->m_head, hseg,
3690 &nsegs, BUS_DMA_NOWAIT);
3691 if (error != 0) /* Nothing elegant to do here */
3693 bus_dmamap_sync(rxr->htag,
3694 rxbuf->hmap, BUS_DMASYNC_PREREAD);
3695 /* Update descriptor */
3696 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
3699 /* Now the payload cluster */
3700 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
3701 M_PKTHDR, adapter->rx_mbuf_sz);
3702 if (rxbuf->m_pack == NULL) {
3707 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3708 /* Get the memory mapping */
3709 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3710 rxbuf->pmap, mp, pseg,
3711 &nsegs, BUS_DMA_NOWAIT);
3714 bus_dmamap_sync(rxr->ptag,
3715 rxbuf->pmap, BUS_DMASYNC_PREREAD);
3716 /* Update descriptor */
3717 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
3721 /* Setup our descriptor indices */
3722 rxr->next_to_check = 0;
3723 rxr->next_to_refresh = 0;
3724 rxr->lro_enabled = FALSE;
3725 rxr->rx_split_packets = 0;
3728 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3729 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3732 ** Now set up the LRO interface:
3733 ** 82598 uses software LRO, the
3734 ** 82599 uses a hardware assist.
3736 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
3737 (ifp->if_capenable & IFCAP_RXCSUM) &&
3738 (ifp->if_capenable & IFCAP_LRO))
3739 ixgbe_setup_hw_rsc(rxr);
3740 else if (ifp->if_capenable & IFCAP_LRO) {
3741 int err = tcp_lro_init(lro);
3743 device_printf(dev, "LRO Initialization failed!\n");
3746 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3747 rxr->lro_enabled = TRUE;
3748 lro->ifp = adapter->ifp;
3751 IXGBE_RX_UNLOCK(rxr);
/* Error path: release anything allocated above, then drop the lock. */
3755 ixgbe_free_receive_ring(rxr);
3756 IXGBE_RX_UNLOCK(rxr);
3760 /*********************************************************************
3762 * Initialize all receive rings.
3764 **********************************************************************/
3766 ixgbe_setup_receive_structures(struct adapter *adapter)
3768 struct rx_ring *rxr = adapter->rx_rings;
/* Set up each ring in turn; on the first failure fall through to the
 * cleanup below with 'j' marking the ring that failed. */
3771 for (j = 0; j < adapter->num_queues; j++, rxr++)
3772 if (ixgbe_setup_receive_ring(rxr))
3778 * Free RX buffers allocated so far, we will only handle
3779 * the rings that completed, the failing case will have
3780 * cleaned up for itself. 'j' failed, so its the terminus.
3782 for (int i = 0; i < j; ++i) {
3783 rxr = &adapter->rx_rings[i];
3784 ixgbe_free_receive_ring(rxr);
3790 /*********************************************************************
3792 * Setup receive registers and features.
3794 **********************************************************************/
3795 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3798 ixgbe_initialize_receive_units(struct adapter *adapter)
3800 struct rx_ring *rxr = adapter->rx_rings;
3801 struct ixgbe_hw *hw = &adapter->hw;
3802 struct ifnet *ifp = adapter->ifp;
3803 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3804 u32 reta, mrqc = 0, hlreg, random[10];
3808 * Make sure receives are disabled while
3809 * setting up the descriptor ring
3811 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3812 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3813 rxctrl & ~IXGBE_RXCTRL_RXEN);
3815 /* Enable broadcasts */
3816 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3817 fctrl |= IXGBE_FCTRL_BAM;
3818 fctrl |= IXGBE_FCTRL_DPF;
3819 fctrl |= IXGBE_FCTRL_PMCF;
3820 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3822 /* Set for Jumbo Frames? */
3823 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3824 if (ifp->if_mtu > ETHERMTU) {
3825 hlreg |= IXGBE_HLREG0_JUMBOEN;
3826 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3828 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3829 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3831 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Per-queue descriptor ring base/length and SRRCTL programming. */
3833 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3834 u64 rdba = rxr->rxdma.dma_paddr;
3836 /* Setup the Base and Length of the Rx Descriptor Ring */
3837 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3838 (rdba & 0x00000000ffffffffULL));
3839 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3840 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3841 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3843 /* Set up the SRRCTL register */
3844 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3845 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3846 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3848 if (rxr->hdr_split) {
3849 /* Use a standard mbuf for the header */
3850 srrctl |= ((IXGBE_RX_HDR <<
3851 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3852 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3853 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3855 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3856 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3858 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3859 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3860 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3863 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3864 /* PSRTYPE must be initialized in 82599 */
3865 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3866 IXGBE_PSRTYPE_UDPHDR |
3867 IXGBE_PSRTYPE_IPV4HDR |
3868 IXGBE_PSRTYPE_IPV6HDR;
3869 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3872 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
/* With multiple queues, enable RSS: seed the hash key, fill the
 * 128-entry redirection table, and select the hashed packet types. */
3875 if (adapter->num_queues > 1) {
3879 /* set up random bits */
3880 arc4rand(&random, sizeof(random), 0);
3882 /* Set up the redirection table */
3883 for (i = 0, j = 0; i < 128; i++, j++) {
3884 if (j == adapter->num_queues) j = 0;
3885 reta = (reta << 8) | (j * 0x11);
3887 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3890 /* Now fill our hash function seeds */
3891 for (int i = 0; i < 10; i++)
3892 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3894 /* Perform hash on these packet types */
3895 mrqc = IXGBE_MRQC_RSSEN
3896 | IXGBE_MRQC_RSS_FIELD_IPV4
3897 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3898 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3899 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3900 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3901 | IXGBE_MRQC_RSS_FIELD_IPV6
3902 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3903 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3904 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3905 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3907 /* RSS and RX IPP Checksum are mutually exclusive */
3908 rxcsum |= IXGBE_RXCSUM_PCSD;
3911 if (ifp->if_capenable & IFCAP_RXCSUM)
3912 rxcsum |= IXGBE_RXCSUM_PCSD;
3914 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3915 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3917 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3922 /*********************************************************************
3924 * Free all receive rings.
3926 **********************************************************************/
3928 ixgbe_free_receive_structures(struct adapter *adapter)
3930 struct rx_ring *rxr = adapter->rx_rings;
/* Tear down each ring: buffers, LRO state, then the descriptor DMA
 * memory; finally release the ring array itself. */
3932 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3933 struct lro_ctrl *lro = &rxr->lro;
3934 ixgbe_free_receive_buffers(rxr);
3935 /* Free LRO memory */
3937 /* Free the ring memory as well */
3938 ixgbe_dma_free(adapter, &rxr->rxdma);
3941 free(adapter->rx_rings, M_DEVBUF);
3945 /*********************************************************************
3947 * Free receive ring data structures
3949 **********************************************************************/
3951 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3953 struct adapter *adapter = rxr->adapter;
3954 struct ixgbe_rx_buf *rxbuf;
3956 INIT_DEBUGOUT("free_receive_structures: begin");
3958 /* Cleanup any existing buffers */
3959 if (rxr->rx_buffers != NULL) {
3960 for (int i = 0; i < adapter->num_rx_desc; i++) {
3961 rxbuf = &rxr->rx_buffers[i];
3962 if (rxbuf->m_head != NULL) {
3963 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3964 BUS_DMASYNC_POSTREAD);
3965 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3966 rxbuf->m_head->m_flags |= M_PKTHDR;
3967 m_freem(rxbuf->m_head);
3969 if (rxbuf->m_pack != NULL) {
3970 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3971 BUS_DMASYNC_POSTREAD);
3972 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3973 rxbuf->m_pack->m_flags |= M_PKTHDR;
3974 m_freem(rxbuf->m_pack);
3976 rxbuf->m_head = NULL;
3977 rxbuf->m_pack = NULL;
/* Unlike ixgbe_free_receive_ring(), this path also destroys
 * the per-buffer DMA maps and, below, the DMA tags. */
3978 if (rxbuf->hmap != NULL) {
3979 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3982 if (rxbuf->pmap != NULL) {
3983 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3987 if (rxr->rx_buffers != NULL) {
3988 free(rxr->rx_buffers, M_DEVBUF);
3989 rxr->rx_buffers = NULL;
3993 if (rxr->htag != NULL) {
3994 bus_dma_tag_destroy(rxr->htag);
3997 if (rxr->ptag != NULL) {
3998 bus_dma_tag_destroy(rxr->ptag);
/* Hand a completed RX frame to software LRO when eligible, otherwise
 * pass it straight up via if_input(). */
4005 static __inline void
4006 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
4010 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
4011 * should be computed by hardware. Also it should not have VLAN tag in
4014 if (rxr->lro_enabled &&
4015 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
4016 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4017 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
4018 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
4019 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
4020 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
4022 * Send to the stack if:
4023 ** - LRO not enabled, or
4024 ** - no LRO resources, or
4025 ** - lro enqueue fails
4027 if (rxr->lro.lro_cnt != 0)
4028 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
4031 (*ifp->if_input)(ifp, m);
/* Drop the frame at ring slot i: free any partially assembled chain
 * plus the slot's header/payload mbufs so the refresh path can
 * reallocate clean buffers. */
4034 static __inline void
4035 ixgbe_rx_discard(struct rx_ring *rxr, int i)
4037 struct ixgbe_rx_buf *rbuf;
4039 rbuf = &rxr->rx_buffers[i];
4041 if (rbuf->fmp != NULL) {/* Partial chain ? */
4042 rbuf->fmp->m_flags |= M_PKTHDR;
4048 ** With advanced descriptors the writeback
4049 ** clobbers the buffer addrs, so it's easier
4050 ** to just free the existing mbufs and take
4051 ** the normal refresh path to get new buffers
4055 m_free(rbuf->m_head);
4056 rbuf->m_head = NULL;
4060 m_free(rbuf->m_pack);
4061 rbuf->m_pack = NULL;
4068 /*********************************************************************
4070 * This routine executes in interrupt context. It replenishes
4071 * the mbufs in the descriptor and sends data which has been
4072 * dma'ed into host memory to upper layer.
4074 * We loop at most count times if count is > 0, or until done if
4077 * Return TRUE for more work, FALSE for all clean.
4078 *********************************************************************/
4080 ixgbe_rxeof(struct ix_queue *que, int count)
4082 struct adapter *adapter = que->adapter;
4083 struct rx_ring *rxr = que->rxr;
4084 struct ifnet *ifp = adapter->ifp;
4085 struct lro_ctrl *lro = &rxr->lro;
4086 struct lro_entry *queued;
4087 int i, nextp, processed = 0;
4089 union ixgbe_adv_rx_desc *cur;
4090 struct ixgbe_rx_buf *rbuf, *nbuf;
/* Main descriptor loop: walk writeback descriptors starting at
 * next_to_check until DD is clear or the budget is spent. */
4094 for (i = rxr->next_to_check; count != 0;) {
4095 struct mbuf *sendmp, *mh, *mp;
4097 u16 hlen, plen, hdr, vtag;
4100 /* Sync the ring. */
4101 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4102 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4104 cur = &rxr->rx_base[i];
4105 staterr = le32toh(cur->wb.upper.status_error);
4107 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
4109 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4116 cur->wb.upper.status_error = 0;
4117 rbuf = &rxr->rx_buffers[i];
/* Pull the writeback fields we need while they are still hot. */
4121 plen = le16toh(cur->wb.upper.length);
4122 ptype = le32toh(cur->wb.lower.lo_dword.data) &
4123 IXGBE_RXDADV_PKTTYPE_MASK;
4124 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
4125 vtag = le16toh(cur->wb.upper.vlan);
4126 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
4128 /* Make sure bad packets are discarded */
4129 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
4132 rxr->rx_discarded++;
4134 rxr->discard = FALSE;
4136 rxr->discard = TRUE;
4137 ixgbe_rx_discard(rxr, i);
4142 ** On 82599 which supports a hardware
4143 ** LRO (called HW RSC), packets need
4144 ** not be fragmented across sequential
4145 ** descriptors, rather the next descriptor
4146 ** is indicated in bits of the descriptor.
4147 ** This also means that we might process
4148 ** more than one packet at a time, something
4149 ** that has never been true before, it
4150 ** required eliminating global chain pointers
4151 ** in favor of what we are doing here. -jfv
4155 ** Figure out the next descriptor
4158 if (rxr->hw_rsc == TRUE) {
4159 rsc = ixgbe_rsc_count(cur);
4160 rxr->rsc_num += (rsc - 1);
4162 if (rsc) { /* Get hardware index */
4164 IXGBE_RXDADV_NEXTP_MASK) >>
4165 IXGBE_RXDADV_NEXTP_SHIFT);
4166 } else { /* Just sequential */
4168 if (nextp == adapter->num_rx_desc)
4171 nbuf = &rxr->rx_buffers[nextp];
4175 ** The header mbuf is ONLY used when header
4176 ** split is enabled, otherwise we get normal
4177 ** behavior, ie, both header and payload
4178 ** are DMA'd into the payload buffer.
4180 ** Rather than using the fmp/lmp global pointers
4181 ** we now keep the head of a packet chain in the
4182 ** buffer struct and pass this along from one
4183 ** descriptor to the next, until we get EOP.
4185 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
4186 /* This must be an initial descriptor */
4187 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
4188 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
4189 if (hlen > IXGBE_RX_HDR)
4190 hlen = IXGBE_RX_HDR;
4192 mh->m_flags |= M_PKTHDR;
4194 mh->m_pkthdr.len = mh->m_len;
4195 /* Null buf pointer so it is refreshed */
4196 rbuf->m_head = NULL;
4198 ** Check the payload length, this
4199 ** could be zero if its a small
4205 mp->m_flags &= ~M_PKTHDR;
4207 mh->m_pkthdr.len += mp->m_len;
4208 /* Null buf pointer so it is refreshed */
4209 rbuf->m_pack = NULL;
4210 rxr->rx_split_packets++;
4213 ** Now create the forward
4214 ** chain so when complete
4218 /* stash the chain head */
4220 /* Make forward chain */
4222 mp->m_next = nbuf->m_pack;
4224 mh->m_next = nbuf->m_pack;
4226 /* Singlet, prepare to send */
4228 if ((adapter->num_vlans) &&
4229 (staterr & IXGBE_RXD_STAT_VP)) {
4230 sendmp->m_pkthdr.ether_vtag = vtag;
4231 sendmp->m_flags |= M_VLANTAG;
4236 ** Either no header split, or a
4237 ** secondary piece of a fragmented
4242 ** See if there is a stored head
4243 ** that determines what we are
4246 rbuf->m_pack = rbuf->fmp = NULL;
4248 if (sendmp != NULL) /* secondary frag */
4249 sendmp->m_pkthdr.len += mp->m_len;
4251 /* first desc of a non-ps chain */
4253 sendmp->m_flags |= M_PKTHDR;
4254 sendmp->m_pkthdr.len = mp->m_len;
4255 if (staterr & IXGBE_RXD_STAT_VP) {
4256 sendmp->m_pkthdr.ether_vtag = vtag;
4257 sendmp->m_flags |= M_VLANTAG;
4260 /* Pass the head pointer on */
4264 mp->m_next = nbuf->m_pack;
4268 /* Sending this frame? */
4270 sendmp->m_pkthdr.rcvif = ifp;
4273 /* capture data for AIM */
4274 rxr->bytes += sendmp->m_pkthdr.len;
4275 rxr->rx_bytes += sendmp->m_pkthdr.len;
4276 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
4277 ixgbe_rx_checksum(staterr, sendmp, ptype);
4278 #if __FreeBSD_version >= 800000
4279 sendmp->m_pkthdr.flowid = que->msix;
4280 sendmp->m_flags |= M_FLOWID;
4284 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4285 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4287 /* Advance our pointers to the next descriptor. */
4288 if (++i == adapter->num_rx_desc)
4291 /* Now send to the stack or do LRO */
4293 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
4295 /* Every 8 descriptors we go to refresh mbufs */
4296 if (processed == 8) {
4297 ixgbe_refresh_mbufs(rxr, i);
4302 /* Refresh any remaining buf structs */
4303 if (processed != 0) {
4304 ixgbe_refresh_mbufs(rxr, i);
4308 rxr->next_to_check = i;
4311 * Flush any outstanding LRO work
4313 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
4314 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4315 tcp_lro_flush(lro, queued);
4318 IXGBE_RX_UNLOCK(rxr);
4321 ** We still have cleaning to do?
4322 ** Schedule another interrupt if so.
4324 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
4325 ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
4333 /*********************************************************************
4335 * Verify that the hardware indicated that the checksum is valid.
4336 * Inform the stack about the status of checksum so that stack
4337 * doesn't spend time verifying the checksum.
4339 *********************************************************************/
4341 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
/* Low 16 bits carry the status field, bits 24-31 carry the errors. */
4343 u16 status = (u16) staterr;
4344 u8 errors = (u8) (staterr >> 24);
4347 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4348 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
4351 if (status & IXGBE_RXD_STAT_IPCS) {
4352 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4353 /* IP Checksum Good */
4354 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4355 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4358 mp->m_pkthdr.csum_flags = 0;
4360 if (status & IXGBE_RXD_STAT_L4CS) {
4361 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4362 #if __FreeBSD_version >= 800000
4364 type = CSUM_SCTP_VALID;
4366 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4367 mp->m_pkthdr.csum_flags |= type;
4369 mp->m_pkthdr.csum_data = htons(0xffff);
4377 ** This routine is run via a vlan config EVENT,
4378 ** it enables us to use the HW Filter table since
4379 ** we can get the vlan id. This just creates the
4380 ** entry in the soft version of the VFTA, init will
4381 ** repopulate the real table.
4384 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4386 struct adapter *adapter = ifp->if_softc;
4389 if (ifp->if_softc != arg) /* Not our event */
4392 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4395 IXGBE_CORE_LOCK(adapter);
/* VFTA is a bit array: word index = vtag >> 5, bit = vtag & 0x1F. */
4396 index = (vtag >> 5) & 0x7F;
4398 adapter->shadow_vfta[index] |= (1 << bit);
4399 ++adapter->num_vlans;
/* Re-init so the shadow table is pushed to the hardware VFTA. */
4400 ixgbe_init_locked(adapter);
4401 IXGBE_CORE_UNLOCK(adapter);
4405 ** This routine is run via a vlan
4406 ** unconfig EVENT, remove our entry
4407 ** in the soft vfta.
4410 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4412 struct adapter *adapter = ifp->if_softc;
4415 if (ifp->if_softc != arg)
4418 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4421 IXGBE_CORE_LOCK(adapter);
/* Clear the matching bit in the shadow VFTA (mirror of register). */
4422 index = (vtag >> 5) & 0x7F;
4424 adapter->shadow_vfta[index] &= ~(1 << bit);
4425 --adapter->num_vlans;
4426 /* Re-init to load the changes */
4427 ixgbe_init_locked(adapter);
4428 IXGBE_CORE_UNLOCK(adapter);
/* Reprogram VLAN filtering/stripping after a soft reset, using the
 * shadow VFTA kept by the register/unregister event handlers. */
4432 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4434 struct ifnet *ifp = adapter->ifp;
4435 struct ixgbe_hw *hw = &adapter->hw;
4440 ** We get here thru init_locked, meaning
4441 ** a soft reset, this has already cleared
4442 ** the VFTA and other state, so if there
4443 ** have been no vlans registered do nothing.
4445 if (adapter->num_vlans == 0)
4449 ** A soft reset zeroes out the VFTA, so
4450 ** we need to repopulate it now.
4452 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4453 if (adapter->shadow_vfta[i] != 0)
4454 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4455 adapter->shadow_vfta[i]);
4457 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4458 /* Enable the Filter Table if enabled */
4459 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
4460 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4461 ctrl |= IXGBE_VLNCTRL_VFE;
4463 if (hw->mac.type == ixgbe_mac_82598EB)
4464 ctrl |= IXGBE_VLNCTRL_VME;
4465 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4467 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4468 if (hw->mac.type == ixgbe_mac_82599EB)
4469 for (int i = 0; i < adapter->num_queues; i++) {
4470 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4471 ctrl |= IXGBE_RXDCTL_VME;
4472 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
/* Unmask device interrupts: the common causes via EIMS, autoclear
 * setup for MSI-X, and finally each queue vector individually. */
4477 ixgbe_enable_intr(struct adapter *adapter)
4479 struct ixgbe_hw *hw = &adapter->hw;
4480 struct ix_queue *que = adapter->queues;
4481 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4484 /* Enable Fan Failure detection */
4485 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4486 mask |= IXGBE_EIMS_GPI_SDP1;
4488 /* 82599 specific interrupts */
4489 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4490 mask |= IXGBE_EIMS_ECC;
4491 mask |= IXGBE_EIMS_GPI_SDP1;
4492 mask |= IXGBE_EIMS_GPI_SDP2;
4494 mask |= IXGBE_EIMS_FLOW_DIR;
4498 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4500 /* With RSS we use auto clear */
4501 if (adapter->msix_mem) {
4502 mask = IXGBE_EIMS_ENABLE_MASK;
4503 /* Don't autoclear Link */
4504 mask &= ~IXGBE_EIMS_OTHER;
4505 mask &= ~IXGBE_EIMS_LSC;
4506 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4510 ** Now enable all queues, this is done separately to
4511 ** allow for handling the extended (beyond 32) MSIX
4512 ** vectors that can be used by 82599
4514 for (int i = 0; i < adapter->num_queues; i++, que++)
4515 ixgbe_enable_queue(adapter, que->msix);
4517 IXGBE_WRITE_FLUSH(hw);
/* Mask all interrupts. The 82598 uses a single 32-bit EIMC write; the
 * newer MACs need the extended EIMC_EX registers for the upper vectors. */
4523 ixgbe_disable_intr(struct adapter *adapter)
4525 if (adapter->msix_mem)
4526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4527 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4531 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4534 IXGBE_WRITE_FLUSH(&adapter->hw);
/* shared-code callback: read a 16-bit PCI config word via the
 * device_t stashed in the osdep back-pointer. */
4539 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4543 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
/* shared-code callback: write a 16-bit PCI config word. */
4550 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4552 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4559 ** Setup the correct IVAR register for a particular MSIX interrupt
4560 ** (yes this is all very magic and confusing :)
4561 ** - entry is the register array entry
4562 ** - vector is the MSIX vector for this queue
4563 ** - type is RX/TX/MISC
4566 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4568 struct ixgbe_hw *hw = &adapter->hw;
4571 vector |= IXGBE_IVAR_ALLOC_VAL;
4573 switch (hw->mac.type) {
4575 case ixgbe_mac_82598EB:
/* type == -1 selects the "other causes" (link etc.) IVAR slot. */
4577 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4579 entry += (type * 64);
/* Four 8-bit vector fields per 32-bit IVAR register. */
4580 index = (entry >> 2) & 0x1F;
4581 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4582 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4583 ivar |= (vector << (8 * (entry & 0x3)));
4584 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4587 case ixgbe_mac_82599EB:
4588 if (type == -1) { /* MISC IVAR */
4589 index = (entry & 1) * 8;
4590 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4591 ivar &= ~(0xFF << index);
4592 ivar |= (vector << index);
4593 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4594 } else { /* RX/TX IVARS */
4595 index = (16 * (entry & 1)) + (8 * type);
4596 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4597 ivar &= ~(0xFF << index);
4598 ivar |= (vector << index);
4599 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/* Map every queue's RX and TX causes to its MSI-X vector and seed the
 * per-vector EITR interrupt throttle; the link cause gets its own slot. */
4608 ixgbe_configure_ivars(struct adapter *adapter)
4610 struct ix_queue *que = adapter->queues;
4613 if (ixgbe_max_interrupt_rate > 0)
4614 newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4618 for (int i = 0; i < adapter->num_queues; i++, que++) {
4619 /* First the RX queue entry */
4620 ixgbe_set_ivar(adapter, i, que->msix, 0);
4621 /* ... and the TX */
4622 ixgbe_set_ivar(adapter, i, que->msix, 1);
4623 /* Set an Initial EITR value */
4624 IXGBE_WRITE_REG(&adapter->hw,
4625 IXGBE_EITR(que->msix), newitr);
4628 /* For the Link interrupt */
4629 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4633 ** ixgbe_sfp_probe - called in the local timer to
4634 ** determine if a port had optics inserted.
4636 static bool ixgbe_sfp_probe(struct adapter *adapter)
4638 struct ixgbe_hw *hw = &adapter->hw;
4639 device_t dev = adapter->dev;
4640 bool result = FALSE;
4642 if ((hw->phy.type == ixgbe_phy_nl) &&
4643 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4644 s32 ret = hw->phy.ops.identify_sfp(hw);
4647 ret = hw->phy.ops.reset(hw);
4648 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4649 device_printf(dev,"Unsupported SFP+ module detected!");
4650 printf(" Reload driver with supported module.\n");
/* Stop probing: an unsupported module will not become usable. */
4651 adapter->sfp_probe = FALSE;
4654 device_printf(dev,"SFP+ module detected!\n");
4655 /* We now have supported optics */
4656 adapter->sfp_probe = FALSE;
4664 ** Tasklet handler for MSIX Link interrupts
4665 ** - do outside interrupt since it might sleep
4668 ixgbe_handle_link(void *context, int pending)
4670 struct adapter *adapter = context;
4672 ixgbe_check_link(&adapter->hw,
4673 &adapter->link_speed, &adapter->link_up, 0);
4674 ixgbe_update_link_status(adapter);
4678 ** Tasklet for handling SFP module interrupts
4681 ixgbe_handle_mod(void *context, int pending)
4683 struct adapter *adapter = context;
4684 struct ixgbe_hw *hw = &adapter->hw;
4685 device_t dev = adapter->dev;
/* Identify the newly inserted module, program it, then kick off the
 * multispeed-fiber task to bring the link up at the right speed. */
4688 err = hw->phy.ops.identify_sfp(hw);
4689 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4691 "Unsupported SFP+ module type was detected.\n");
4694 err = hw->mac.ops.setup_sfp(hw);
4695 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4697 "Setup failure - unsupported SFP+ module type.\n");
4700 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4706 ** Tasklet for handling MSF (multispeed fiber) interrupts
4709 ixgbe_handle_msf(void *context, int pending)
4711 struct adapter *adapter = context;
4712 struct ixgbe_hw *hw = &adapter->hw;
/* If no advertised speeds are configured, fall back to whatever the
 * PHY reports it can do, then (re)run link setup. */
4716 autoneg = hw->phy.autoneg_advertised;
4717 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4718 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4719 if (hw->mac.ops.setup_link)
4720 hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
4722 ixgbe_check_link(&adapter->hw, &speed, &adapter->link_up, 0);
4723 ixgbe_update_link_status(adapter);
4730 ** Tasklet for reinitializing the Flow Director filter table
4733 ixgbe_reinit_fdir(void *context, int pending)
4735 struct adapter *adapter = context;
4736 struct ifnet *ifp = adapter->ifp;
4738 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
4740 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
4741 adapter->fdir_reinit = 0;
4742 /* Restart the interface */
4743 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4748 /**********************************************************************
4750 * Update the board statistics counters.
4752 **********************************************************************/
4754 ixgbe_update_stats_counters(struct adapter *adapter)
4756 struct ifnet *ifp = adapter->ifp;
4757 struct ixgbe_hw *hw = &adapter->hw;
4758 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4759 u64 total_missed_rx = 0;
/* Hardware stats registers are clear-on-read; each read below is
 * accumulated into the running software totals. */
4761 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4762 adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4763 adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4764 adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4766 for (int i = 0; i < 8; i++) {
4768 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4769 /* missed_rx tallies misses for the gprc workaround */
4771 /* global total per queue */
4772 adapter->stats.mpc[i] += mp;
4773 /* Running comprehensive total for stats display */
4774 total_missed_rx += adapter->stats.mpc[i];
4775 if (hw->mac.type == ixgbe_mac_82598EB)
4776 adapter->stats.rnbc[i] +=
4777 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4778 adapter->stats.pxontxc[i] +=
4779 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
4780 adapter->stats.pxonrxc[i] +=
4781 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
4782 adapter->stats.pxofftxc[i] +=
4783 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
4784 adapter->stats.pxoffrxc[i] +=
4785 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
4786 adapter->stats.pxon2offc[i] +=
4787 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
4789 for (int i = 0; i < 16; i++) {
4790 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4791 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4792 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
4793 adapter->stats.qbrc[i] +=
4794 ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
4795 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4796 adapter->stats.qbtc[i] +=
4797 ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
4798 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4800 adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4801 adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4802 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4804 /* Hardware workaround, gprc counts missed packets */
4805 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4806 adapter->stats.gprc -= missed_rx;
/* 82599 has full 64-bit octet counters; 82598 only the high register. */
4808 if (hw->mac.type == ixgbe_mac_82599EB) {
4809 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4810 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4811 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4812 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4813 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4814 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4815 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4816 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4818 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4819 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4820 /* 82598 only has a counter in the high register */
4821 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4822 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4823 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4827 * Workaround: mprc hardware is incorrectly counting
4828 * broadcasts, so for now we subtract those.
4830 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4831 adapter->stats.bprc += bprc;
4832 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4833 adapter->stats.mprc -= bprc;
4835 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4836 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4837 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4838 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4839 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4840 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4842 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4843 adapter->stats.lxontxc += lxon;
4844 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4845 adapter->stats.lxofftxc += lxoff;
4846 total = lxon + lxoff;
/* Flow-control pause frames are counted as good TX by the hardware;
 * back them out of the good-packet/octet totals. */
4848 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4849 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4850 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4851 adapter->stats.gptc -= total;
4852 adapter->stats.mptc -= total;
4853 adapter->stats.ptc64 -= total;
4854 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4856 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4857 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4858 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4859 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4860 adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4861 adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4862 adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4863 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4864 adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4865 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4866 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4867 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4868 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4869 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4870 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4871 adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4872 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4873 adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4874 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4875 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4876 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4877 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4878 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4881 /* Fill out the OS statistics structure */
4882 ifp->if_ipackets = adapter->stats.gprc;
4883 ifp->if_opackets = adapter->stats.gptc;
4884 ifp->if_ibytes = adapter->stats.gorc;
4885 ifp->if_obytes = adapter->stats.gotc;
4886 ifp->if_imcasts = adapter->stats.mprc;
4887 ifp->if_collisions = 0;
4890 ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
4891 adapter->stats.rlec;
4894 /** ixgbe_sysctl_tdh_handler - Handler function
4895 * Retrieves the TDH value from the hardware
4898 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4902 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4905 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4906 error = sysctl_handle_int(oidp, &val, 0, req);
4907 if (error || !req->newptr)
4912 /** ixgbe_sysctl_tdt_handler - Handler function
4913 * Retrieves the TDT value from the hardware
4916 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4920 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4923 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4924 error = sysctl_handle_int(oidp, &val, 0, req);
4925 if (error || !req->newptr)
4930 /** ixgbe_sysctl_rdh_handler - Handler function
4931 * Retrieves the RDH value from the hardware
4934 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4938 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4941 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4942 error = sysctl_handle_int(oidp, &val, 0, req);
4943 if (error || !req->newptr)
4948 /** ixgbe_sysctl_rdt_handler - Handler function
4949 * Retrieves the RDT value from the hardware
4952 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4956 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4959 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4960 error = sysctl_handle_int(oidp, &val, 0, req);
4961 if (error || !req->newptr)
4967 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4970 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4971 unsigned int reg, usec, rate;
4973 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4974 usec = ((reg & 0x0FF8) >> 3);
4976 rate = 1000000 / usec;
4979 error = sysctl_handle_int(oidp, &rate, 0, req);
4980 if (error || !req->newptr)
4986 * Add sysctl variables, one per statistic, to the system.
4989 ixgbe_add_hw_stats(struct adapter *adapter)
4992 device_t dev = adapter->dev;
4994 struct tx_ring *txr = adapter->tx_rings;
4995 struct rx_ring *rxr = adapter->rx_rings;
4997 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4998 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4999 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
5000 struct ixgbe_hw_stats *stats = &adapter->stats;
5002 struct sysctl_oid *stat_node, *queue_node;
5003 struct sysctl_oid_list *stat_list, *queue_list;
5005 #define QUEUE_NAME_LEN 32
5006 char namebuf[QUEUE_NAME_LEN];
5008 /* Driver Statistics */
5009 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
5010 CTLFLAG_RD, &adapter->dropped_pkts,
5011 "Driver dropped packets");
5012 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
5013 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
5014 "m_defrag() failed");
5016 /* These counters are not updated by the software */
5017 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
5018 CTLFLAG_RD, &adapter->mbuf_header_failed,
5020 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
5021 CTLFLAG_RD, &adapter->mbuf_packet_failed,
5023 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
5024 CTLFLAG_RD, &adapter->no_tx_map_avail,
5027 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
5028 CTLFLAG_RD, &adapter->no_tx_dma_setup,
5029 "Driver tx dma failure in xmit");
5030 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
5031 CTLFLAG_RD, &adapter->watchdog_events,
5032 "Watchdog timeouts");
5033 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
5034 CTLFLAG_RD, &adapter->tso_tx,
5036 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
5037 CTLFLAG_RD, &adapter->link_irq,
5038 "Link MSIX IRQ Handled");
5040 for (int i = 0; i < adapter->num_queues; i++, txr++) {
5041 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5042 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5043 CTLFLAG_RD, NULL, "Queue Name");
5044 queue_list = SYSCTL_CHILDREN(queue_node);
5046 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
5047 CTLFLAG_RD, &adapter->queues[i], sizeof(&adapter->queues[i]),
5048 ixgbe_sysctl_interrupt_rate_handler, "IU",
5050 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
5051 CTLFLAG_RD, txr, sizeof(txr),
5052 ixgbe_sysctl_tdh_handler, "IU",
5053 "Transmit Descriptor Head");
5054 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
5055 CTLFLAG_RD, txr, sizeof(txr),
5056 ixgbe_sysctl_tdt_handler, "IU",
5057 "Transmit Descriptor Tail");
5058 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
5059 CTLFLAG_RD, &txr->no_desc_avail,
5060 "Queue No Descriptor Available");
5061 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
5062 CTLFLAG_RD, &txr->total_packets,
5063 "Queue Packets Transmitted");
5066 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
5067 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5068 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5069 CTLFLAG_RD, NULL, "Queue Name");
5070 queue_list = SYSCTL_CHILDREN(queue_node);
5072 struct lro_ctrl *lro = &rxr->lro;
5074 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5075 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5076 CTLFLAG_RD, NULL, "Queue Name");
5077 queue_list = SYSCTL_CHILDREN(queue_node);
5079 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
5080 CTLFLAG_RD, rxr, sizeof(rxr),
5081 ixgbe_sysctl_rdh_handler, "IU",
5082 "Receive Descriptor Head");
5083 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
5084 CTLFLAG_RD, rxr, sizeof(rxr),
5085 ixgbe_sysctl_rdt_handler, "IU",
5086 "Receive Descriptor Tail");
5087 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
5088 CTLFLAG_RD, &rxr->rx_packets,
5089 "Queue Packets Received");
5090 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
5091 CTLFLAG_RD, &rxr->rx_bytes,
5092 "Queue Bytes Received");
5093 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
5094 CTLFLAG_RD, &lro->lro_queued, 0,
5096 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
5097 CTLFLAG_RD, &lro->lro_flushed, 0,
5101 /* MAC stats get the own sub node */
5103 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
5104 CTLFLAG_RD, NULL, "MAC Statistics");
5105 stat_list = SYSCTL_CHILDREN(stat_node);
5107 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
5108 CTLFLAG_RD, &stats->crcerrs,
5110 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "ill_errs",
5111 CTLFLAG_RD, &stats->illerrc,
5112 "Illegal Byte Errors");
5113 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "byte_errs",
5114 CTLFLAG_RD, &stats->errbc,
5116 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "short_discards",
5117 CTLFLAG_RD, &stats->mspdc,
5118 "MAC Short Packets Discarded");
5119 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "local_faults",
5120 CTLFLAG_RD, &stats->mlfc,
5121 "MAC Local Faults");
5122 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "remote_faults",
5123 CTLFLAG_RD, &stats->mrfc,
5124 "MAC Remote Faults");
5125 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
5126 CTLFLAG_RD, &stats->rlec,
5127 "Receive Length Errors");
5128 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
5129 CTLFLAG_RD, &stats->lxontxc,
5130 "Link XON Transmitted");
5131 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
5132 CTLFLAG_RD, &stats->lxontxc,
5133 "Link XON Received");
5134 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
5135 CTLFLAG_RD, &stats->lxofftxc,
5136 "Link XOFF Transmitted");
5137 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
5138 CTLFLAG_RD, &stats->lxofftxc,
5139 "Link XOFF Received");
5141 /* Packet Reception Stats */
5142 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
5143 CTLFLAG_RD, &stats->tor,
5144 "Total Octets Received");
5145 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
5146 CTLFLAG_RD, &stats->gorc,
5147 "Good Octets Received");
5148 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
5149 CTLFLAG_RD, &stats->tpr,
5150 "Total Packets Received");
5151 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
5152 CTLFLAG_RD, &stats->gprc,
5153 "Good Packets Received");
5154 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
5155 CTLFLAG_RD, &stats->mprc,
5156 "Multicast Packets Received");
5157 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
5158 CTLFLAG_RD, &stats->bprc,
5159 "Broadcast Packets Received");
5160 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
5161 CTLFLAG_RD, &stats->prc64,
5162 "64 byte frames received ");
5163 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
5164 CTLFLAG_RD, &stats->prc127,
5165 "65-127 byte frames received");
5166 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
5167 CTLFLAG_RD, &stats->prc255,
5168 "128-255 byte frames received");
5169 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
5170 CTLFLAG_RD, &stats->prc511,
5171 "256-511 byte frames received");
5172 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
5173 CTLFLAG_RD, &stats->prc1023,
5174 "512-1023 byte frames received");
5175 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
5176 CTLFLAG_RD, &stats->prc1522,
5177 "1023-1522 byte frames received");
5178 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
5179 CTLFLAG_RD, &stats->ruc,
5180 "Receive Undersized");
5181 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
5182 CTLFLAG_RD, &stats->rfc,
5183 "Fragmented Packets Received ");
5184 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
5185 CTLFLAG_RD, &stats->roc,
5186 "Oversized Packets Received");
5187 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
5188 CTLFLAG_RD, &stats->rjc,
5190 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
5191 CTLFLAG_RD, &stats->mngprc,
5192 "Management Packets Received");
5193 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
5194 CTLFLAG_RD, &stats->mngptc,
5195 "Management Packets Dropped");
5196 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
5197 CTLFLAG_RD, &stats->xec,
5200 /* Packet Transmission Stats */
5201 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
5202 CTLFLAG_RD, &stats->gotc,
5203 "Good Octets Transmitted");
5204 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
5205 CTLFLAG_RD, &stats->tpt,
5206 "Total Packets Transmitted");
5207 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
5208 CTLFLAG_RD, &stats->gptc,
5209 "Good Packets Transmitted");
5210 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
5211 CTLFLAG_RD, &stats->bptc,
5212 "Broadcast Packets Transmitted");
5213 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
5214 CTLFLAG_RD, &stats->mptc,
5215 "Multicast Packets Transmitted");
5216 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
5217 CTLFLAG_RD, &stats->mngptc,
5218 "Management Packets Transmitted");
5219 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
5220 CTLFLAG_RD, &stats->ptc64,
5221 "64 byte frames transmitted ");
5222 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
5223 CTLFLAG_RD, &stats->ptc127,
5224 "65-127 byte frames transmitted");
5225 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
5226 CTLFLAG_RD, &stats->ptc255,
5227 "128-255 byte frames transmitted");
5228 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
5229 CTLFLAG_RD, &stats->ptc511,
5230 "256-511 byte frames transmitted");
5231 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
5232 CTLFLAG_RD, &stats->ptc1023,
5233 "512-1023 byte frames transmitted");
5234 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
5235 CTLFLAG_RD, &stats->ptc1522,
5236 "1024-1522 byte frames transmitted");
5239 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_crc",
5240 CTLFLAG_RD, &stats->fccrc,
5242 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_last",
5243 CTLFLAG_RD, &stats->fclast,
5245 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
5246 CTLFLAG_RD, &stats->fcoerpdc,
5247 "FCoE Packets Dropped");
5248 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
5249 CTLFLAG_RD, &stats->fcoeprc,
5250 "FCoE Packets Received");
5251 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
5252 CTLFLAG_RD, &stats->fcoeptc,
5253 "FCoE Packets Transmitted");
5254 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
5255 CTLFLAG_RD, &stats->fcoedwrc,
5256 "FCoE DWords Received");
5257 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
5258 CTLFLAG_RD, &stats->fcoedwtc,
5259 "FCoE DWords Transmitted");
5263 ** Set flow control using sysctl:
5264 ** Flow control values:
5271 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
5274 struct adapter *adapter;
5276 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
5281 adapter = (struct adapter *) arg1;
5282 switch (ixgbe_flow_control) {
5283 case ixgbe_fc_rx_pause:
5284 case ixgbe_fc_tx_pause:
5286 adapter->hw.fc.requested_mode = ixgbe_flow_control;
5290 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5293 ixgbe_fc_enable(&adapter->hw, 0);
5298 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
5299 const char *description, int *limit, int value)
5302 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5303 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5304 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5308 ** Control link advertise speed:
5310 ** 1 - advertise only 1G
5313 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
5316 struct adapter *adapter;
5317 struct ixgbe_hw *hw;
5318 ixgbe_link_speed speed, last;
5320 adapter = (struct adapter *) arg1;
5322 last = hw->phy.autoneg_advertised;
5324 error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
5326 if ((error) || (adapter->advertise == -1))
5329 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5330 (hw->phy.multispeed_fiber)))
5333 if (adapter->advertise == 1)
5334 speed = IXGBE_LINK_SPEED_1GB_FULL;
5336 speed = IXGBE_LINK_SPEED_1GB_FULL |
5337 IXGBE_LINK_SPEED_10GB_FULL;
5339 if (speed == last) /* no change */
5342 hw->mac.autotry_restart = TRUE;
5343 hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);