1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
44 * Set this to one to display debug statistics
45 *********************************************************************/
/* Runtime toggle for debug statistic output; off by default. */
46 int ixgbe_display_debug_stats = 0;
48 /*********************************************************************
50 *********************************************************************/
/* Driver version string; reported by ixgbe_probe() in the device description. */
51 char ixgbe_driver_version[] = "2.8.3";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
/* PCI ID match table covering the 82598, 82599, X540 and X550 families. */
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95 /* required last entry */
99 /*********************************************************************
100 * Table of branding strings
101 *********************************************************************/
/* Branding strings, indexed by the last field of ixgbe_vendor_info_array. */
103 static char *ixgbe_strings[] = {
104 "Intel(R) PRO/10GbE PCI-Express Network Driver"
107 /*********************************************************************
108 * Function prototypes
109 *********************************************************************/
/* Driver lifecycle (newbus) entry points */
110 static int ixgbe_probe(device_t);
111 static int ixgbe_attach(device_t);
112 static int ixgbe_detach(device_t);
113 static int ixgbe_shutdown(device_t);
114 static int ixgbe_suspend(device_t);
115 static int ixgbe_resume(device_t);
116 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void ixgbe_init(void *);
118 static void ixgbe_init_locked(struct adapter *);
119 static void ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* Media handling, hardware identification and resource management */
123 static void ixgbe_add_media_types(struct adapter *);
124 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int ixgbe_media_change(struct ifnet *);
126 static void ixgbe_identify_hardware(struct adapter *);
127 static int ixgbe_allocate_pci_resources(struct adapter *);
128 static void ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int ixgbe_allocate_msix(struct adapter *);
130 static int ixgbe_allocate_legacy(struct adapter *);
131 static int ixgbe_setup_msix(struct adapter *);
132 static void ixgbe_free_pci_resources(struct adapter *);
133 static void ixgbe_local_timer(void *);
134 static int ixgbe_setup_interface(device_t, struct adapter *);
135 static void ixgbe_config_dmac(struct adapter *);
136 static void ixgbe_config_delay_values(struct adapter *);
137 static void ixgbe_config_link(struct adapter *);
138 static void ixgbe_check_eee_support(struct adapter *);
139 static void ixgbe_check_wol_support(struct adapter *);
140 static int ixgbe_setup_low_power_mode(struct adapter *);
141 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring bring-up and interrupt control */
143 static void ixgbe_initialize_transmit_units(struct adapter *);
144 static void ixgbe_initialize_receive_units(struct adapter *);
145 static void ixgbe_enable_rx_drop(struct adapter *);
146 static void ixgbe_disable_rx_drop(struct adapter *);
148 static void ixgbe_enable_intr(struct adapter *);
149 static void ixgbe_disable_intr(struct adapter *);
150 static void ixgbe_update_stats_counters(struct adapter *);
151 static void ixgbe_set_promisc(struct adapter *);
152 static void ixgbe_set_multi(struct adapter *);
153 static void ixgbe_update_link_status(struct adapter *);
154 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void ixgbe_configure_ivars(struct adapter *);
156 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN registration callbacks and hardware VLAN support */
158 static void ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_add_device_sysctls(struct adapter *);
163 static void ixgbe_add_hw_stats(struct adapter *);
165 /* Sysctl handlers */
166 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
179 /* Support for pluggable optic modules */
180 static bool ixgbe_sfp_probe(struct adapter *);
181 static void ixgbe_setup_optics(struct adapter *);
183 /* Legacy (single vector) interrupt handler */
184 static void ixgbe_legacy_irq(void *);
186 /* The MSI/X Interrupt handlers */
187 static void ixgbe_msix_que(void *);
188 static void ixgbe_msix_link(void *);
190 /* Deferred interrupt tasklets */
191 static void ixgbe_handle_que(void *, int);
192 static void ixgbe_handle_link(void *, int);
193 static void ixgbe_handle_msf(void *, int);
194 static void ixgbe_handle_mod(void *, int);
195 static void ixgbe_handle_phy(void *, int);
198 static void ixgbe_reinit_fdir(void *, int);
201 /*********************************************************************
202 * FreeBSD Device Interface Entry Points
203 *********************************************************************/
/* newbus method table mapping generic device operations to this driver. */
205 static device_method_t ix_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, ixgbe_probe),
208 DEVMETHOD(device_attach, ixgbe_attach),
209 DEVMETHOD(device_detach, ixgbe_detach),
210 DEVMETHOD(device_shutdown, ixgbe_shutdown),
211 DEVMETHOD(device_suspend, ixgbe_suspend),
212 DEVMETHOD(device_resume, ixgbe_resume),
/* Driver descriptor: name "ix", softc is struct adapter. */
216 static driver_t ix_driver = {
217 "ix", ix_methods, sizeof(struct adapter),
/* Register on the pci bus; module depends on pci and ether. */
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
227 ** TUNEABLE PARAMETERS:
/* Parent sysctl node: hw.ix */
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231 "IXGBE driver parameters");
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241 "Enable adaptive interrupt moderation");
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
/* NOTE(review): tunable lives under "hw.ixgbe." while the sysctl is hw.ix — confirm intended. */
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251 &ixgbe_rx_process_limit, 0,
252 "Maximum number of received packets to process at a time,"
253 "-1 means unlimited");
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259 &ixgbe_tx_process_limit, 0,
260 "Maximum number of sent packets to process at a time,"
261 "-1 means unlimited");
264 ** Smart speed setting, default to on
265 ** this only works as a compile option
266 ** right now as its during attach, set
267 ** this to 'ixgbe_smart_speed_off' to
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
273 * MSIX should be the default for best performance,
274 * but this allows it to be forced off for testing.
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278 "Enable MSI-X interrupts");
281 * Number of Queues, can be set to 0,
282 * it then autoconfigures based on the
283 * number of cpus with a max of 8. This
284 * can be overriden manually here.
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288 "Number of queues to configure up to a maximum of 8; "
289 "0 indicates autoconfigure");
292 ** Number of TX descriptors per ring,
293 ** setting higher than RX as this seems
294 ** the better performing choice.
296 static int ixgbe_txd = PERFORM_TXD;
297 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
298 "Number of transmit descriptors per queue");
300 /* Number of RX descriptors per ring */
301 static int ixgbe_rxd = PERFORM_RXD;
302 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
303 "Number of receive descriptors per queue");
306 ** Defining this on will allow the use
307 ** of unsupported SFP+ modules, note that
308 ** doing so you are on your own :)
310 static int allow_unsupported_sfp = FALSE;
/* NOTE(review): this tunable uses the "hw.ix." prefix, unlike the "hw.ixgbe." ones above — confirm intended. */
311 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
313 /* Keep running tab on them for sanity check */
314 static int ixgbe_total_ports;
318 ** Flow Director actually 'steals'
319 ** part of the packet buffer as its
320 ** filter pool, this variable controls
322 ** 0 = 64K, 1 = 128K, 2 = 256K
324 static int fdir_pballoc = 1;
329 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
330 * be a reference on how to implement netmap support in a driver.
331 * Additional comments are in ixgbe_netmap.h .
333 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
334 * that extend the standard driver.
336 #include <dev/netmap/ixgbe_netmap.h>
337 #endif /* DEV_NETMAP */
339 /*********************************************************************
340 * Device identification routine
342 * ixgbe_probe determines if the driver should be loaded on
343 * adapter based on PCI vendor/device id of the adapter.
345 * return BUS_PROBE_DEFAULT on success, positive on failure
346 *********************************************************************/
349 ixgbe_probe(device_t dev)
351 ixgbe_vendor_info_t *ent;
353 u16 pci_vendor_id = 0;
354 u16 pci_device_id = 0;
355 u16 pci_subvendor_id = 0;
356 u16 pci_subdevice_id = 0;
357 char adapter_name[256];
359 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Bail out early if this is not an Intel device. */
361 pci_vendor_id = pci_get_vendor(dev);
362 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
365 pci_device_id = pci_get_device(dev);
366 pci_subvendor_id = pci_get_subvendor(dev);
367 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; a zero subvendor/subdevice entry acts as a wildcard. */
369 ent = ixgbe_vendor_info_array;
370 while (ent->vendor_id != 0) {
371 if ((pci_vendor_id == ent->vendor_id) &&
372 (pci_device_id == ent->device_id) &&
374 ((pci_subvendor_id == ent->subvendor_id) ||
375 (ent->subvendor_id == 0)) &&
377 ((pci_subdevice_id == ent->subdevice_id) ||
378 (ent->subdevice_id == 0))) {
/* NOTE(review): sprintf into a fixed 256-byte buffer — snprintf would be safer if strings grow. */
379 sprintf(adapter_name, "%s, Version - %s",
380 ixgbe_strings[ent->index],
381 ixgbe_driver_version);
382 device_set_desc_copy(dev, adapter_name);
384 return (BUS_PROBE_DEFAULT);
391 /*********************************************************************
392 * Device initialization routine
394 * The attach entry point is called when the driver is being loaded.
395 * This routine identifies the type of hardware, allocates all resources
396 * and initializes the hardware.
398 * return 0 on success, positive on failure
399 *********************************************************************/
402 ixgbe_attach(device_t dev)
404 struct adapter *adapter;
410 INIT_DEBUGOUT("ixgbe_attach: begin");
412 /* Allocate, clear, and link in our adapter structure */
413 adapter = device_get_softc(dev);
414 adapter->dev = adapter->osdep.dev = dev;
418 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
420 /* Set up the timer callout */
421 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
423 /* Determine hardware revision */
424 ixgbe_identify_hardware(adapter);
426 /* Do base PCI setup - map BAR0 */
427 if (ixgbe_allocate_pci_resources(adapter)) {
428 device_printf(dev, "Allocation of PCI resources failed\n");
433 /* Do descriptor calc and sanity checks */
/* TX ring size must be DBA_ALIGN-aligned and within [MIN_TXD, MAX_TXD]; otherwise use DEFAULT_TXD. */
434 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
435 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
436 device_printf(dev, "TXD config issue, using default!\n");
437 adapter->num_tx_desc = DEFAULT_TXD;
439 adapter->num_tx_desc = ixgbe_txd;
442 ** With many RX rings it is easy to exceed the
443 ** system mbuf allocation. Tuning nmbclusters
444 ** can alleviate this.
446 if (nmbclusters > 0) {
448 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
449 if (s > nmbclusters) {
450 device_printf(dev, "RX Descriptors exceed "
451 "system mbuf max, using default instead!\n");
452 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range sanity check for the RX ring size. */
456 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
457 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
458 device_printf(dev, "RXD config issue, using default!\n");
459 adapter->num_rx_desc = DEFAULT_RXD;
461 adapter->num_rx_desc = ixgbe_rxd;
463 /* Allocate our TX/RX Queues */
464 if (ixgbe_allocate_queues(adapter)) {
469 /* Allocate multicast array memory. */
470 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
471 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
472 if (adapter->mta == NULL) {
473 device_printf(dev, "Can not allocate multicast setup array\n");
478 /* Initialize the shared code */
479 hw->allow_unsupported_sfp = allow_unsupported_sfp;
480 error = ixgbe_init_shared_code(hw);
481 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
483 ** No optics in this port, set up
484 ** so the timer routine will probe
485 ** for later insertion.
487 adapter->sfp_probe = TRUE;
489 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
490 device_printf(dev,"Unsupported SFP+ module detected!\n");
494 device_printf(dev,"Unable to initialize the shared code\n");
499 /* Make sure we have a good EEPROM before we read from it */
500 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
501 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
/* Bring up the hardware; some error codes are informational only. */
506 error = ixgbe_init_hw(hw);
508 case IXGBE_ERR_EEPROM_VERSION:
509 device_printf(dev, "This device is a pre-production adapter/"
510 "LOM. Please be aware there may be issues associated "
511 "with your hardware.\n If you are experiencing problems "
512 "please contact your Intel or hardware representative "
513 "who provided you with this hardware.\n");
515 case IXGBE_ERR_SFP_NOT_SUPPORTED:
516 device_printf(dev,"Unsupported SFP+ Module\n");
519 case IXGBE_ERR_SFP_NOT_PRESENT:
520 device_printf(dev,"No SFP+ Module found\n");
526 /* Detect and set physical type */
527 ixgbe_setup_optics(adapter);
/* Prefer MSI-X when multiple vectors were granted and it is not disabled. */
529 if ((adapter->msix > 1) && (ixgbe_enable_msix))
530 error = ixgbe_allocate_msix(adapter);
532 error = ixgbe_allocate_legacy(adapter);
536 /* Setup OS specific network interface */
537 if (ixgbe_setup_interface(dev, adapter) != 0)
540 /* Initialize statistics */
541 ixgbe_update_stats_counters(adapter);
543 /* Register for VLAN events */
544 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
545 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
546 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
547 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
549 /* Check PCIE slot type/speed/width */
550 ixgbe_get_slot_info(hw);
552 /* Set an initial default flow control value */
553 adapter->fc = ixgbe_fc_full;
555 /* Check for certain supported features */
556 ixgbe_check_wol_support(adapter);
557 ixgbe_check_eee_support(adapter);
560 ixgbe_add_device_sysctls(adapter);
561 ixgbe_add_hw_stats(adapter);
563 /* let hardware know driver is loaded */
564 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
569 ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error unwind: release everything allocated above before failing attach. */
575 ixgbe_free_transmit_structures(adapter);
576 ixgbe_free_receive_structures(adapter);
578 if (adapter->ifp != NULL)
579 if_free(adapter->ifp);
580 ixgbe_free_pci_resources(adapter);
581 free(adapter->mta, M_DEVBUF);
585 /*********************************************************************
586 * Device removal routine
588 * The detach entry point is called when the driver is being removed.
589 * This routine stops the adapter and deallocates all the resources
590 * that were allocated for driver operation.
592 * return 0 on success, positive on failure
593 *********************************************************************/
596 ixgbe_detach(device_t dev)
598 struct adapter *adapter = device_get_softc(dev);
599 struct ix_queue *que = adapter->queues;
600 struct tx_ring *txr = adapter->tx_rings;
603 INIT_DEBUGOUT("ixgbe_detach: begin");
605 /* Make sure VLANS are not using driver */
606 if (adapter->ifp->if_vlantrunk != NULL) {
607 device_printf(dev,"Vlan in use, detach first\n");
611 /* Stop the adapter */
612 IXGBE_CORE_LOCK(adapter);
613 ixgbe_setup_low_power_mode(adapter);
614 IXGBE_CORE_UNLOCK(adapter);
/* Tear down per-queue taskqueues; txq_task exists only outside legacy TX mode. */
616 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
618 #ifndef IXGBE_LEGACY_TX
619 taskqueue_drain(que->tq, &txr->txq_task);
621 taskqueue_drain(que->tq, &que->que_task);
622 taskqueue_free(que->tq);
626 /* Drain the Link queue */
628 taskqueue_drain(adapter->tq, &adapter->link_task);
629 taskqueue_drain(adapter->tq, &adapter->mod_task);
630 taskqueue_drain(adapter->tq, &adapter->msf_task);
631 taskqueue_drain(adapter->tq, &adapter->phy_task);
633 taskqueue_drain(adapter->tq, &adapter->fdir_task);
635 taskqueue_free(adapter->tq);
638 /* let hardware know driver is unloading */
639 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
643 /* Unregister VLAN events */
644 if (adapter->vlan_attach != NULL)
645 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646 if (adapter->vlan_detach != NULL)
647 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack and release remaining resources. */
649 ether_ifdetach(adapter->ifp);
650 callout_drain(&adapter->timer);
652 netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654 ixgbe_free_pci_resources(adapter);
655 bus_generic_detach(dev);
656 if_free(adapter->ifp);
658 ixgbe_free_transmit_structures(adapter);
659 ixgbe_free_receive_structures(adapter);
660 free(adapter->mta, M_DEVBUF);
662 IXGBE_CORE_LOCK_DESTROY(adapter);
666 /*********************************************************************
668 * Shutdown entry point
670 **********************************************************************/
/* Put the adapter into a low-power state when the system is shutting down. */
673 ixgbe_shutdown(device_t dev)
675 struct adapter *adapter = device_get_softc(dev);
678 INIT_DEBUGOUT("ixgbe_shutdown: begin");
680 IXGBE_CORE_LOCK(adapter);
681 error = ixgbe_setup_low_power_mode(adapter);
682 IXGBE_CORE_UNLOCK(adapter);
688 * Methods for going from:
689 * D0 -> D3: ixgbe_suspend
690 * D3 -> D0: ixgbe_resume
693 ixgbe_suspend(device_t dev)
695 struct adapter *adapter = device_get_softc(dev);
698 INIT_DEBUGOUT("ixgbe_suspend: begin");
700 IXGBE_CORE_LOCK(adapter);
/* Quiesce the hardware before the D3 transition. */
702 error = ixgbe_setup_low_power_mode(adapter);
704 /* Save state and power down */
/* NOTE(review): no pci_save_state() call is visible before the D3 transition — confirm it occurs in the elided lines. */
706 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
708 IXGBE_CORE_UNLOCK(adapter);
/* D3 -> D0 resume path: restore PCI state, clear wake status, and reinit if the interface was up. */
714 ixgbe_resume(device_t dev)
716 struct adapter *adapter = device_get_softc(dev);
717 struct ifnet *ifp = adapter->ifp;
718 struct ixgbe_hw *hw = &adapter->hw;
721 INIT_DEBUGOUT("ixgbe_resume: begin");
723 IXGBE_CORE_LOCK(adapter);
725 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726 pci_restore_state(dev);
728 /* Read & clear WUS register */
/* NOTE(review): WUS is read into 'wus' but the printf below re-reads the register; the first read appears unused. */
729 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
731 device_printf(dev, "Woken up by (WUS): %#010x\n",
732 IXGBE_READ_REG(hw, IXGBE_WUS));
733 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734 /* And clear WUFC until next low-power transition */
735 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
738 * Required after D3->D0 transition;
739 * will re-advertise all previous advertised speeds
741 if (ifp->if_flags & IFF_UP)
742 ixgbe_init_locked(adapter);
744 IXGBE_CORE_UNLOCK(adapter);
746 INIT_DEBUGOUT("ixgbe_resume: end");
751 /*********************************************************************
754 * ixgbe_ioctl is called when the user wants to configure the
757 * return 0 on success, positive on failure
758 **********************************************************************/
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
763 struct adapter *adapter = ifp->if_softc;
764 struct ifreq *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766 struct ifaddr *ifa = (struct ifaddr *)data;
767 bool avoid_reset = FALSE;
/* Address-family checks for the SIOCSIFADDR path (surrounding case labels elided). */
775 if (ifa->ifa_addr->sa_family == AF_INET)
779 if (ifa->ifa_addr->sa_family == AF_INET6)
782 #if defined(INET) || defined(INET6)
784 ** Calling init results in link renegotiation,
785 ** so we avoid doing it when possible.
788 ifp->if_flags |= IFF_UP;
789 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
791 if (!(ifp->if_flags & IFF_NOARP))
792 arp_ifinit(ifp, ifa);
794 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, apply new MTU/frame size, then reinitialize under the core lock. */
798 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
799 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
802 IXGBE_CORE_LOCK(adapter);
803 ifp->if_mtu = ifr->ifr_mtu;
804 adapter->max_frame_size =
805 ifp->if_mtu + IXGBE_MTU_HDR;
806 ixgbe_init_locked(adapter);
807 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: toggle promisc/allmulti without a full reinit when only those bits changed. */
811 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812 IXGBE_CORE_LOCK(adapter);
813 if (ifp->if_flags & IFF_UP) {
814 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815 if ((ifp->if_flags ^ adapter->if_flags) &
816 (IFF_PROMISC | IFF_ALLMULTI)) {
817 ixgbe_set_promisc(adapter);
820 ixgbe_init_locked(adapter);
822 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
824 adapter->if_flags = ifp->if_flags;
825 IXGBE_CORE_UNLOCK(adapter);
/* Multicast list changed: reprogram the filter with interrupts masked. */
829 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831 IXGBE_CORE_LOCK(adapter);
832 ixgbe_disable_intr(adapter);
833 ixgbe_set_multi(adapter);
834 ixgbe_enable_intr(adapter);
835 IXGBE_CORE_UNLOCK(adapter);
840 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities; reinit if the interface is running. */
845 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847 if (mask & IFCAP_HWCSUM)
848 ifp->if_capenable ^= IFCAP_HWCSUM;
849 if (mask & IFCAP_TSO4)
850 ifp->if_capenable ^= IFCAP_TSO4;
851 if (mask & IFCAP_TSO6)
852 ifp->if_capenable ^= IFCAP_TSO6;
853 if (mask & IFCAP_LRO)
854 ifp->if_capenable ^= IFCAP_LRO;
855 if (mask & IFCAP_VLAN_HWTAGGING)
856 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857 if (mask & IFCAP_VLAN_HWFILTER)
858 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859 if (mask & IFCAP_VLAN_HWTSO)
860 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862 IXGBE_CORE_LOCK(adapter);
863 ixgbe_init_locked(adapter);
864 IXGBE_CORE_UNLOCK(adapter);
866 VLAN_CAPABILITIES(ifp);
869 #if __FreeBSD_version >= 1100036
/* SIOCGI2C: read SFP module EEPROM over I2C; only device addresses 0xA0/0xA2 are valid. */
872 struct ixgbe_hw *hw = &adapter->hw;
875 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
876 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
879 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
883 if (i2c.len > sizeof(i2c.data)) {
888 for (i = 0; i < i2c.len; i++)
889 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890 i2c.dev_addr, &i2c.data[i]);
891 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* Everything else falls through to the generic ethernet ioctl handler. */
896 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897 error = ether_ioctl(ifp, command, data);
904 /*********************************************************************
907 * This routine is used in two ways. It is used by the stack as
908 * init entry point in network interface structure. It is also used
909 * by the driver as a hw/sw initialization routine to get to a
912 * return 0 on success, positive on failure
913 **********************************************************************/
/* Bit position of the max frame size (MFS) field within the MHADD register. */
914 #define IXGBE_MHADD_MFS_SHIFT 16
917 ixgbe_init_locked(struct adapter *adapter)
919 struct ifnet *ifp = adapter->ifp;
920 device_t dev = adapter->dev;
921 struct ixgbe_hw *hw = &adapter->hw;
922 u32 k, txdctl, mhadd, gpie;
925 mtx_assert(&adapter->core_mtx, MA_OWNED);
926 INIT_DEBUGOUT("ixgbe_init_locked: begin");
927 hw->adapter_stopped = FALSE;
928 ixgbe_stop_adapter(hw);
929 callout_stop(&adapter->timer);
931 /* reprogram the RAR[0] in case user changed it. */
932 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
934 /* Get the latest mac address, User can use a LAA */
935 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936 IXGBE_ETH_LENGTH_OF_ADDRESS);
937 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938 hw->addr_ctrl.rar_used_count = 1;
940 /* Set the various hardware offload abilities */
941 ifp->if_hwassist = 0;
942 if (ifp->if_capenable & IFCAP_TSO)
943 ifp->if_hwassist |= CSUM_TSO;
944 if (ifp->if_capenable & IFCAP_TXCSUM) {
945 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947 if (hw->mac.type != ixgbe_mac_82598EB)
948 ifp->if_hwassist |= CSUM_SCTP;
952 /* Prepare transmit descriptors and buffers */
953 if (ixgbe_setup_transmit_structures(adapter)) {
954 device_printf(dev, "Could not setup transmit structures\n");
960 ixgbe_initialize_transmit_units(adapter);
962 /* Setup Multicast table */
963 ixgbe_set_multi(adapter);
966 ** Determine the correct mbuf pool
967 ** for doing jumbo frames
969 if (adapter->max_frame_size <= 2048)
970 adapter->rx_mbuf_sz = MCLBYTES;
971 else if (adapter->max_frame_size <= 4096)
972 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973 else if (adapter->max_frame_size <= 9216)
974 adapter->rx_mbuf_sz = MJUM9BYTES;
976 adapter->rx_mbuf_sz = MJUM16BYTES;
978 /* Prepare receive descriptors and buffers */
979 if (ixgbe_setup_receive_structures(adapter)) {
980 device_printf(dev, "Could not setup receive structures\n");
985 /* Configure RX settings */
986 ixgbe_initialize_receive_units(adapter);
988 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
990 /* Enable Fan Failure Interrupt */
991 gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
993 /* Add for Module detection */
994 if (hw->mac.type == ixgbe_mac_82599EB)
995 gpie |= IXGBE_SDP2_GPIEN;
998 * Thermal Failure Detection (X540)
999 * Link Detection (X552)
1001 if (hw->mac.type == ixgbe_mac_X540 ||
1002 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004 gpie |= IXGBE_SDP0_GPIEN_X540;
1006 if (adapter->msix > 1) {
1007 /* Enable Enhanced MSIX mode */
1008 gpie |= IXGBE_GPIE_MSIX_MODE;
1009 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1012 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1015 if (ifp->if_mtu > ETHERMTU) {
1016 /* aka IXGBE_MAXFRS on 82599 and newer */
1017 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1023 /* Now enable all the queues */
1024 for (int i = 0; i < adapter->num_queues; i++) {
1025 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026 txdctl |= IXGBE_TXDCTL_ENABLE;
1027 /* Set WTHRESH to 8, burst writeback */
1028 txdctl |= (8 << 16);
1030 * When the internal queue falls below PTHRESH (32),
1031 * start prefetching as long as there are at least
1032 * HTHRESH (1) buffers ready. The values are taken
1033 * from the Intel linux driver 3.8.21.
1034 * Prefetching enables tx line rate even with 1 queue.
1036 txdctl |= (32 << 0) | (1 << 8);
1037 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1040 for (int i = 0; i < adapter->num_queues; i++) {
1041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042 if (hw->mac.type == ixgbe_mac_82598EB) {
1048 rxdctl &= ~0x3FFFFF;
1051 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053 for (k = 0; k < 10; k++) {
1054 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055 IXGBE_RXDCTL_ENABLE)
1063 * In netmap mode, we must preserve the buffers made
1064 * available to userspace before the if_init()
1065 * (this is true by default on the TX side, because
1066 * init makes all buffers available to userspace).
1068 * netmap_reset() and the device specific routines
1069 * (e.g. ixgbe_setup_receive_rings()) map these
1070 * buffers at the end of the NIC ring, so here we
1071 * must set the RDT (tail) register to make sure
1072 * they are not overwritten.
1074 * In this driver the NIC ring starts at RDH = 0,
1075 * RDT points to the last slot available for reception (?),
1076 * so RDT = num_rx_desc - 1 means the whole ring is available.
1078 if (ifp->if_capenable & IFCAP_NETMAP) {
1079 struct netmap_adapter *na = NA(adapter->ifp);
1080 struct netmap_kring *kring = &na->rx_rings[i];
1081 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1083 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1085 #endif /* DEV_NETMAP */
1086 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1089 /* Enable Receive engine */
1090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091 if (hw->mac.type == ixgbe_mac_82598EB)
1092 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093 rxctrl |= IXGBE_RXCTRL_RXEN;
1094 ixgbe_enable_rx_dma(hw, rxctrl);
1096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1098 /* Set up MSI/X routing */
1099 if (ixgbe_enable_msix) {
1100 ixgbe_configure_ivars(adapter);
1101 /* Set up auto-mask */
1102 if (hw->mac.type == ixgbe_mac_82598EB)
1103 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1105 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1108 } else { /* Simple settings for Legacy/MSI */
1109 ixgbe_set_ivar(adapter, 0, 0, 0);
1110 ixgbe_set_ivar(adapter, 0, 0, 1);
1111 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1115 /* Init Flow director */
1116 if (hw->mac.type != ixgbe_mac_82598EB) {
1117 u32 hdrm = 32 << fdir_pballoc;
1119 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1125 ** Check on any SFP devices that
1126 ** need to be kick-started
1128 if (hw->phy.type == ixgbe_phy_none) {
1129 int err = hw->phy.ops.identify(hw);
1130 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1132 "Unsupported SFP+ module type was detected.\n");
1137 /* Set moderation on the Link interrupt */
1138 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1140 /* Configure Energy Efficient Ethernet for supported devices */
1141 if (adapter->eee_support)
1142 ixgbe_setup_eee(hw, adapter->eee_enabled);
1144 /* Config/Enable Link */
1145 ixgbe_config_link(adapter);
1147 /* Hardware Packet Buffer & Flow Control setup */
1148 ixgbe_config_delay_values(adapter);
1150 /* Initialize the FC settings */
1153 /* Set up VLAN support and filter */
1154 ixgbe_setup_vlan_hw_support(adapter);
1156 /* Setup DMA Coalescing */
1157 ixgbe_config_dmac(adapter);
1159 /* And now turn on interrupts */
1160 ixgbe_enable_intr(adapter);
1162 /* Now inform the stack we're ready */
1163 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* if_init entry point: take the core lock and run the locked init path. */
ixgbe_init(void *arg)
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
/*
** Compute flow-control high/low watermarks from the max frame size
** using the MAC-generation-specific delay macros, then seed the
** remaining hw->fc fields from the adapter's configured mode.
*/
ixgbe_config_delay_values(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water: delay value differs by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		tmp = IXGBE_DV(frame, frame);
	size = IXGBE_BT2KB(tmp);
	/* RXPBSIZE >> 10: presumably bytes -> KB to match BT2KB units -- confirm */
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		tmp = IXGBE_LOW_DV(frame);
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;	/* user-requested FC mode */
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
1222 ** MSIX Interrupt Handlers and Tasklets
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1229 struct ixgbe_hw *hw = &adapter->hw;
1230 u64 queue = (u64)(1 << vector);
1233 if (hw->mac.type == ixgbe_mac_82598EB) {
1234 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1237 mask = (queue & 0xFFFFFFFF);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240 mask = (queue >> 32);
1242 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1249 struct ixgbe_hw *hw = &adapter->hw;
1250 u64 queue = (u64)(1 << vector);
1253 if (hw->mac.type == ixgbe_mac_82598EB) {
1254 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1257 mask = (queue & 0xFFFFFFFF);
1259 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260 mask = (queue >> 32);
1262 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
** Deferred (taskqueue) handler for a queue: service RX and TX while
** the interface is running, then re-enable the queue interrupt.
*/
ixgbe_handle_que(void *context, int pending)
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
#ifndef IXGBE_LEGACY_TX
		/* Multiqueue transmit: drain this ring's buf_ring */
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
		/* Legacy transmit path via the interface send queue */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
		ixgbe_enable_intr(adapter);
1298 /*********************************************************************
1300 * Legacy Interrupt Service routine
1302 **********************************************************************/
/*
** Legacy/MSI interrupt service routine: read EICR to find the cause,
** service RX/TX on the single queue, and dispatch fan-failure, link
** and external-PHY events to the admin taskqueue.
*/
ixgbe_legacy_irq(void *arg)
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;

	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Shared interrupt that was not for us: re-enable and leave */
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);

	more = ixgbe_rxeof(que);

#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

		taskqueue_enqueue(que->tq, &que->que_task);
	ixgbe_enable_intr(adapter);
1362 /*********************************************************************
1364 * MSIX Queue Interrupt Service routine
1366 **********************************************************************/
1368 ixgbe_msix_que(void *arg)
1370 struct ix_queue *que = arg;
1371 struct adapter *adapter = que->adapter;
1372 struct ifnet *ifp = adapter->ifp;
1373 struct tx_ring *txr = que->txr;
1374 struct rx_ring *rxr = que->rxr;
1378 /* Protect against spurious interrupts */
1379 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1382 ixgbe_disable_queue(adapter, que->msix);
1385 more = ixgbe_rxeof(que);
1389 #ifdef IXGBE_LEGACY_TX
1390 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1391 ixgbe_start_locked(txr, ifp);
1393 if (!drbr_empty(ifp, txr->br))
1394 ixgbe_mq_start_locked(ifp, txr);
1396 IXGBE_TX_UNLOCK(txr);
1400 if (ixgbe_enable_aim == FALSE)
1403 ** Do Adaptive Interrupt Moderation:
1404 ** - Write out last calculated setting
1405 ** - Calculate based on average size over
1406 ** the last interval.
1408 if (que->eitr_setting)
1409 IXGBE_WRITE_REG(&adapter->hw,
1410 IXGBE_EITR(que->msix), que->eitr_setting);
1412 que->eitr_setting = 0;
1414 /* Idle, do nothing */
1415 if ((txr->bytes == 0) && (rxr->bytes == 0))
1418 if ((txr->bytes) && (txr->packets))
1419 newitr = txr->bytes/txr->packets;
1420 if ((rxr->bytes) && (rxr->packets))
1421 newitr = max(newitr,
1422 (rxr->bytes / rxr->packets));
1423 newitr += 24; /* account for hardware frame, crc */
1425 /* set an upper boundary */
1426 newitr = min(newitr, 3000);
1428 /* Be nice to the mid range */
1429 if ((newitr > 300) && (newitr < 1200))
1430 newitr = (newitr / 3);
1432 newitr = (newitr / 2);
1434 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435 newitr |= newitr << 16;
1437 newitr |= IXGBE_EITR_CNT_WDIS;
1439 /* save for next interrupt */
1440 que->eitr_setting = newitr;
1450 taskqueue_enqueue(que->tq, &que->que_task);
1452 ixgbe_enable_queue(adapter, que->msix);
/*
** MSIX link/admin vector handler: decodes non-queue EICR causes
** (link, Flow Director, ECC, thermal, SFP module, external PHY)
** and dispatches each to its tasklet, then re-arms EIMS_OTHER.
*/
ixgbe_msix_link(void *arg)
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	/* NOTE(review): reads EICS here, not EICR -- presumably EICS
	   reads back the EICR contents on this hardware; confirm */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		/* SDP1 -> multispeed fiber task; mod_mask -> module task */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* Re-arm the "other" (link/admin) interrupt */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1538 /*********************************************************************
1540 * Media Ioctl callback
1542 * This routine is called whenever the user queries the status of
1543 * the interface using ifconfig.
1545 **********************************************************************/
/*
** Media ioctl (SIOCGIFMEDIA) status callback: refresh link state
** under the core lock, then translate the supported physical layer
** and current link speed into ifmedia active/status bits, including
** the negotiated flow-control pause direction.
*/
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive media and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = ixgbe_get_supported_physical_layer(hw);

	/* Copper (10G/1G/100M BASE-T) */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
	/* SFP+ direct-attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
	/*
	** XXX: These need to use the proper media types once
	** they are added to ifmedia (KR/KX4/KX reported as SR/CX4/CX).
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);
1672 /*********************************************************************
1674 * Media Ioctl callback
1676 * This routine is called when the user changes speed/duplex using
1677 * media/mediopt option with ifconfig.
1679 **********************************************************************/
/*
** Media ioctl (SIOCSIFMEDIA) change callback: translate the
** requested ifmedia subtype into an ixgbe_link_speed bitmask and
** reprogram the link. Cases accumulate lower speeds by falling
** through, so a 10G request also advertises 1G/100M.
*/
ixgbe_media_change(struct ifnet * ifp)
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)

	/* Backplane media cannot be changed by the user */
	if (hw->phy.media_type == ixgbe_media_type_backplane)

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* fallthrough */
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		/* fallthrough */
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* fallthrough */
	case IFM_1000_CX: /* KX */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_100_FULL;

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Cache advertised speeds; presumably matches the 'advertise'
	   sysctl bit encoding -- confirm */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	device_printf(adapter->dev, "Invalid media type!\n");
/*
** Program the unicast/multicast promiscuous bits (UPE/MPE) in
** FCTRL from the interface flags, counting multicast addresses to
** decide whether MPE can be cleared.
*/
ixgbe_set_promisc(struct adapter *adapter)
	struct ifnet *ifp = adapter->ifp;

	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		/* Otherwise count link-layer multicast memberships */
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
		if_maddr_runlock(ifp);
	/* Filter table can hold them all: no multicast promisc needed */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1788 /*********************************************************************
1791 * This routine is called whenever multicast address list is updated.
1793 **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
/*
** Rebuild the hardware multicast filter from the interface's
** multicast list; enables MPE instead when the list exceeds the
** filter capacity or ALLMULTI/PROMISC is requested.
*/
ixgbe_set_multi(struct adapter *adapter)
	struct ifmultiaddr *ifma;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);

#if __FreeBSD_version < 800000
	if_maddr_rlock(ifp);
	/* Flatten link-layer multicast addresses into the mta array */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
	if_maddr_runlock(ifp);

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* NOTE(review): this unconditional set appears redundant -- every
	   branch below either re-sets or clears these bits; confirm
	   against the upstream driver before removing */
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only program the filter when it can hold the whole list */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		ixgbe_update_mc_addr_list(&adapter->hw,
		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1856 * This is an iterator function now needed by the multicast
1857 * shared code. It simply feeds the shared code routine the
1858 * addresses in the array of ixgbe_set_multi() one by one.
/*
** Iterator callback for ixgbe_update_mc_addr_list(): hands the
** shared code one MAC address from the flat mta array per call and
** advances the cursor in *update_ptr.
*/
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
	u8 *addr = *update_ptr;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
1873 /*********************************************************************
1876 * This routine checks for link status,updates statistics,
1877 * and runs the watchdog check.
1879 **********************************************************************/
/*
** Periodic timer (reschedules itself every hz ticks, core lock
** held): probes hot-plug SFPs, refreshes link state and stats, and
** runs the per-queue hung/watchdog check. Only resets the hardware
** when every queue reports hung.
*/
ixgbe_local_timer(void *arg)
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
		/* Check if we've come back from hung */
		if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
			adapter->active_queues |= ((u64)1 << que->me);
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Watchdog tail: full hang detected, reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
1952 ** Note: this routine updates the OS on the link state
1953 ** the real check of the hardware only happens with
1954 ** a link interrupt.
/*
** Push the driver's cached link state (adapter->link_up) out to the
** network stack. On a new link-up this also re-applies flow control
** and DMA coalescing settings.
*/
ixgbe_update_link_status(struct adapter *adapter)
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up){
		if (adapter->link_active == FALSE) {
			/* link_speed == 128: presumably the 10G speed
			   constant -- confirm against shared code */
			device_printf(dev,"Link is up %d Gbps %s \n",
			    ((adapter->link_speed == 128)? 10:1),
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
1988 /*********************************************************************
1990 * This routine disables all traffic on the adapter by issuing a
1991 * global reset on the MAC and deallocates TX/RX buffers.
1993 **********************************************************************/
/*
** Quiesce the adapter: disable interrupts, stop the timer and MAC,
** turn off the TX laser and report link-down to the stack.
** Called with the core lock held.
*/
ixgbe_stop(void *arg)
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* Clear the flag first -- presumably so the shared-code stop
	   routine actually executes; confirm */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2031 /*********************************************************************
2033 * Determine hardware revision.
2035 **********************************************************************/
/*
** Cache the PCI identification registers into the shared-code hw
** struct, enable bus mastering, and derive mac.type plus the
** scatter-gather segment limit for this MAC generation.
*/
ixgbe_identify_hardware(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/*
	** Make sure BUSMASTER is set
	*/
	pci_enable_busmaster(dev);

	/* We need this here to set the num_segs below */
	ixgbe_set_mac_type(hw);

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
		adapter->num_segs = IXGBE_82598_SCATTER;
2069 /*********************************************************************
2071 * Determine optic type
2073 **********************************************************************/
/*
** Map the first matching supported physical layer to an ifmedia
** subtype in adapter->optics; falls back to autoselect when
** nothing matches.
*/
ixgbe_setup_optics(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	layer = ixgbe_get_supported_physical_layer(hw);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		adapter->optics = IFM_10G_T;
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		adapter->optics = IFM_1000_T;
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		adapter->optics = IFM_1000_SX;
	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
		adapter->optics = IFM_10G_LR;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		adapter->optics = IFM_10G_SR;
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
		adapter->optics = IFM_10G_TWINAX;
	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
		adapter->optics = IFM_10G_CX4;

	/* If we get here just set the default */
	adapter->optics = IFM_ETHER | IFM_AUTO;
2124 /*********************************************************************
2126 * Setup the Legacy or MSI Interrupt handler
2128 **********************************************************************/
/*
** Allocate and wire up the single shared interrupt used in
** Legacy/MSI operation, plus the taskqueues for deferred queue,
** link, SFP-module, multispeed-fiber, PHY and (optional) Flow
** Director work.
*/
ixgbe_allocate_legacy(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
	struct tx_ring *txr = adapter->tx_rings;

	if (adapter->msix == 1)

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
#ifndef IXGBE_LEGACY_TX
	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
	    device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
	    que, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Setup failed: tear down the taskqueues again */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);

	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2196 /*********************************************************************
2198 * Setup MSIX Interrupt resources and handlers
2200 **********************************************************************/
2202 ixgbe_allocate_msix(struct adapter *adapter)
2204 device_t dev = adapter->dev;
2205 struct ix_queue *que = adapter->queues;
2206 struct tx_ring *txr = adapter->tx_rings;
2207 int error, rid, vector = 0;
2210 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2212 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213 RF_SHAREABLE | RF_ACTIVE);
2214 if (que->res == NULL) {
2215 device_printf(dev,"Unable to allocate"
2216 " bus resource: que interrupt [%d]\n", vector);
2219 /* Set the handler function */
2220 error = bus_setup_intr(dev, que->res,
2221 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222 ixgbe_msix_que, que, &que->tag);
2225 device_printf(dev, "Failed to register QUE handler");
2228 #if __FreeBSD_version >= 800504
2229 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2232 adapter->active_queues |= (u64)(1 << que->msix);
2234 * Bind the msix vector, and thus the
2235 * rings to the corresponding cpu.
2237 * This just happens to match the default RSS round-robin
2238 * bucket -> queue -> CPU allocation.
2240 if (adapter->num_queues > 1)
2243 if (adapter->num_queues > 1)
2244 bus_bind_intr(dev, que->res, cpu_id);
2246 #ifndef IXGBE_LEGACY_TX
2247 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2249 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251 taskqueue_thread_enqueue, &que->tq);
2252 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253 device_get_nameunit(adapter->dev));
2258 adapter->res = bus_alloc_resource_any(dev,
2259 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260 if (!adapter->res) {
2261 device_printf(dev,"Unable to allocate"
2262 " bus resource: Link interrupt [%d]\n", rid);
2265 /* Set the link handler function */
2266 error = bus_setup_intr(dev, adapter->res,
2267 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268 ixgbe_msix_link, adapter, &adapter->tag);
2270 adapter->res = NULL;
2271 device_printf(dev, "Failed to register LINK handler");
2274 #if __FreeBSD_version >= 800504
2275 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2277 adapter->vector = vector;
2278 /* Tasklets for Link, SFP and Multispeed Fiber */
2279 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2284 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2286 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287 taskqueue_thread_enqueue, &adapter->tq);
2288 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289 device_get_nameunit(adapter->dev));
2295 * Setup Either MSI/X or MSI
/*
** Decide the interrupt mode: try MSIX (one vector per queue plus
** one for link), falling back to MSI and finally to a legacy
** interrupt. Returns the number of vectors obtained.
*/
ixgbe_setup_msix(struct adapter *adapter)
	device_t dev = adapter->dev;
	int rid, want, queues, msgs;

	/* Override by tuneable */
	if (ixgbe_enable_msix == 0)

	/* First try MSI/X */
	msgs = pci_msix_count(dev);

	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		rid += 4;	/* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	/* Set max queues to 8 when autoconfiguring */
	else if ((ixgbe_num_queues == 0) && (queues > 8))

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for Link.
	*/
		device_printf(adapter->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
	/*
	** If MSIX alloc failed or provided us with
	** less than needed, free and fall through to MSI
	*/
	pci_release_msi(dev);

	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;

	if (pci_alloc_msi(dev, &msgs) == 0) {
		device_printf(adapter->dev,"Using an MSI interrupt\n");
	device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
** ixgbe_allocate_pci_resources - map the device's register BAR and wire
** the bus-space tag/handle into the shared-code osdep, then delegate
** interrupt vector setup to ixgbe_setup_msix().
** NOTE(review): fragmentary listing — some lines (rid setup, returns)
** are elided; comments describe visible code only.
*/
2381 ixgbe_allocate_pci_resources(struct adapter *adapter)
2384 device_t dev = adapter->dev;
2387 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2390 if (!(adapter->pci_mem)) {
2391 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save tag/handle so the shared code's register macros can use them */
2395 adapter->osdep.mem_bus_space_tag =
2396 rman_get_bustag(adapter->pci_mem);
2397 adapter->osdep.mem_bus_space_handle =
2398 rman_get_bushandle(adapter->pci_mem);
2399 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2401 /* Legacy defaults */
2402 adapter->num_queues = 1;
2403 adapter->hw.back = &adapter->osdep;
2406 ** Now setup MSI or MSI/X, should
2407 ** return us the number of supported
2408 ** vectors. (Will be 1 for MSI)
2410 adapter->msix = ixgbe_setup_msix(adapter);
/*
** ixgbe_free_pci_resources - tear down interrupt handlers and release
** every PCI resource taken in attach: per-queue IRQs, the link/legacy
** IRQ, MSI messages, the MSI-X table BAR and the register BAR.
** NOTE(review): fragmentary listing — closing braces and some early
** returns are elided; comments describe visible code only.
*/
2415 ixgbe_free_pci_resources(struct adapter * adapter)
2417 struct ix_queue *que = adapter->queues;
2418 device_t dev = adapter->dev;
/* MSI-X table BAR rid differs between 82598 and 82599-class MACs */
2421 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2422 memrid = PCIR_BAR(MSIX_82598_BAR);
2424 memrid = PCIR_BAR(MSIX_82599_BAR);
2427 ** There is a slight possibility of a failure mode
2428 ** in attach that will result in entering this function
2429 ** before interrupt resources have been initialized, and
2430 ** in that case we do not want to execute the loops below
2431 ** We can detect this reliably by the state of the adapter
2434 if (adapter->res == NULL)
2438 ** Release all msix queue resources:
2440 for (int i = 0; i < adapter->num_queues; i++, que++) {
2441 rid = que->msix + 1;
2442 if (que->tag != NULL) {
2443 bus_teardown_intr(dev, que->res, que->tag);
2446 if (que->res != NULL)
2447 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2451 /* Clean the Legacy or Link interrupt last */
2452 if (adapter->vector) /* we are doing MSIX */
2453 rid = adapter->vector + 1;
2455 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2457 if (adapter->tag != NULL) {
2458 bus_teardown_intr(dev, adapter->res, adapter->tag);
2459 adapter->tag = NULL;
2461 if (adapter->res != NULL)
2462 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2466 pci_release_msi(dev);
2468 if (adapter->msix_mem != NULL)
2469 bus_release_resource(dev, SYS_RES_MEMORY,
2470 memrid, adapter->msix_mem);
2472 if (adapter->pci_mem != NULL)
2473 bus_release_resource(dev, SYS_RES_MEMORY,
2474 PCIR_BAR(0), adapter->pci_mem);
2479 /*********************************************************************
2481 * Setup networking device structure and register an interface.
2483 **********************************************************************/
/*
** ixgbe_setup_interface - allocate and initialize the ifnet, set driver
** entry points, attach the Ethernet layer, advertise capabilities
** (checksum offload, TSO, LRO, jumbo, VLAN tagging), and register the
** ifmedia callbacks with autoselect as the default media.
** NOTE(review): fragmentary listing — some lines (ifp NULL check body,
** #else/#endif of the LEGACY_TX split, returns) are elided; comments
** describe visible code only.
*/
2485 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2489 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2491 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2493 device_printf(dev, "can not allocate ifnet structure\n");
2496 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497 if_initbaudrate(ifp, IF_Gbps(10));
2498 ifp->if_init = ixgbe_init;
2499 ifp->if_softc = adapter;
2500 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2501 ifp->if_ioctl = ixgbe_ioctl;
/* Multiqueue transmit path vs the legacy if_start/IFQ path */
2502 #ifndef IXGBE_LEGACY_TX
2503 ifp->if_transmit = ixgbe_mq_start;
2504 ifp->if_qflush = ixgbe_qflush;
2506 ifp->if_start = ixgbe_start;
2507 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2508 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2509 IFQ_SET_READY(&ifp->if_snd);
2512 ether_ifattach(ifp, adapter->hw.mac.addr);
2514 adapter->max_frame_size =
2515 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2518 * Tell the upper layer(s) we support long frames.
2520 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2522 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2523 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2524 ifp->if_capabilities |= IFCAP_LRO;
2525 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2529 ifp->if_capenable = ifp->if_capabilities;
2532 ** Don't turn this on by default, if vlans are
2533 ** created on another pseudo device (eg. lagg)
2534 ** then vlan events are not passed thru, breaking
2535 ** operation, but with HW FILTER off it works. If
2536 ** using vlans directly on the ixgbe driver you can
2537 ** enable this and get full hardware tag filtering.
2539 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2542 * Specify the media types supported by this adapter and register
2543 * callbacks to update media and link information
2545 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2546 ixgbe_media_status);
2548 ixgbe_add_media_types(adapter);
2550 /* Autoselect media by default */
2551 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
** ixgbe_add_media_types - query the supported physical layer from the
** shared code and populate the ifmedia list, mapping backplane types
** (KR/KX4/KX) that have no FreeBSD define onto the closest existing
** media words. Always ends by adding IFM_AUTO.
** NOTE(review): fragmentary listing — some closing braces are elided;
** comments describe visible code only.
*/
2557 ixgbe_add_media_types(struct adapter *adapter)
2559 struct ixgbe_hw *hw = &adapter->hw;
2560 device_t dev = adapter->dev;
2563 layer = ixgbe_get_supported_physical_layer(hw);
2565 /* Media types with matching FreeBSD media defines */
2566 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2567 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2568 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2569 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2570 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2571 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2573 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2574 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2575 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2577 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2578 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2579 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2580 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2581 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2582 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2583 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2584 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2587 ** Other (no matching FreeBSD media type):
2588 ** To workaround this, we'll assign these completely
2589 ** inappropriate media types.
2591 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2592 device_printf(dev, "Media supported: 10GbaseKR\n");
2593 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2594 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2596 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2597 device_printf(dev, "Media supported: 10GbaseKX4\n");
2598 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2599 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2601 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2602 device_printf(dev, "Media supported: 1000baseKX\n");
2603 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2604 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2606 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2607 /* Someday, someone will care about you... */
2608 device_printf(dev, "Media supported: 1000baseBX\n");
/* 82598AT additionally advertises explicit 1000baseT full-duplex */
2611 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2612 ifmedia_add(&adapter->media,
2613 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2614 ifmedia_add(&adapter->media,
2615 IFM_ETHER | IFM_1000_T, 0, NULL);
2618 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
** ixgbe_config_link - bring the link up. For SFP ports, defer to the
** module (mod) or multispeed-fiber (msf) taskqueue handlers; otherwise
** query link state and capabilities and call setup_link with the
** advertised autoneg settings.
** NOTE(review): fragmentary listing — the sfp branch structure and
** error-path lines are elided; comments describe visible code only.
*/
2622 ixgbe_config_link(struct adapter *adapter)
2624 struct ixgbe_hw *hw = &adapter->hw;
2625 u32 autoneg, err = 0;
2626 bool sfp, negotiate;
2628 sfp = ixgbe_is_sfp(hw);
/* Multispeed fiber needs setup_sfp + TX laser before the msf task */
2631 if (hw->phy.multispeed_fiber) {
2632 hw->mac.ops.setup_sfp(hw);
2633 ixgbe_enable_tx_laser(hw);
2634 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2636 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2638 if (hw->mac.ops.check_link)
2639 err = ixgbe_check_link(hw, &adapter->link_speed,
2640 &adapter->link_up, FALSE);
2643 autoneg = hw->phy.autoneg_advertised;
2644 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2645 err = hw->mac.ops.get_link_capabilities(hw,
2646 &autoneg, &negotiate);
2649 if (hw->mac.ops.setup_link)
2650 err = hw->mac.ops.setup_link(hw,
2651 autoneg, adapter->link_up);
2658 /*********************************************************************
2660 * Enable transmit units.
2662 **********************************************************************/
/*
** ixgbe_initialize_transmit_units - program per-queue TX descriptor
** ring registers (base, length, head/tail), cache the tail register,
** disable head writeback via DCA_TXCTRL, and on 82599/X540-class MACs
** enable DMA TX and set MTQC with the arbiter temporarily disabled.
** NOTE(review): fragmentary listing — switch break/default lines and
** closing braces are elided; comments describe visible code only.
*/
2664 ixgbe_initialize_transmit_units(struct adapter *adapter)
2666 struct tx_ring *txr = adapter->tx_rings;
2667 struct ixgbe_hw *hw = &adapter->hw;
2669 /* Setup the Base and Length of the Tx Descriptor Ring */
2671 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2672 u64 tdba = txr->txdma.dma_paddr;
2675 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2676 (tdba & 0x00000000ffffffffULL));
2677 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2678 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2679 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2681 /* Setup the HW Tx Head and Tail descriptor pointers */
2682 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2683 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2685 /* Cache the tail address */
2686 txr->tail = IXGBE_TDT(txr->me);
2688 /* Set the processing limit */
2689 txr->process_limit = ixgbe_tx_process_limit;
2691 /* Disable Head Writeback */
/* DCA_TXCTRL register layout differs on 82598 vs newer MACs */
2692 switch (hw->mac.type) {
2693 case ixgbe_mac_82598EB:
2694 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2696 case ixgbe_mac_82599EB:
2697 case ixgbe_mac_X540:
2699 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2702 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2703 switch (hw->mac.type) {
2704 case ixgbe_mac_82598EB:
2705 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2707 case ixgbe_mac_82599EB:
2708 case ixgbe_mac_X540:
2710 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2716 if (hw->mac.type != ixgbe_mac_82598EB) {
2717 u32 dmatxctl, rttdcs;
2718 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2719 dmatxctl |= IXGBE_DMATXCTL_TE;
2720 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2721 /* Disable arbiter to set MTQC */
2722 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2723 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2724 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2725 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2726 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2727 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
** ixgbe_initialise_rss_mapping - program the RSS redirection table
** (RETA, plus ERETA for large tables), seed the RSSRK hash key with
** random bytes, and enable RSS hashing in MRQC for the selected
** packet types (UDP fields appear to be conditionally compiled).
** NOTE(review): fragmentary listing — the switch cases that set
** index_mult/table_size, the reta shift accumulation, and several
** #ifdef lines are elided; comments describe visible code only.
*/
2734 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2736 struct ixgbe_hw *hw = &adapter->hw;
2738 int i, j, queue_id, table_size;
2740 uint32_t rss_key[10];
2746 /* set up random bits */
2747 arc4rand(&rss_key, sizeof(rss_key), 0);
2749 /* Set multiplier for RETA setup and table size based on MAC */
2752 switch (adapter->hw.mac.type) {
2753 case ixgbe_mac_82598EB:
2756 case ixgbe_mac_X550:
2757 case ixgbe_mac_X550EM_x:
2764 /* Set up the redirection table */
/* j cycles through the queues; each RETA word packs four 8-bit ids */
2765 for (i = 0, j = 0; i < table_size; i++, j++) {
2766 if (j == adapter->num_queues) j = 0;
2767 queue_id = (j * index_mult);
2769 * The low 8 bits are for hash value (n+0);
2770 * The next 8 bits are for hash value (n+1), etc.
2773 reta = reta | ( ((uint32_t) queue_id) << 24);
2776 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2778 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2783 /* Now fill our hash function seeds */
2784 for (int i = 0; i < 10; i++)
2785 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2787 /* Perform hash on these packet types */
2789 * Disable UDP - IP fragments aren't currently being handled
2790 * and so we end up with a mix of 2-tuple and 4-tuple
2793 mrqc = IXGBE_MRQC_RSSEN
2794 | IXGBE_MRQC_RSS_FIELD_IPV4
2795 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2797 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2799 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2800 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2801 | IXGBE_MRQC_RSS_FIELD_IPV6
2802 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2804 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2805 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2808 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2812 /*********************************************************************
2814 * Setup receive registers and features.
2816 **********************************************************************/
2817 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2819 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
** ixgbe_initialize_receive_units - program the RX path: disable RX
** while configuring, enable broadcasts in FCTRL, set jumbo/CRC-strip
** bits in HLREG0 (netmap-conditional), then per queue set descriptor
** ring base/length, SRRCTL buffer sizing and DROP_EN policy, and
** head/tail pointers. Finishes with PSRTYPE, RSS setup and RXCSUM.
** NOTE(review): fragmentary listing — #ifdef DEV_NETMAP open, else
** branches and closing braces are elided; comments describe visible
** code only.
*/
2822 ixgbe_initialize_receive_units(struct adapter *adapter)
2824 struct rx_ring *rxr = adapter->rx_rings;
2825 struct ixgbe_hw *hw = &adapter->hw;
2826 struct ifnet *ifp = adapter->ifp;
2827 u32 bufsz, fctrl, srrctl, rxcsum;
2832 * Make sure receives are disabled while
2833 * setting up the descriptor ring
2835 ixgbe_disable_rx(hw);
2837 /* Enable broadcasts */
2838 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2839 fctrl |= IXGBE_FCTRL_BAM;
/* 82598 also passes pause/MAC-control frames up (DPF/PMCF) */
2840 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2841 fctrl |= IXGBE_FCTRL_DPF;
2842 fctrl |= IXGBE_FCTRL_PMCF;
2844 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2846 /* Set for Jumbo Frames? */
2847 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2848 if (ifp->if_mtu > ETHERMTU)
2849 hlreg |= IXGBE_HLREG0_JUMBOEN;
2851 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2853 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2854 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2855 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2857 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2858 #endif /* DEV_NETMAP */
2859 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert mbuf size into SRRCTL's 1KB-granular packet-buffer units */
2861 bufsz = (adapter->rx_mbuf_sz +
2862 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2864 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2865 u64 rdba = rxr->rxdma.dma_paddr;
2867 /* Setup the Base and Length of the Rx Descriptor Ring */
2868 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2869 (rdba & 0x00000000ffffffffULL));
2870 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2871 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2872 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2874 /* Set up the SRRCTL register */
2875 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2876 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2877 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2879 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2882 * Set DROP_EN iff we have no flow control and >1 queue.
2883 * Note that srrctl was cleared shortly before during reset,
2884 * so we do not need to clear the bit, but do it just in case
2885 * this code is moved elsewhere.
2887 if (adapter->num_queues > 1 &&
2888 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2889 srrctl |= IXGBE_SRRCTL_DROP_EN;
2891 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2894 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2896 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2897 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2898 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2900 /* Set the processing limit */
2901 rxr->process_limit = ixgbe_rx_process_limit;
2903 /* Set the driver rx tail address */
2904 rxr->tail = IXGBE_RDT(rxr->me);
2907 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2908 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2909 IXGBE_PSRTYPE_UDPHDR |
2910 IXGBE_PSRTYPE_IPV4HDR |
2911 IXGBE_PSRTYPE_IPV6HDR;
2912 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2915 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2917 ixgbe_initialise_rss_mapping(adapter);
2919 if (adapter->num_queues > 1) {
2920 /* RSS and RX IPP Checksum are mutually exclusive */
2921 rxcsum |= IXGBE_RXCSUM_PCSD;
2924 if (ifp->if_capenable & IFCAP_RXCSUM)
2925 rxcsum |= IXGBE_RXCSUM_PCSD;
2927 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2928 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2930 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2937 ** This routine is run via an vlan config EVENT,
2938 ** it enables us to use the HW Filter table since
2939 ** we can get the vlan id. This just creates the
2940 ** entry in the soft version of the VFTA, init will
2941 ** repopulate the real table.
/*
** ixgbe_register_vlan - vlan-config event handler: record the new vtag
** in the soft shadow VFTA (init repopulates the hardware table) and
** refresh the hardware VLAN support under the core lock.
** NOTE(review): fragmentary listing — the return statements after the
** guard checks and the 'bit' computation are elided.
*/
2944 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2946 struct adapter *adapter = ifp->if_softc;
2949 if (ifp->if_softc != arg) /* Not our event */
2952 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2955 IXGBE_CORE_LOCK(adapter);
/* Each 32-bit VFTA word covers 32 tags: word index from bits 5..11 */
2956 index = (vtag >> 5) & 0x7F;
2958 adapter->shadow_vfta[index] |= (1 << bit);
2959 ++adapter->num_vlans;
2960 ixgbe_setup_vlan_hw_support(adapter);
2961 IXGBE_CORE_UNLOCK(adapter);
2965 ** This routine is run via an vlan
2966 ** unconfig EVENT, remove our entry
2967 ** in the soft vfta.
/*
** ixgbe_unregister_vlan - vlan-unconfig event handler: clear the vtag's
** bit from the soft shadow VFTA, decrement the count, and re-init the
** hardware VLAN support, all under the core lock. Mirrors
** ixgbe_register_vlan above.
** NOTE(review): fragmentary listing — guard returns and the 'bit'
** computation are elided.
*/
2970 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2972 struct adapter *adapter = ifp->if_softc;
2975 if (ifp->if_softc != arg)
2978 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2981 IXGBE_CORE_LOCK(adapter);
2982 index = (vtag >> 5) & 0x7F;
2984 adapter->shadow_vfta[index] &= ~(1 << bit);
2985 --adapter->num_vlans;
2986 /* Re-init to load the changes */
2987 ixgbe_setup_vlan_hw_support(adapter);
2988 IXGBE_CORE_UNLOCK(adapter);
/*
** ixgbe_setup_vlan_hw_support - after a soft reset, re-enable per-queue
** VLAN tag stripping (RXDCTL.VME on non-82598), repopulate the hardware
** VFTA from the shadow copy, and program VLNCTRL filtering/stripping
** bits according to the interface capabilities.
** NOTE(review): fragmentary listing — early returns and some braces are
** elided; comments describe visible code only.
*/
2992 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2994 struct ifnet *ifp = adapter->ifp;
2995 struct ixgbe_hw *hw = &adapter->hw;
2996 struct rx_ring *rxr;
3001 ** We get here thru init_locked, meaning
3002 ** a soft reset, this has already cleared
3003 ** the VFTA and other state, so if there
3004 ** have been no vlan's registered do nothing.
3006 if (adapter->num_vlans == 0)
3009 /* Setup the queues for vlans */
3010 for (int i = 0; i < adapter->num_queues; i++) {
3011 rxr = &adapter->rx_rings[i];
3012 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3013 if (hw->mac.type != ixgbe_mac_82598EB) {
3014 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3015 ctrl |= IXGBE_RXDCTL_VME;
3016 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3018 rxr->vtag_strip = TRUE;
3021 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3024 ** A soft reset zero's out the VFTA, so
3025 ** we need to repopulate it now.
3027 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3028 if (adapter->shadow_vfta[i] != 0)
3029 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3030 adapter->shadow_vfta[i]);
3032 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3033 /* Enable the Filter Table if enabled */
3034 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3035 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3036 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 the global VME bit lives in VLNCTRL, not RXDCTL */
3038 if (hw->mac.type == ixgbe_mac_82598EB)
3039 ctrl |= IXGBE_VLNCTRL_VME;
3040 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
** ixgbe_enable_intr - build and program the EIMS interrupt-enable mask
** with per-MAC causes (ECC, thermal sensor, SFP GPI pins, flow
** director), set EIAC auto-clear for MSI-X queue vectors (keeping link
** causes manual), then enable each queue vector individually.
** NOTE(review): fragmentary listing — switch break/default lines and
** some #ifdef IXGBE_FDIR markers are elided; comments describe visible
** code only.
*/
3044 ixgbe_enable_intr(struct adapter *adapter)
3046 struct ixgbe_hw *hw = &adapter->hw;
3047 struct ix_queue *que = adapter->queues;
3050 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3051 /* Enable Fan Failure detection */
3052 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3053 mask |= IXGBE_EIMS_GPI_SDP1;
3055 switch (adapter->hw.mac.type) {
3056 case ixgbe_mac_82599EB:
3057 mask |= IXGBE_EIMS_ECC;
3058 /* Temperature sensor on some adapters */
3059 mask |= IXGBE_EIMS_GPI_SDP0;
3060 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3061 mask |= IXGBE_EIMS_GPI_SDP1;
3062 mask |= IXGBE_EIMS_GPI_SDP2;
3064 mask |= IXGBE_EIMS_FLOW_DIR;
3067 case ixgbe_mac_X540:
3068 /* Detect if Thermal Sensor is enabled */
3069 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3070 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3071 mask |= IXGBE_EIMS_TS;
3072 mask |= IXGBE_EIMS_ECC;
3074 mask |= IXGBE_EIMS_FLOW_DIR;
3077 case ixgbe_mac_X550:
3078 case ixgbe_mac_X550EM_x:
3079 /* MAC thermal sensor is automatically enabled */
3080 mask |= IXGBE_EIMS_TS;
3081 /* Some devices use SDP0 for important information */
3082 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3083 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3084 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3085 mask |= IXGBE_EIMS_ECC;
3087 mask |= IXGBE_EIMS_FLOW_DIR;
3094 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3096 /* With MSI-X we use auto clear */
3097 if (adapter->msix_mem) {
3098 mask = IXGBE_EIMS_ENABLE_MASK;
3099 /* Don't autoclear Link */
3100 mask &= ~IXGBE_EIMS_OTHER;
3101 mask &= ~IXGBE_EIMS_LSC;
3102 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3106 ** Now enable all queues, this is done separately to
3107 ** allow for handling the extended (beyond 32) MSIX
3108 ** vectors that can be used by 82599
3110 for (int i = 0; i < adapter->num_queues; i++, que++)
3111 ixgbe_enable_queue(adapter, que->msix);
3113 IXGBE_WRITE_FLUSH(hw);
/*
** ixgbe_disable_intr - mask all interrupt causes. Clears EIAC when
** MSI-X is active, then writes EIMC (82598: single register; newer
** MACs: EIMC plus both EIMC_EX extension registers) and flushes.
** NOTE(review): fragmentary listing — the else keyword/braces between
** the two EIMC paths are elided.
*/
3119 ixgbe_disable_intr(struct adapter *adapter)
3121 if (adapter->msix_mem)
3122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3123 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3124 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3126 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3127 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3128 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3130 IXGBE_WRITE_FLUSH(&adapter->hw);
3135 ** Get the width and transaction speed of
3136 ** the slot this adapter is plugged into.
/*
** ixgbe_get_slot_info - determine PCIe slot width/speed. Most devices
** use the shared-code ixgbe_get_bus_info(); the 82599 SFP quad-port
** NIC instead walks two levels up the PCI tree to the expansion slot's
** bridge and decodes its Link Status register. Prints the result and
** warns when the slot bandwidth is below what the NIC needs.
** NOTE(review): fragmentary listing — switch break lines, goto/display
** labels and some returns are elided; comments describe visible code
** only.
*/
3139 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3141 device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
3142 struct ixgbe_mac_info *mac = &hw->mac;
3146 /* For most devices simply call the shared code routine */
3147 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3148 ixgbe_get_bus_info(hw);
3149 /* These devices don't use PCI-E */
3150 switch (hw->mac.type) {
3151 case ixgbe_mac_X550EM_x:
3159 ** For the Quad port adapter we need to parse back
3160 ** up the PCI tree to find the speed of the expansion
3161 ** slot into which this adapter is plugged. A bit more work.
3163 dev = device_get_parent(device_get_parent(dev));
3165 device_printf(dev, "parent pcib = %x,%x,%x\n",
3166 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3168 dev = device_get_parent(device_get_parent(dev));
3170 device_printf(dev, "slot pcib = %x,%x,%x\n",
3171 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3173 /* Now get the PCI Express Capabilities offset */
3174 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3175 /* ...and read the Link Status Register */
3176 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3177 switch (link & IXGBE_PCI_LINK_WIDTH) {
3178 case IXGBE_PCI_LINK_WIDTH_1:
3179 hw->bus.width = ixgbe_bus_width_pcie_x1;
3181 case IXGBE_PCI_LINK_WIDTH_2:
3182 hw->bus.width = ixgbe_bus_width_pcie_x2;
3184 case IXGBE_PCI_LINK_WIDTH_4:
3185 hw->bus.width = ixgbe_bus_width_pcie_x4;
3187 case IXGBE_PCI_LINK_WIDTH_8:
3188 hw->bus.width = ixgbe_bus_width_pcie_x8;
3191 hw->bus.width = ixgbe_bus_width_unknown;
3195 switch (link & IXGBE_PCI_LINK_SPEED) {
3196 case IXGBE_PCI_LINK_SPEED_2500:
3197 hw->bus.speed = ixgbe_bus_speed_2500;
3199 case IXGBE_PCI_LINK_SPEED_5000:
3200 hw->bus.speed = ixgbe_bus_speed_5000;
3202 case IXGBE_PCI_LINK_SPEED_8000:
3203 hw->bus.speed = ixgbe_bus_speed_8000;
3206 hw->bus.speed = ixgbe_bus_speed_unknown;
3210 mac->ops.set_lan_id(hw);
3213 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3214 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3215 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3216 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3217 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3218 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3219 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Bandwidth warnings: generic NICs need x8 or x4 Gen2 ... */
3222 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3223 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3224 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3225 device_printf(dev, "PCI-Express bandwidth available"
3226 " for this card\n is not sufficient for"
3227 " optimal performance.\n");
3228 device_printf(dev, "For optimal performance a x8 "
3229 "PCIE, or x4 PCIE Gen2 slot is required.\n");
/* ... while the quad-port SFP part needs x8 Gen3 */
3231 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3232 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3233 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3234 device_printf(dev, "PCI-Express bandwidth available"
3235 " for this card\n is not sufficient for"
3236 " optimal performance.\n");
3237 device_printf(dev, "For optimal performance a x8 "
3238 "PCIE Gen3 slot is required.\n");
3246 ** Setup the correct IVAR register for a particular MSIX interrupt
3247 ** (yes this is all very magic and confusing :)
3248 ** - entry is the register array entry
3249 ** - vector is the MSIX vector for this queue
3250 ** - type is RX/TX/MISC
/*
** ixgbe_set_ivar - route an interrupt cause to an MSI-X vector.
**  - entry:  register array entry (queue index, or cause index)
**  - vector: MSI-X vector to assign (ALLOC_VAL bit is OR'd in)
**  - type:   0 = RX, 1 = TX, -1 = misc/other causes
** 82598 packs four 8-bit entries per IVAR word with RX/TX split by a
** +64 offset; 82599-class MACs use IVAR_MISC for type -1 and pack
** RX/TX pairs per queue in IVAR(entry >> 1).
** NOTE(review): fragmentary listing — break/default lines are elided.
*/
3253 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3255 struct ixgbe_hw *hw = &adapter->hw;
3258 vector |= IXGBE_IVAR_ALLOC_VAL;
3260 switch (hw->mac.type) {
3262 case ixgbe_mac_82598EB:
3264 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3266 entry += (type * 64);
3267 index = (entry >> 2) & 0x1F;
3268 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3269 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3270 ivar |= (vector << (8 * (entry & 0x3)));
3271 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3274 case ixgbe_mac_82599EB:
3275 case ixgbe_mac_X540:
3276 case ixgbe_mac_X550:
3277 case ixgbe_mac_X550EM_x:
3278 if (type == -1) { /* MISC IVAR */
3279 index = (entry & 1) * 8;
3280 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3281 ivar &= ~(0xFF << index);
3282 ivar |= (vector << index);
3283 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3284 } else { /* RX/TX IVARS */
3285 index = (16 * (entry & 1)) + (8 * type);
3286 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3287 ivar &= ~(0xFF << index);
3288 ivar |= (vector << index);
3289 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
** ixgbe_configure_ivars - map every queue's RX and TX causes onto its
** MSI-X vector via ixgbe_set_ivar(), seed each vector's EITR from the
** ixgbe_max_interrupt_rate tuneable, and finally route the link
** interrupt (type -1) to adapter->vector.
** NOTE(review): fragmentary listing — the newitr declaration/else path
** and the DMA-coalescing note's code are elided.
*/
3298 ixgbe_configure_ivars(struct adapter *adapter)
3300 struct ix_queue *que = adapter->queues;
/* EITR is in 2us units; 4000000/rate converts rate to register value */
3303 if (ixgbe_max_interrupt_rate > 0)
3304 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3307 ** Disable DMA coalescing if interrupt moderation is
3314 for (int i = 0; i < adapter->num_queues; i++, que++) {
3315 /* First the RX queue entry */
3316 ixgbe_set_ivar(adapter, i, que->msix, 0);
3317 /* ... and the TX */
3318 ixgbe_set_ivar(adapter, i, que->msix, 1);
3319 /* Set an Initial EITR value */
3320 IXGBE_WRITE_REG(&adapter->hw,
3321 IXGBE_EITR(que->msix), newitr);
3324 /* For the Link interrupt */
3325 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3329 ** ixgbe_sfp_probe - called in the local timer to
3330 ** determine if a port had optics inserted.
/*
** ixgbe_sfp_probe - polled from the local timer to detect late SFP
** insertion on NL-PHY ports. On detection, resets the PHY; an
** unsupported module disables further probing with a console warning,
** a supported one sets the optics type. Returns TRUE presumably when
** new supported optics were found (return paths are elided here).
** NOTE(review): fragmentary listing — gotos/returns and some braces
** are elided; comments describe visible code only.
*/
3332 static bool ixgbe_sfp_probe(struct adapter *adapter)
3334 struct ixgbe_hw *hw = &adapter->hw;
3335 device_t dev = adapter->dev;
3336 bool result = FALSE;
3338 if ((hw->phy.type == ixgbe_phy_nl) &&
3339 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3340 s32 ret = hw->phy.ops.identify_sfp(hw);
3343 ret = hw->phy.ops.reset(hw);
3344 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3345 device_printf(dev,"Unsupported SFP+ module detected!");
3346 printf(" Reload driver with supported module.\n");
3347 adapter->sfp_probe = FALSE;
3350 device_printf(dev,"SFP+ module detected!\n");
3351 /* We now have supported optics */
3352 adapter->sfp_probe = FALSE;
3353 /* Set the optics type so system reports correctly */
3354 ixgbe_setup_optics(adapter);
3362 ** Tasklet handler for MSIX Link interrupts
3363 ** - do outside interrupt since it might sleep
/*
** ixgbe_handle_link - taskqueue handler for the MSI-X link interrupt;
** runs outside interrupt context because check_link may sleep.
** Re-reads link speed/state and propagates it to the stack.
*/
3366 ixgbe_handle_link(void *context, int pending)
3368 struct adapter *adapter = context;
3370 ixgbe_check_link(&adapter->hw,
3371 &adapter->link_speed, &adapter->link_up, 0);
3372 ixgbe_update_link_status(adapter);
3376 ** Tasklet for handling SFP module interrupts
/*
** ixgbe_handle_mod - taskqueue handler for SFP module insertion
** interrupts: identify the module, run MAC-specific SFP setup, and on
** success chain into the multispeed-fiber task to renegotiate link.
** Unsupported modules produce a console error.
** NOTE(review): fragmentary listing — early returns after the error
** prints are elided.
*/
3379 ixgbe_handle_mod(void *context, int pending)
3381 struct adapter *adapter = context;
3382 struct ixgbe_hw *hw = &adapter->hw;
3383 device_t dev = adapter->dev;
3386 err = hw->phy.ops.identify_sfp(hw);
3387 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3389 "Unsupported SFP+ module type was detected.\n");
3392 err = hw->mac.ops.setup_sfp(hw);
3393 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3395 "Setup failure - unsupported SFP+ module type.\n");
3398 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3404 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
** ixgbe_handle_msf - taskqueue handler for multispeed-fiber interrupts:
** re-identify the SFP to refresh the optics type, get/advertise link
** capabilities, call setup_link, then rebuild the ifmedia list so the
** reported media matches the new module.
** NOTE(review): fragmentary listing — variable declarations and error
** checks between the visible lines are elided.
*/
3407 ixgbe_handle_msf(void *context, int pending)
3409 struct adapter *adapter = context;
3410 struct ixgbe_hw *hw = &adapter->hw;
3415 err = hw->phy.ops.identify_sfp(hw);
3417 ixgbe_setup_optics(adapter);
3418 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3421 autoneg = hw->phy.autoneg_advertised;
3422 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3423 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3424 if (hw->mac.ops.setup_link)
3425 hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Media list may have changed with the module — rebuild it */
3427 ifmedia_removeall(&adapter->media);
3428 ixgbe_add_media_types(adapter);
3433 ** Tasklet for handling interrupts from an external PHY
/*
** ixgbe_handle_phy - taskqueue handler for external-PHY interrupts:
** delegates to the PHY's LASI handler and reports over-temperature
** (critical, PHY downshifts) or other LASI errors on the console.
** NOTE(review): fragmentary listing — the else keyword between the two
** device_printf branches and the error argument line are elided.
*/
3436 ixgbe_handle_phy(void *context, int pending)
3438 struct adapter *adapter = context;
3439 struct ixgbe_hw *hw = &adapter->hw;
3442 error = hw->phy.ops.handle_lasi(hw);
3443 if (error == IXGBE_ERR_OVERTEMP)
3444 device_printf(adapter->dev,
3445 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3446 " PHY will downshift to lower power state!\n");
3448 device_printf(adapter->dev,
3449 "Error handling LASI interrupt: %d\n",
3456 ** Tasklet for reinitializing the Flow Director filter table
/*
** ixgbe_reinit_fdir - taskqueue handler that rebuilds the Flow Director
** filter table after the hardware signals it is full, re-enables the
** flow-director interrupt cause, and marks the interface running again.
** NOTE(review): fragmentary listing — the guard's return and the
** running-state check are elided.
*/
3459 ixgbe_reinit_fdir(void *context, int pending)
3461 struct adapter *adapter = context;
3462 struct ifnet *ifp = adapter->ifp;
3464 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3466 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3467 adapter->fdir_reinit = 0;
3468 /* re-enable flow director interrupts */
3469 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3470 /* Restart the interface */
3471 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3476 /*********************************************************************
3478 * Configure DMA Coalescing
3480 **********************************************************************/
/*
** ixgbe_config_dmac - configure DMA coalescing (X550-class only; older
** MACs or a missing dmac_config op bail out early). Re-programs the
** hardware only when the watchdog timer or link speed actually changed
** from the cached dmac_config.
** NOTE(review): fragmentary listing — the early return and closing
** brace of the change-check block are elided.
*/
3482 ixgbe_config_dmac(struct adapter *adapter)
3484 struct ixgbe_hw *hw = &adapter->hw;
3485 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3487 if (hw->mac.type < ixgbe_mac_X550 ||
3488 !hw->mac.ops.dmac_config)
/* XOR detects any difference between cached and requested settings */
3491 if (dcfg->watchdog_timer ^ adapter->dmac ||
3492 dcfg->link_speed ^ adapter->link_speed) {
3493 dcfg->watchdog_timer = adapter->dmac;
3494 dcfg->fcoe_en = false;
3495 dcfg->link_speed = adapter->link_speed;
3498 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3499 dcfg->watchdog_timer, dcfg->link_speed);
3501 hw->mac.ops.dmac_config(hw);
3506 * Checks whether the adapter supports Energy Efficient Ethernet
3507 * or not, based on device ID.
/*
** ixgbe_check_eee_support - set eee_support and eee_enabled based
** purely on device ID: only X550T and X550EM_X_KR support Energy
** Efficient Ethernet here.
*/
3510 ixgbe_check_eee_support(struct adapter *adapter)
3512 struct ixgbe_hw *hw = &adapter->hw;
3514 adapter->eee_support = adapter->eee_enabled =
3515 (hw->device_id == IXGBE_DEV_ID_X550T ||
3516 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3520 * Checks whether the adapter's ports are capable of
3521 * Wake On LAN by reading the adapter's NVM.
3523 * Sets each port's hw->wol_enabled value depending
3524 * on the value read here.
/*
** ixgbe_check_wol_support - read the NVM device capabilities to decide
** whether this port can Wake-On-LAN, setting both adapter->wol_support
** and hw->wol_enabled, then snapshot the current WUFC filter config.
** NOTE(review): fragmentary listing — the per-port (bus.func) clause of
** the capability test is elided.
*/
3527 ixgbe_check_wol_support(struct adapter *adapter)
3529 struct ixgbe_hw *hw = &adapter->hw;
3532 /* Find out WoL support for port */
3533 adapter->wol_support = hw->wol_enabled = 0;
3534 ixgbe_get_device_caps(hw, &dev_caps);
3535 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3536 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3538 adapter->wol_support = hw->wol_enabled = 1;
3540 /* Save initial wake up filter configuration */
3541 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3547 * Prepare the adapter/port for LPLU and/or WoL
/*
** ixgbe_setup_low_power_mode - prepare the port for suspend. On
** X550EM baseT with an LPLU-capable PHY: disable APM wakeup, clear
** stale wake-up status, program the user's wake filters, enable PME,
** then stop the interface and enter Low Power Link Up. All other
** adapters are simply stopped. Caller must hold the core mutex.
** NOTE(review): fragmentary listing — the else keyword between the two
** paths and the return are elided.
*/
3550 ixgbe_setup_low_power_mode(struct adapter *adapter)
3552 struct ixgbe_hw *hw = &adapter->hw;
3553 device_t dev = adapter->dev;
3556 mtx_assert(&adapter->core_mtx, MA_OWNED);
3558 /* Limit power management flow to X550EM baseT */
3559 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3560 && hw->phy.ops.enter_lplu) {
3561 /* Turn off support for APM wakeup. (Using ACPI instead) */
3562 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3563 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3566 * Clear Wake Up Status register to prevent any previous wakeup
3567 * events from waking us up immediately after we suspend.
3569 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3572 * Program the Wakeup Filter Control register with user filter
3575 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3577 /* Enable wakeups and power management in Wakeup Control */
3578 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3579 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3581 /* X550EM baseT adapters need a special LPLU flow */
3582 hw->phy.reset_disable = true;
3583 ixgbe_stop(adapter);
3584 error = hw->phy.ops.enter_lplu(hw);
3587 "Error entering LPLU: %d\n", error);
3588 hw->phy.reset_disable = false;
3590 /* Just stop for other adapters */
3591 ixgbe_stop(adapter);
3597 /**********************************************************************
3599  *  Update the board statistics counters.
3601  **********************************************************************/
/*
 * Harvests the MAC's clear-on-read statistics registers into the
 * driver's accumulating soft counters (adapter->stats.pf), then
 * mirrors the totals into the OS-visible ifnet statistics.
 *
 * NOTE(review): the IXGBE_MPC() accumulation loop that normally feeds
 * missed_rx / total_missed_rx is not visible in this listing; as shown
 * both remain 0, so gprc is unadjusted and IQDROPS reads 0 -- confirm
 * against the complete source file.
 */
3603 ixgbe_update_stats_counters(struct adapter *adapter)
3605 	struct ixgbe_hw *hw = &adapter->hw;
3606 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
3607 	u64 total_missed_rx = 0;
/* CRC, illegal-byte, byte-error and short-packet-discard counters */
3609 	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3610 	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3611 	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3612 	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue RX/TX packet and RX-drop counters for the first 16 queues */
3614 	for (int i = 0; i < 16; i++) {
3615 		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3616 		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3617 		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3619 	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3620 	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3621 	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3623 	/* Hardware workaround, gprc counts missed packets */
3624 	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3625 	adapter->stats.pf.gprc -= missed_rx;
/* 64-bit octet counters: low/high register pairs on 82599 and later */
3627 	if (hw->mac.type != ixgbe_mac_82598EB) {
3628 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3629 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3630 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3631 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3632 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3633 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3634 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3635 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
/* NOTE(review): else-branch for 82598EB -- the "} else {" line is not visible here */
3637 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3638 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3639 		/* 82598 only has a counter in the high register */
3640 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3641 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3642 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3646 	 * Workaround: mprc hardware is incorrectly counting
3647 	 * broadcasts, so for now we subtract those.
3649 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3650 	adapter->stats.pf.bprc += bprc;
3651 	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3652 	if (hw->mac.type == ixgbe_mac_82598EB)
3653 		adapter->stats.pf.mprc -= bprc;
/* RX frame-size histogram counters */
3655 	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3656 	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3657 	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3658 	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3659 	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3660 	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
/* Link flow-control frames transmitted (XON/XOFF) */
3662 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3663 	adapter->stats.pf.lxontxc += lxon;
3664 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3665 	adapter->stats.pf.lxofftxc += lxoff;
3666 	total = lxon + lxoff;
/* TX counters; pause frames are backed out of the good-packet totals */
3668 	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3669 	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3670 	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3671 	adapter->stats.pf.gptc -= total;
3672 	adapter->stats.pf.mptc -= total;
3673 	adapter->stats.pf.ptc64 -= total;
3674 	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3676 	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3677 	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3678 	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3679 	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3680 	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3681 	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3682 	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3683 	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3684 	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3685 	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3686 	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3687 	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3688 	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3689 	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3690 	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3691 	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3692 	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3693 	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3694 	/* Only read FCOE on 82599 */
3695 	if (hw->mac.type != ixgbe_mac_82598EB) {
3696 		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3697 		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3698 		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3699 		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3700 		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3703 	/* Fill out the OS statistics structure */
3704 	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3705 	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3706 	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3707 	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3708 	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3709 	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3710 	IXGBE_SET_COLLISIONS(adapter, 0);
3711 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
/* Input errors: CRC errors plus receive-length errors */
3712 	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3713 	    + adapter->stats.pf.rlec);
3716 #if __FreeBSD_version >= 1100036
3718 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3720 struct adapter *adapter;
3721 struct tx_ring *txr;
3724 adapter = if_getsoftc(ifp);
3727 case IFCOUNTER_IPACKETS:
3728 return (adapter->ipackets);
3729 case IFCOUNTER_OPACKETS:
3730 return (adapter->opackets);
3731 case IFCOUNTER_IBYTES:
3732 return (adapter->ibytes);
3733 case IFCOUNTER_OBYTES:
3734 return (adapter->obytes);
3735 case IFCOUNTER_IMCASTS:
3736 return (adapter->imcasts);
3737 case IFCOUNTER_OMCASTS:
3738 return (adapter->omcasts);
3739 case IFCOUNTER_COLLISIONS:
3741 case IFCOUNTER_IQDROPS:
3742 return (adapter->iqdrops);
3743 case IFCOUNTER_OQDROPS:
3745 txr = adapter->tx_rings;
3746 for (int i = 0; i < adapter->num_queues; i++, txr++)
3747 rv += txr->br->br_drops;
3749 case IFCOUNTER_IERRORS:
3750 return (adapter->ierrors);
3752 return (if_get_counter_default(ifp, cnt));
3757 /** ixgbe_sysctl_tdh_handler - Handler function
3758 * Retrieves the TDH value from the hardware
3761 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3765 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3768 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3769 error = sysctl_handle_int(oidp, &val, 0, req);
3770 if (error || !req->newptr)
3775 /** ixgbe_sysctl_tdt_handler - Handler function
3776 * Retrieves the TDT value from the hardware
3779 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3783 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3786 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3787 error = sysctl_handle_int(oidp, &val, 0, req);
3788 if (error || !req->newptr)
3793 /** ixgbe_sysctl_rdh_handler - Handler function
3794 * Retrieves the RDH value from the hardware
3797 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3801 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3804 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3805 error = sysctl_handle_int(oidp, &val, 0, req);
3806 if (error || !req->newptr)
3811 /** ixgbe_sysctl_rdt_handler - Handler function
3812 * Retrieves the RDT value from the hardware
3815 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3819 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3822 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3823 error = sysctl_handle_int(oidp, &val, 0, req);
3824 if (error || !req->newptr)
3830 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3833 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3834 unsigned int reg, usec, rate;
3836 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3837 usec = ((reg & 0x0FF8) >> 3);
3839 rate = 500000 / usec;
3842 error = sysctl_handle_int(oidp, &rate, 0, req);
3843 if (error || !req->newptr)
3845 reg &= ~0xfff; /* default, no limitation */
3846 ixgbe_max_interrupt_rate = 0;
3847 if (rate > 0 && rate < 500000) {
3850 ixgbe_max_interrupt_rate = rate;
3851 reg |= ((4000000/rate) & 0xff8 );
3853 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * Registers the driver's device-level sysctl nodes: flow control,
 * interrupt moderation, advertised speed and thermal test for every
 * device, plus DMA-coalescing, EEE, WoL and external-PHY nodes for the
 * device IDs that support each feature.
 */
3858 ixgbe_add_device_sysctls(struct adapter *adapter)
3860 	device_t dev = adapter->dev;
3861 	struct ixgbe_hw *hw = &adapter->hw;
3862 	struct sysctl_oid_list *child;
3863 	struct sysctl_ctx_list *ctx;
3865 	ctx = device_get_sysctl_ctx(dev);
3866 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3868 	/* Sysctls for all devices */
3869 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3870 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3871 	    ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3873 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3875 	    &ixgbe_enable_aim, 1, "Interrupt Moderation");
3877 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3878 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3879 	    ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3881 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3882 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3883 	    ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3885 	/* for X550 devices */
3886 	if (hw->mac.type >= ixgbe_mac_X550)
3887 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3888 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889 		    ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3891 	/* for X550T and X550EM backplane devices */
3892 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
3893 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3894 		struct sysctl_oid *eee_node;
3895 		struct sysctl_oid_list *eee_list;
3897 		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3899 		    "Energy Efficient Ethernet sysctls");
3900 		eee_list = SYSCTL_CHILDREN(eee_node);
3902 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3903 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3904 		    ixgbe_sysctl_eee_enable, "I",
3905 		    "Enable or Disable EEE");
3907 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3908 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3909 		    ixgbe_sysctl_eee_negotiated, "I",
3910 		    "EEE negotiated on link");
3912 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3913 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3914 		    ixgbe_sysctl_eee_tx_lpi_status, "I",
3915 		    "Whether or not TX link is in LPI state");
3917 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3918 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3919 		    ixgbe_sysctl_eee_rx_lpi_status, "I",
3920 		    "Whether or not RX link is in LPI state");
3923 	/* for certain 10GBaseT devices */
3924 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
3925 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3926 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3927 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3928 		    ixgbe_sysctl_wol_enable, "I",
3929 		    "Enable/Disable Wake on LAN");
3931 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3932 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3933 		    ixgbe_sysctl_wufc, "I",
3934 		    "Enable/Disable Wake Up Filters");
3937 	/* for X550EM 10GBaseT devices */
3938 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3939 		struct sysctl_oid *phy_node;
3940 		struct sysctl_oid_list *phy_list;
3942 		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3944 		    "External PHY sysctls");
3945 		phy_list = SYSCTL_CHILDREN(phy_node);
3947 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3948 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3949 		    ixgbe_sysctl_phy_temp, "I",
3950 		    "Current External PHY Temperature (Celsius)");
3952 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3953 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3954 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
3955 		    "External PHY High Temperature Event Occurred");
3960  * Add sysctl variables, one per statistic, to the system.
/*
 * Registers one sysctl per driver/software statistic, per-queue TX and
 * RX counters, and one node per hardware MAC statistic under
 * "mac_stats".  All statistics nodes are read-only.
 */
3963 ixgbe_add_hw_stats(struct adapter *adapter)
3965 	device_t dev = adapter->dev;
3967 	struct tx_ring *txr = adapter->tx_rings;
3968 	struct rx_ring *rxr = adapter->rx_rings;
3970 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3971 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3972 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3973 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3975 	struct sysctl_oid *stat_node, *queue_node;
3976 	struct sysctl_oid_list *stat_list, *queue_list;
3978 #define QUEUE_NAME_LEN 32
3979 	char namebuf[QUEUE_NAME_LEN];
3981 	/* Driver Statistics */
3982 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3983 	    CTLFLAG_RD, &adapter->dropped_pkts,
3984 	    "Driver dropped packets");
3985 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3986 	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3987 	    "m_defrag() failed");
3988 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3989 	    CTLFLAG_RD, &adapter->watchdog_events,
3990 	    "Watchdog timeouts");
3991 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3992 	    CTLFLAG_RD, &adapter->link_irq,
3993 	    "Link MSIX IRQ Handled");
/* Per-queue TX statistics: one "queue%d" node per TX ring */
3995 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3996 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3997 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3998 		    CTLFLAG_RD, NULL, "Queue Name");
3999 		queue_list = SYSCTL_CHILDREN(queue_node);
4001 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4002 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4003 		    sizeof(&adapter->queues[i]),
4004 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
4006 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4007 		    CTLFLAG_RD, &(adapter->queues[i].irqs),
4008 		    "irqs on this queue");
4009 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4010 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4011 		    ixgbe_sysctl_tdh_handler, "IU",
4012 		    "Transmit Descriptor Head");
4013 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4014 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4015 		    ixgbe_sysctl_tdt_handler, "IU",
4016 		    "Transmit Descriptor Tail");
4017 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4018 		    CTLFLAG_RD, &txr->tso_tx,
4020 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4021 		    CTLFLAG_RD, &txr->no_tx_dma_setup,
4022 		    "Driver tx dma failure in xmit");
4023 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4024 		    CTLFLAG_RD, &txr->no_desc_avail,
4025 		    "Queue No Descriptor Available");
4026 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4027 		    CTLFLAG_RD, &txr->total_packets,
4028 		    "Queue Packets Transmitted");
4029 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4030 		    CTLFLAG_RD, &txr->br->br_drops,
4031 		    "Packets dropped in buf_ring");
/*
 * Per-queue RX statistics.
 * NOTE(review): the "queue%d" node is created twice in this loop and
 * the second creation shadows the first.  This matches the visible
 * upstream code of this vintage, but it is redundant -- verify before
 * changing.
 */
4034 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4035 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4036 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4037 		    CTLFLAG_RD, NULL, "Queue Name");
4038 		queue_list = SYSCTL_CHILDREN(queue_node);
4040 		struct lro_ctrl *lro = &rxr->lro;
4042 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4043 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4044 		    CTLFLAG_RD, NULL, "Queue Name");
4045 		queue_list = SYSCTL_CHILDREN(queue_node);
4047 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4048 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4049 		    ixgbe_sysctl_rdh_handler, "IU",
4050 		    "Receive Descriptor Head");
4051 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4052 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4053 		    ixgbe_sysctl_rdt_handler, "IU",
4054 		    "Receive Descriptor Tail");
4055 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4056 		    CTLFLAG_RD, &rxr->rx_packets,
4057 		    "Queue Packets Received");
4058 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4059 		    CTLFLAG_RD, &rxr->rx_bytes,
4060 		    "Queue Bytes Received");
4061 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4062 		    CTLFLAG_RD, &rxr->rx_copies,
4063 		    "Copied RX Frames");
4064 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4065 		    CTLFLAG_RD, &lro->lro_queued, 0,
4067 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4068 		    CTLFLAG_RD, &lro->lro_flushed, 0,
4072 	/* MAC stats get the own sub node */
4074 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4075 	    CTLFLAG_RD, NULL, "MAC Statistics");
4076 	stat_list = SYSCTL_CHILDREN(stat_node);
4078 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4079 	    CTLFLAG_RD, &stats->crcerrs,
4081 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4082 	    CTLFLAG_RD, &stats->illerrc,
4083 	    "Illegal Byte Errors");
4084 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4085 	    CTLFLAG_RD, &stats->errbc,
4087 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4088 	    CTLFLAG_RD, &stats->mspdc,
4089 	    "MAC Short Packets Discarded");
4090 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4091 	    CTLFLAG_RD, &stats->mlfc,
4092 	    "MAC Local Faults");
4093 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4094 	    CTLFLAG_RD, &stats->mrfc,
4095 	    "MAC Remote Faults");
4096 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4097 	    CTLFLAG_RD, &stats->rlec,
4098 	    "Receive Length Errors");
4100 	/* Flow Control stats */
4101 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4102 	    CTLFLAG_RD, &stats->lxontxc,
4103 	    "Link XON Transmitted");
4104 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4105 	    CTLFLAG_RD, &stats->lxonrxc,
4106 	    "Link XON Received");
4107 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4108 	    CTLFLAG_RD, &stats->lxofftxc,
4109 	    "Link XOFF Transmitted");
4110 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4111 	    CTLFLAG_RD, &stats->lxoffrxc,
4112 	    "Link XOFF Received");
4114 	/* Packet Reception Stats */
4115 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4116 	    CTLFLAG_RD, &stats->tor,
4117 	    "Total Octets Received");
4118 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4119 	    CTLFLAG_RD, &stats->gorc,
4120 	    "Good Octets Received");
4121 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4122 	    CTLFLAG_RD, &stats->tpr,
4123 	    "Total Packets Received");
4124 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4125 	    CTLFLAG_RD, &stats->gprc,
4126 	    "Good Packets Received");
4127 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4128 	    CTLFLAG_RD, &stats->mprc,
4129 	    "Multicast Packets Received");
4130 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4131 	    CTLFLAG_RD, &stats->bprc,
4132 	    "Broadcast Packets Received");
4133 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4134 	    CTLFLAG_RD, &stats->prc64,
4135 	    "64 byte frames received ");
4136 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4137 	    CTLFLAG_RD, &stats->prc127,
4138 	    "65-127 byte frames received");
4139 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4140 	    CTLFLAG_RD, &stats->prc255,
4141 	    "128-255 byte frames received");
4142 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4143 	    CTLFLAG_RD, &stats->prc511,
4144 	    "256-511 byte frames received");
4145 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4146 	    CTLFLAG_RD, &stats->prc1023,
4147 	    "512-1023 byte frames received");
4148 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4149 	    CTLFLAG_RD, &stats->prc1522,
4150 	    "1023-1522 byte frames received");
4151 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4152 	    CTLFLAG_RD, &stats->ruc,
4153 	    "Receive Undersized");
4154 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4155 	    CTLFLAG_RD, &stats->rfc,
4156 	    "Fragmented Packets Received ");
4157 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4158 	    CTLFLAG_RD, &stats->roc,
4159 	    "Oversized Packets Received");
4160 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4161 	    CTLFLAG_RD, &stats->rjc,
4163 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4164 	    CTLFLAG_RD, &stats->mngprc,
4165 	    "Management Packets Received");
4166 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4167 	    CTLFLAG_RD, &stats->mngptc,
4168 	    "Management Packets Dropped");
4169 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4170 	    CTLFLAG_RD, &stats->xec,
4173 	/* Packet Transmission Stats */
4174 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4175 	    CTLFLAG_RD, &stats->gotc,
4176 	    "Good Octets Transmitted");
4177 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4178 	    CTLFLAG_RD, &stats->tpt,
4179 	    "Total Packets Transmitted");
4180 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4181 	    CTLFLAG_RD, &stats->gptc,
4182 	    "Good Packets Transmitted");
4183 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4184 	    CTLFLAG_RD, &stats->bptc,
4185 	    "Broadcast Packets Transmitted");
4186 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4187 	    CTLFLAG_RD, &stats->mptc,
4188 	    "Multicast Packets Transmitted");
4189 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4190 	    CTLFLAG_RD, &stats->mngptc,
4191 	    "Management Packets Transmitted");
4192 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4193 	    CTLFLAG_RD, &stats->ptc64,
4194 	    "64 byte frames transmitted ");
4195 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4196 	    CTLFLAG_RD, &stats->ptc127,
4197 	    "65-127 byte frames transmitted");
4198 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4199 	    CTLFLAG_RD, &stats->ptc255,
4200 	    "128-255 byte frames transmitted");
4201 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4202 	    CTLFLAG_RD, &stats->ptc511,
4203 	    "256-511 byte frames transmitted");
4204 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4205 	    CTLFLAG_RD, &stats->ptc1023,
4206 	    "512-1023 byte frames transmitted");
4207 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4208 	    CTLFLAG_RD, &stats->ptc1522,
4209 	    "1024-1522 byte frames transmitted");
4213 ** Set flow control using sysctl:
4214 ** Flow control values:
4221 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4224 struct adapter *adapter = (struct adapter *) arg1;
4227 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4228 if ((error) || (req->newptr == NULL))
4231 /* Don't bother if it's not changed */
4232 if (adapter->fc == last)
4235 switch (adapter->fc) {
4236 case ixgbe_fc_rx_pause:
4237 case ixgbe_fc_tx_pause:
4239 adapter->hw.fc.requested_mode = adapter->fc;
4240 if (adapter->num_queues > 1)
4241 ixgbe_disable_rx_drop(adapter);
4244 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4245 if (adapter->num_queues > 1)
4246 ixgbe_enable_rx_drop(adapter);
4252 /* Don't autoneg if forcing a value */
4253 adapter->hw.fc.disable_fc_autoneg = TRUE;
4254 ixgbe_fc_enable(&adapter->hw);
4259 ** Control advertised link speed:
4261 ** 0x1 - advertise 100 Mb
4262 ** 0x2 - advertise 1G
4263 ** 0x4 - advertise 10G
4266 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4268 int error = 0, requested;
4269 struct adapter *adapter;
4271 struct ixgbe_hw *hw;
4272 ixgbe_link_speed speed = 0;
4274 adapter = (struct adapter *) arg1;
4278 requested = adapter->advertise;
4279 error = sysctl_handle_int(oidp, &requested, 0, req);
4280 if ((error) || (req->newptr == NULL))
4283 /* Checks to validate new value */
4284 if (adapter->advertise == requested) /* no change */
4287 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4288 (hw->phy.multispeed_fiber))) {
4290 "Advertised speed can only be set on copper or "
4291 "multispeed fiber media types.\n");
4295 if (requested < 0x1 || requested > 0x7) {
4297 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4301 if ((requested & 0x1)
4302 && (hw->mac.type != ixgbe_mac_X540)
4303 && (hw->mac.type != ixgbe_mac_X550)) {
4304 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4308 /* Set new value and report new advertised mode */
4309 if (requested & 0x1)
4310 speed |= IXGBE_LINK_SPEED_100_FULL;
4311 if (requested & 0x2)
4312 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4313 if (requested & 0x4)
4314 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4316 hw->mac.autotry_restart = TRUE;
4317 hw->mac.ops.setup_link(hw, speed, TRUE);
4318 adapter->advertise = requested;
4324 * The following two sysctls are for X550 BaseT devices;
4325 * they deal with the external PHY used in them.
4328 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4330 struct adapter *adapter = (struct adapter *) arg1;
4331 struct ixgbe_hw *hw = &adapter->hw;
4334 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4335 device_printf(adapter->dev,
4336 "Device has no supported external thermal sensor.\n");
4340 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4341 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4343 device_printf(adapter->dev,
4344 "Error reading from PHY's current temperature register\n");
4348 /* Shift temp for output */
4351 return (sysctl_handle_int(oidp, NULL, reg, req));
4355 * Reports whether the current PHY temperature is over
4356 * the overtemp threshold.
4357 * - This is reported directly from the PHY
4360 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4362 struct adapter *adapter = (struct adapter *) arg1;
4363 struct ixgbe_hw *hw = &adapter->hw;
4366 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4367 device_printf(adapter->dev,
4368 "Device has no supported external thermal sensor.\n");
4372 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4373 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4375 device_printf(adapter->dev,
4376 "Error reading from PHY's temperature status register\n");
4380 /* Get occurrence bit */
4381 reg = !!(reg & 0x4000);
4382 return (sysctl_handle_int(oidp, 0, reg, req));
4386 ** Thermal Shutdown Trigger (internal MAC)
4387 ** - Set this to 1 to cause an overtemp event to occur
4390 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4392 struct adapter *adapter = (struct adapter *) arg1;
4393 struct ixgbe_hw *hw = &adapter->hw;
4394 int error, fire = 0;
4396 error = sysctl_handle_int(oidp, &fire, 0, req);
4397 if ((error) || (req->newptr == NULL))
4401 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4402 reg |= IXGBE_EICR_TS;
4403 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4410 ** Manage DMA Coalescing.
4412 ** 0/1 - off / on (use default value of 1000)
4414 ** Legal timer values are:
4415 ** 50,100,250,500,1000,2000,5000,10000
4417 ** Turning off interrupt moderation will also turn this off.
4420 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4422 struct adapter *adapter = (struct adapter *) arg1;
4423 struct ixgbe_hw *hw = &adapter->hw;
4424 struct ifnet *ifp = adapter->ifp;
4428 oldval = adapter->dmac;
4429 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4430 if ((error) || (req->newptr == NULL))
4433 switch (hw->mac.type) {
4434 case ixgbe_mac_X550:
4435 case ixgbe_mac_X550EM_x:
4438 device_printf(adapter->dev,
4439 "DMA Coalescing is only supported on X550 devices\n");
4443 switch (adapter->dmac) {
4447 case 1: /* Enable and use default */
4448 adapter->dmac = 1000;
4458 /* Legal values - allow */
4461 /* Do nothing, illegal value */
4462 adapter->dmac = oldval;
4466 /* Re-initialize hardware if it's already running */
4467 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4468 ixgbe_init(adapter);
4474 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4480 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4482 struct adapter *adapter = (struct adapter *) arg1;
4483 struct ixgbe_hw *hw = &adapter->hw;
4484 int new_wol_enabled;
4487 new_wol_enabled = hw->wol_enabled;
4488 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4489 if ((error) || (req->newptr == NULL))
4491 if (new_wol_enabled == hw->wol_enabled)
4494 if (new_wol_enabled > 0 && !adapter->wol_support)
4497 hw->wol_enabled = !!(new_wol_enabled);
4503 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4504 * if supported by the adapter.
4510 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4512 struct adapter *adapter = (struct adapter *) arg1;
4513 struct ifnet *ifp = adapter->ifp;
4514 int new_eee_enabled, error = 0;
4516 new_eee_enabled = adapter->eee_enabled;
4517 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4518 if ((error) || (req->newptr == NULL))
4520 if (new_eee_enabled == adapter->eee_enabled)
4523 if (new_eee_enabled > 0 && !adapter->eee_support)
4526 adapter->eee_enabled = !!(new_eee_enabled);
4528 /* Re-initialize hardware if it's already running */
4529 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4530 ixgbe_init(adapter);
4536 * Read-only sysctl indicating whether EEE support was negotiated
4540 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4542 struct adapter *adapter = (struct adapter *) arg1;
4543 struct ixgbe_hw *hw = &adapter->hw;
4546 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4548 return (sysctl_handle_int(oidp, 0, status, req));
4552 * Read-only sysctl indicating whether RX Link is in LPI state.
4555 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4557 struct adapter *adapter = (struct adapter *) arg1;
4558 struct ixgbe_hw *hw = &adapter->hw;
4561 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4562 IXGBE_EEE_RX_LPI_STATUS);
4564 return (sysctl_handle_int(oidp, 0, status, req));
4568 * Read-only sysctl indicating whether TX Link is in LPI state.
4571 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4573 struct adapter *adapter = (struct adapter *) arg1;
4574 struct ixgbe_hw *hw = &adapter->hw;
4577 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4578 IXGBE_EEE_TX_LPI_STATUS);
4580 return (sysctl_handle_int(oidp, 0, status, req));
4584 * Sysctl to enable/disable the types of packets that the
4585 * adapter will wake up on upon receipt.
4586 * WUFC - Wake Up Filter Control
4588 * 0x1 - Link Status Change
4589 * 0x2 - Magic Packet
4590 * 0x4 - Direct Exact
4591 * 0x8 - Directed Multicast
4593 * 0x20 - ARP/IPv4 Request Packet
4594 * 0x40 - Direct IPv4 Packet
4595 * 0x80 - Direct IPv6 Packet
4597 * Setting another flag will cause the sysctl to return an
4601 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4603 struct adapter *adapter = (struct adapter *) arg1;
4607 new_wufc = adapter->wufc;
4609 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4610 if ((error) || (req->newptr == NULL))
4612 if (new_wufc == adapter->wufc)
4615 if (new_wufc & 0xffffff00)
4619 new_wufc |= (0xffffff & adapter->wufc);
4620 adapter->wufc = new_wufc;
4627 ** Enable the hardware to drop packets when the buffer is
4628 ** full. This is useful when multiqueue,so that no single
4629 ** queue being full stalls the entire RX engine. We only
4630 ** enable this when Multiqueue AND when Flow Control is
4634 ixgbe_enable_rx_drop(struct adapter *adapter)
4636 struct ixgbe_hw *hw = &adapter->hw;
4638 for (int i = 0; i < adapter->num_queues; i++) {
4639 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4640 srrctl |= IXGBE_SRRCTL_DROP_EN;
4641 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4646 ixgbe_disable_rx_drop(struct adapter *adapter)
4648 struct ixgbe_hw *hw = &adapter->hw;
4650 for (int i = 0; i < adapter->num_queues; i++) {
4651 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4652 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4653 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4658 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4662 switch (adapter->hw.mac.type) {
4663 case ixgbe_mac_82598EB:
4664 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4665 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4667 case ixgbe_mac_82599EB:
4668 case ixgbe_mac_X540:
4669 case ixgbe_mac_X550:
4670 case ixgbe_mac_X550EM_x:
4671 mask = (queues & 0xFFFFFFFF);
4672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4673 mask = (queues >> 32);
4674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);