1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
44 * Set this to one to display debug statistics
45 *********************************************************************/
46 int ixgbe_display_debug_stats = 0;
48 /*********************************************************************
 * Driver version string, reported by ixgbe_probe() in the device
 * description ("<branding string>, Version - <version>").
50 *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
53 /*********************************************************************
 * PCI device ID table.
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95 /* required last entry */
/* NOTE(review): the all-zero terminator entry and the closing "};" appear
 * to have been lost in this excerpt — ixgbe_probe() relies on the
 * terminator (vendor_id == 0) to end its scan; confirm against upstream. */
99 /*********************************************************************
100 * Table of branding strings
 * Indexed by the last field of ixgbe_vendor_info_array entries.
101 *********************************************************************/
103 static char *ixgbe_strings[] = {
104 "Intel(R) PRO/10GbE PCI-Express Network Driver"
107 /*********************************************************************
108 * Function prototypes
109 *********************************************************************/
/* newbus device interface entry points (wired up in ix_methods below) */
110 static int ixgbe_probe(device_t);
111 static int ixgbe_attach(device_t);
112 static int ixgbe_detach(device_t);
113 static int ixgbe_shutdown(device_t);
114 static int ixgbe_suspend(device_t);
115 static int ixgbe_resume(device_t);
116 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void ixgbe_init(void *);
118 static void ixgbe_init_locked(struct adapter *);
119 static void ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
123 static void ixgbe_add_media_types(struct adapter *);
124 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int ixgbe_media_change(struct ifnet *);
126 static void ixgbe_identify_hardware(struct adapter *);
127 static int ixgbe_allocate_pci_resources(struct adapter *);
128 static void ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int ixgbe_allocate_msix(struct adapter *);
130 static int ixgbe_allocate_legacy(struct adapter *);
131 static int ixgbe_setup_msix(struct adapter *);
132 static void ixgbe_free_pci_resources(struct adapter *);
133 static void ixgbe_local_timer(void *);
134 static int ixgbe_setup_interface(device_t, struct adapter *);
135 static void ixgbe_config_dmac(struct adapter *);
136 static void ixgbe_config_delay_values(struct adapter *);
137 static void ixgbe_config_link(struct adapter *);
138 static void ixgbe_check_eee_support(struct adapter *);
139 static void ixgbe_check_wol_support(struct adapter *);
140 static int ixgbe_setup_low_power_mode(struct adapter *);
141 static void ixgbe_rearm_queues(struct adapter *, u64);
143 static void ixgbe_initialize_transmit_units(struct adapter *);
144 static void ixgbe_initialize_receive_units(struct adapter *);
145 static void ixgbe_enable_rx_drop(struct adapter *);
146 static void ixgbe_disable_rx_drop(struct adapter *);
148 static void ixgbe_enable_intr(struct adapter *);
149 static void ixgbe_disable_intr(struct adapter *);
150 static void ixgbe_update_stats_counters(struct adapter *);
151 static void ixgbe_set_promisc(struct adapter *);
152 static void ixgbe_set_multi(struct adapter *);
153 static void ixgbe_update_link_status(struct adapter *);
154 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void ixgbe_configure_ivars(struct adapter *);
156 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
158 static void ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_add_device_sysctls(struct adapter *);
163 static void ixgbe_add_hw_stats(struct adapter *);
165 /* Sysctl handlers */
166 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
179 /* Support for pluggable optic modules */
180 static bool ixgbe_sfp_probe(struct adapter *);
181 static void ixgbe_setup_optics(struct adapter *);
183 /* Legacy (single vector) interrupt handler */
184 static void ixgbe_legacy_irq(void *);
186 /* The MSI/X Interrupt handlers */
187 static void ixgbe_msix_que(void *);
188 static void ixgbe_msix_link(void *);
190 /* Deferred interrupt tasklets */
191 static void ixgbe_handle_que(void *, int);
192 static void ixgbe_handle_link(void *, int);
193 static void ixgbe_handle_msf(void *, int);
194 static void ixgbe_handle_mod(void *, int);
195 static void ixgbe_handle_phy(void *, int);
/* presumably guarded by #ifdef IXGBE_FDIR upstream — guard elided here */
198 static void ixgbe_reinit_fdir(void *, int);
201 /*********************************************************************
202 * FreeBSD Device Interface Entry Points
203 *********************************************************************/
/* newbus method table: maps generic device ops to this driver's handlers */
205 static device_method_t ix_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, ixgbe_probe),
208 DEVMETHOD(device_attach, ixgbe_attach),
209 DEVMETHOD(device_detach, ixgbe_detach),
210 DEVMETHOD(device_shutdown, ixgbe_shutdown),
211 DEVMETHOD(device_suspend, ixgbe_suspend),
212 DEVMETHOD(device_resume, ixgbe_resume),
/* NOTE(review): DEVMETHOD_END and the table's closing brace appear truncated
 * in this excerpt. */
216 static driver_t ix_driver = {
217 "ix", ix_methods, sizeof(struct adapter),
220 devclass_t ix_devclass;
/* Register the driver on the PCI bus and declare module dependencies */
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
/* Loader tunables and hw.ix sysctl knobs; most are read-only after boot
 * (CTLFLAG_RDTUN) and consumed during attach/init. */
227 ** TUNEABLE PARAMETERS:
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231 "IXGBE driver parameters");
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241 "Enable adaptive interrupt moderation");
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251 &ixgbe_rx_process_limit, 0,
252 "Maximum number of received packets to process at a time,"
253 "-1 means unlimited");
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259 &ixgbe_tx_process_limit, 0,
260 "Maximum number of sent packets to process at a time,"
261 "-1 means unlimited");
264 ** Smart speed setting, default to on
265 ** this only works as a compile option
266 ** right now as its during attach, set
267 ** this to 'ixgbe_smart_speed_off' to
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
273 * MSIX should be the default for best performance,
274 * but this allows it to be forced off for testing.
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278 "Enable MSI-X interrupts");
281 * Number of Queues, can be set to 0,
282 * it then autoconfigures based on the
283 * number of cpus with a max of 8. This
284 * can be overriden manually here.
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288 "Number of queues to configure up to a maximum of 8; "
289 "0 indicates autoconfigure");
292 ** Number of TX descriptors per ring,
293 ** setting higher than RX as this seems
294 ** the better performing choice.
296 static int ixgbe_txd = PERFORM_TXD;
297 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
298 "Number of transmit descriptors per queue");
300 /* Number of RX descriptors per ring */
301 static int ixgbe_rxd = PERFORM_RXD;
302 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
303 "Number of receive descriptors per queue");
306 ** Defining this on will allow the use
307 ** of unsupported SFP+ modules, note that
308 ** doing so you are on your own :)
310 static int allow_unsupported_sfp = FALSE;
311 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
313 /* Keep running tab on them for sanity check */
314 static int ixgbe_total_ports;
318 ** Flow Director actually 'steals'
319 ** part of the packet buffer as its
320 ** filter pool, this variable controls
322 ** 0 = 64K, 1 = 128K, 2 = 256K
324 static int fdir_pballoc = 1;
329 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
330 * be a reference on how to implement netmap support in a driver.
331 * Additional comments are in ixgbe_netmap.h .
333 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
334 * that extend the standard driver.
/* NOTE(review): the matching "#ifdef DEV_NETMAP" line for the #endif below
 * appears to have been lost in this excerpt. */
336 #include <dev/netmap/ixgbe_netmap.h>
337 #endif /* DEV_NETMAP */
339 /*********************************************************************
340 * Device identification routine
342 * ixgbe_probe determines if the driver should be loaded on
343 * adapter based on PCI vendor/device id of the adapter.
345 * return BUS_PROBE_DEFAULT on success, positive on failure
346 *********************************************************************/
349 ixgbe_probe(device_t dev)
351 ixgbe_vendor_info_t *ent;
353 u16 pci_vendor_id = 0;
354 u16 pci_device_id = 0;
355 u16 pci_subvendor_id = 0;
356 u16 pci_subdevice_id = 0;
357 char adapter_name[256];
359 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Cheap rejection first: bail out unless the vendor is Intel */
361 pci_vendor_id = pci_get_vendor(dev);
362 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
365 pci_device_id = pci_get_device(dev);
366 pci_subvendor_id = pci_get_subvendor(dev);
367 pci_subdevice_id = pci_get_subdevice(dev);
/* Linear scan of the ID table; a zero subvendor/subdevice in a table
 * entry acts as a wildcard matching any value */
369 ent = ixgbe_vendor_info_array;
370 while (ent->vendor_id != 0) {
371 if ((pci_vendor_id == ent->vendor_id) &&
372 (pci_device_id == ent->device_id) &&
374 ((pci_subvendor_id == ent->subvendor_id) ||
375 (ent->subvendor_id == 0)) &&
377 ((pci_subdevice_id == ent->subdevice_id) ||
378 (ent->subdevice_id == 0))) {
/* Publish "<branding>, Version - <driver version>" as the device desc */
379 sprintf(adapter_name, "%s, Version - %s",
380 ixgbe_strings[ent->index],
381 ixgbe_driver_version);
382 device_set_desc_copy(dev, adapter_name);
384 return (BUS_PROBE_DEFAULT);
/* NOTE(review): loop increment, failure return (ENXIO) and closing braces
 * appear truncated in this excerpt. */
391 /*********************************************************************
392 * Device initialization routine
394 * The attach entry point is called when the driver is being loaded.
395 * This routine identifies the type of hardware, allocates all resources
396 * and initializes the hardware.
398 * return 0 on success, positive on failure
399 *********************************************************************/
402 ixgbe_attach(device_t dev)
404 struct adapter *adapter;
410 INIT_DEBUGOUT("ixgbe_attach: begin");
412 /* Allocate, clear, and link in our adapter structure */
413 adapter = device_get_softc(dev);
414 adapter->dev = adapter->osdep.dev = dev;
418 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
420 /* Set up the timer callout */
421 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
423 /* Determine hardware revision */
424 ixgbe_identify_hardware(adapter);
426 /* Do base PCI setup - map BAR0 */
427 if (ixgbe_allocate_pci_resources(adapter)) {
428 device_printf(dev, "Allocation of PCI resources failed\n");
433 /* Do descriptor calc and sanity checks */
/* TX descriptor count must keep the ring DBA_ALIGN-aligned and within
 * [MIN_TXD, MAX_TXD]; otherwise fall back to the default */
434 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
435 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
436 device_printf(dev, "TXD config issue, using default!\n");
437 adapter->num_tx_desc = DEFAULT_TXD;
439 adapter->num_tx_desc = ixgbe_txd;
442 ** With many RX rings it is easy to exceed the
443 ** system mbuf allocation. Tuning nmbclusters
444 ** can alleviate this.
446 if (nmbclusters > 0) {
448 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
449 if (s > nmbclusters) {
450 device_printf(dev, "RX Descriptors exceed "
451 "system mbuf max, using default instead!\n");
452 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range sanity check for the RX ring */
456 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
457 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
458 device_printf(dev, "RXD config issue, using default!\n");
459 adapter->num_rx_desc = DEFAULT_RXD;
461 adapter->num_rx_desc = ixgbe_rxd;
463 /* Allocate our TX/RX Queues */
464 if (ixgbe_allocate_queues(adapter)) {
469 /* Allocate multicast array memory. */
470 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
471 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
472 if (adapter->mta == NULL) {
473 device_printf(dev, "Can not allocate multicast setup array\n");
478 /* Initialize the shared code */
479 hw->allow_unsupported_sfp = allow_unsupported_sfp;
480 error = ixgbe_init_shared_code(hw);
481 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
483 ** No optics in this port, set up
484 ** so the timer routine will probe
485 ** for later insertion.
487 adapter->sfp_probe = TRUE;
489 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
490 device_printf(dev,"Unsupported SFP+ module detected!\n");
494 device_printf(dev,"Unable to initialize the shared code\n");
499 /* Make sure we have a good EEPROM before we read from it */
500 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
501 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
/* init_hw return value is examined case-by-case; non-fatal warnings
 * (pre-production EEPROM) continue, SFP errors abort the attach */
506 error = ixgbe_init_hw(hw);
508 case IXGBE_ERR_EEPROM_VERSION:
509 device_printf(dev, "This device is a pre-production adapter/"
510 "LOM. Please be aware there may be issues associated "
511 "with your hardware.\n If you are experiencing problems "
512 "please contact your Intel or hardware representative "
513 "who provided you with this hardware.\n");
515 case IXGBE_ERR_SFP_NOT_SUPPORTED:
516 device_printf(dev,"Unsupported SFP+ Module\n");
519 case IXGBE_ERR_SFP_NOT_PRESENT:
520 device_printf(dev,"No SFP+ Module found\n");
526 /* Detect and set physical type */
527 ixgbe_setup_optics(adapter);
/* Use MSI-X when multiple vectors were granted, else legacy/MSI */
529 if ((adapter->msix > 1) && (ixgbe_enable_msix))
530 error = ixgbe_allocate_msix(adapter);
532 error = ixgbe_allocate_legacy(adapter);
536 /* Setup OS specific network interface */
537 if (ixgbe_setup_interface(dev, adapter) != 0)
540 /* Initialize statistics */
541 ixgbe_update_stats_counters(adapter);
543 /* Register for VLAN events */
544 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
545 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
546 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
547 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
549 /* Check PCIE slot type/speed/width */
550 ixgbe_get_slot_info(hw);
552 /* Set an initial default flow control value */
553 adapter->fc = ixgbe_fc_full;
555 /* Check for certain supported features */
556 ixgbe_check_wol_support(adapter);
557 ixgbe_check_eee_support(adapter);
560 ixgbe_add_device_sysctls(adapter);
561 ixgbe_add_hw_stats(adapter);
563 /* let hardware know driver is loaded */
564 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
569 ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-unwind path: free everything acquired above in reverse order.
 * NOTE(review): the goto labels (err_late/err_out etc.) and intervening
 * lines appear truncated in this excerpt. */
575 ixgbe_free_transmit_structures(adapter);
576 ixgbe_free_receive_structures(adapter);
578 if (adapter->ifp != NULL)
579 if_free(adapter->ifp);
580 ixgbe_free_pci_resources(adapter);
581 free(adapter->mta, M_DEVBUF);
585 /*********************************************************************
586 * Device removal routine
588 * The detach entry point is called when the driver is being removed.
589 * This routine stops the adapter and deallocates all the resources
590 * that were allocated for driver operation.
592 * return 0 on success, positive on failure
593 *********************************************************************/
596 ixgbe_detach(device_t dev)
598 struct adapter *adapter = device_get_softc(dev);
599 struct ix_queue *que = adapter->queues;
600 struct tx_ring *txr = adapter->tx_rings;
603 INIT_DEBUGOUT("ixgbe_detach: begin");
605 /* Make sure VLANS are not using driver */
606 if (adapter->ifp->if_vlantrunk != NULL) {
607 device_printf(dev,"Vlan in use, detach first\n");
611 /* Stop the adapter */
612 IXGBE_CORE_LOCK(adapter);
613 ixgbe_setup_low_power_mode(adapter);
614 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues before tearing down rings */
616 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
618 #ifndef IXGBE_LEGACY_TX
619 taskqueue_drain(que->tq, &txr->txq_task)ixgbe
621 taskqueue_drain(que->tq, &que->que_task);
622 taskqueue_free(que->tq);
626 /* Drain the Link queue */
628 taskqueue_drain(adapter->tq, &adapter->link_task);
629 taskqueue_drain(adapter->tq, &adapter->mod_task);
630 taskqueue_drain(adapter->tq, &adapter->msf_task);
631 taskqueue_drain(adapter->tq, &adapter->phy_task);
633 taskqueue_drain(adapter->tq, &adapter->fdir_task);
635 taskqueue_free(adapter->tq);
638 /* let hardware know driver is unloading */
639 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
643 /* Unregister VLAN events */
644 if (adapter->vlan_attach != NULL)
645 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646 if (adapter->vlan_detach != NULL)
647 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
649 ether_ifdetach(adapter->ifp);
650 callout_drain(&adapter->timer);
652 netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654 ixgbe_free_pci_resources(adapter);
655 bus_generic_detach(dev);
656 if_free(adapter->ifp);
658 ixgbe_free_transmit_structures(adapter);
659 ixgbe_free_receive_structures(adapter);
660 free(adapter->mta, M_DEVBUF);
662 IXGBE_CORE_LOCK_DESTROY(adapter);
666 /*********************************************************************
668 * Shutdown entry point
 * Puts the adapter into a low-power state under the core lock;
 * the error value (and return, elided here) comes from that call.
670 **********************************************************************/
673 ixgbe_shutdown(device_t dev)
675 struct adapter *adapter = device_get_softc(dev);
678 INIT_DEBUGOUT("ixgbe_shutdown: begin");
680 IXGBE_CORE_LOCK(adapter);
681 error = ixgbe_setup_low_power_mode(adapter);
682 IXGBE_CORE_UNLOCK(adapter);
688 * Methods for going from:
689 * D0 -> D3: ixgbe_suspend
690 * D3 -> D0: ixgbe_resume
693 ixgbe_suspend(device_t dev)
695 struct adapter *adapter = device_get_softc(dev);
698 INIT_DEBUGOUT("ixgbe_suspend: begin");
700 IXGBE_CORE_LOCK(adapter);
/* Quiesce the adapter, then let the bus layer power it down to D3 */
702 error = ixgbe_setup_low_power_mode(adapter);
704 /* Save state and power down */
706 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
708 IXGBE_CORE_UNLOCK(adapter);
/* D3 -> D0 transition handler: restore PCI state, report and clear the
 * Wake Up Status register, and re-init the interface if it was up. */
714 ixgbe_resume(device_t dev)
716 struct adapter *adapter = device_get_softc(dev);
717 struct ifnet *ifp = adapter->ifp;
718 struct ixgbe_hw *hw = &adapter->hw;
721 INIT_DEBUGOUT("ixgbe_resume: begin");
723 IXGBE_CORE_LOCK(adapter);
725 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726 pci_restore_state(dev);
728 /* Read & clear WUS register */
729 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
731 device_printf(dev, "Woken up by (WUS): %#010x\n",
732 IXGBE_READ_REG(hw, IXGBE_WUS));
/* WUS is write-1-to-clear */
733 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734 /* And clear WUFC until next low-power transition */
735 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
738 * Required after D3->D0 transition;
739 * will re-advertise all previous advertised speeds
741 if (ifp->if_flags & IFF_UP)
742 ixgbe_init_locked(adapter);
744 IXGBE_CORE_UNLOCK(adapter);
746 INIT_DEBUGOUT("ixgbe_resume: end");
751 /*********************************************************************
754 * ixgbe_ioctl is called when the user wants to configure the
757 * return 0 on success, positive on failure
 * NOTE(review): the switch(command) statement and several case labels
 * (SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS, SIOCADDMULTI/SIOCDELMULTI,
 * SIOCSIFMEDIA/SIOCGIFMEDIA, SIOCSIFCAP, SIOCGI2C, default) appear
 * truncated in this excerpt — the fragments below are the case bodies.
758 **********************************************************************/
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
763 struct adapter *adapter = ifp->if_softc;
764 struct ifreq *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766 struct ifaddr *ifa = (struct ifaddr *)data;
767 bool avoid_reset = FALSE;
/* SIOCSIFADDR: only avoid the reset for INET/INET6 address families */
775 if (ifa->ifa_addr->sa_family == AF_INET)
779 if (ifa->ifa_addr->sa_family == AF_INET6)
782 #if defined(INET) || defined(INET6)
784 ** Calling init results in link renegotiation,
785 ** so we avoid doing it when possible.
788 ifp->if_flags |= IFF_UP;
789 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
791 if (!(ifp->if_flags & IFF_NOARP))
792 arp_ifinit(ifp, ifa);
794 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, then re-init with the new max frame size */
798 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
799 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
802 IXGBE_CORE_LOCK(adapter);
803 ifp->if_mtu = ifr->ifr_mtu;
804 adapter->max_frame_size =
805 ifp->if_mtu + IXGBE_MTU_HDR;
806 ixgbe_init_locked(adapter);
807 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: only touch promisc/allmulti if already running */
811 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812 IXGBE_CORE_LOCK(adapter);
813 if (ifp->if_flags & IFF_UP) {
814 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815 if ((ifp->if_flags ^ adapter->if_flags) &
816 (IFF_PROMISC | IFF_ALLMULTI)) {
817 ixgbe_set_promisc(adapter);
820 ixgbe_init_locked(adapter);
822 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
824 adapter->if_flags = ifp->if_flags;
825 IXGBE_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reprogram the multicast filter with intrs masked */
829 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831 IXGBE_CORE_LOCK(adapter);
832 ixgbe_disable_intr(adapter);
833 ixgbe_set_multi(adapter);
834 ixgbe_enable_intr(adapter);
835 IXGBE_CORE_UNLOCK(adapter);
840 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle each changed capability bit, then re-init if up */
845 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847 if (mask & IFCAP_HWCSUM)
848 ifp->if_capenable ^= IFCAP_HWCSUM;
849 if (mask & IFCAP_TSO4)
850 ifp->if_capenable ^= IFCAP_TSO4;
851 if (mask & IFCAP_TSO6)
852 ifp->if_capenable ^= IFCAP_TSO6;
853 if (mask & IFCAP_LRO)
854 ifp->if_capenable ^= IFCAP_LRO;
855 if (mask & IFCAP_VLAN_HWTAGGING)
856 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857 if (mask & IFCAP_VLAN_HWFILTER)
858 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859 if (mask & IFCAP_VLAN_HWTSO)
860 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862 IXGBE_CORE_LOCK(adapter);
863 ixgbe_init_locked(adapter);
864 IXGBE_CORE_UNLOCK(adapter);
866 VLAN_CAPABILITIES(ifp);
869 #if __FreeBSD_version >= 1100036
/* SIOCGI2C: read SFP module EEPROM bytes over I2C on behalf of userland;
 * only module addresses 0xA0/0xA2 are accepted */
872 struct ixgbe_hw *hw = &adapter->hw;
875 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
876 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
879 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
883 if (i2c.len > sizeof(i2c.data)) {
888 for (i = 0; i < i2c.len; i++)
889 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890 i2c.dev_addr, &i2c.data[i]);
891 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* default: hand anything unrecognized to the generic ethernet ioctl */
896 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897 error = ether_ioctl(ifp, command, data);
904 /*********************************************************************
907 * This routine is used in two ways. It is used by the stack as
908 * init entry point in network interface structure. It is also used
909 * by the driver as a hw/sw initialization routine to get to a
912 * return 0 on success, positive on failure
913 **********************************************************************/
/* Bit position of the Max Frame Size field within the MHADD register */
914 #define IXGBE_MHADD_MFS_SHIFT 16
917 ixgbe_init_locked(struct adapter *adapter)
919 struct ifnet *ifp = adapter->ifp;
920 device_t dev = adapter->dev;
921 struct ixgbe_hw *hw = &adapter->hw;
922 u32 k, txdctl, mhadd, gpie;
925 mtx_assert(&adapter->core_mtx, MA_OWNED);
926 INIT_DEBUGOUT("ixgbe_init_locked: begin");
927 hw->adapter_stopped = FALSE;
928 ixgbe_stop_adapter(hw);
929 callout_stop(&adapter->timer);
931 /* reprogram the RAR[0] in case user changed it. */
932 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
934 /* Get the latest mac address, User can use a LAA */
935 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936 IXGBE_ETH_LENGTH_OF_ADDRESS);
937 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938 hw->addr_ctrl.rar_used_count = 1;
940 /* Set the various hardware offload abilities */
941 ifp->if_hwassist = 0;
942 if (ifp->if_capenable & IFCAP_TSO)
943 ifp->if_hwassist |= CSUM_TSO;
944 if (ifp->if_capenable & IFCAP_TXCSUM) {
945 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947 if (hw->mac.type != ixgbe_mac_82598EB)
948 ifp->if_hwassist |= CSUM_SCTP;
952 /* Prepare transmit descriptors and buffers */
953 if (ixgbe_setup_transmit_structures(adapter)) {
954 device_printf(dev, "Could not setup transmit structures\n");
960 ixgbe_initialize_transmit_units(adapter);
962 /* Setup Multicast table */
963 ixgbe_set_multi(adapter);
966 ** Determine the correct mbuf pool
967 ** for doing jumbo frames
969 if (adapter->max_frame_size <= 2048)
970 adapter->rx_mbuf_sz = MCLBYTES;
971 else if (adapter->max_frame_size <= 4096)
972 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973 else if (adapter->max_frame_size <= 9216)
974 adapter->rx_mbuf_sz = MJUM9BYTES;
976 adapter->rx_mbuf_sz = MJUM16BYTES;
978 /* Prepare receive descriptors and buffers */
979 if (ixgbe_setup_receive_structures(adapter)) {
980 device_printf(dev, "Could not setup receive structures\n");
985 /* Configure RX settings */
986 ixgbe_initialize_receive_units(adapter);
988 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
990 /* Enable Fan Failure Interrupt */
991 gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
993 /* Add for Module detection */
994 if (hw->mac.type == ixgbe_mac_82599EB)
995 gpie |= IXGBE_SDP2_GPIEN;
998 * Thermal Failure Detection (X540)
999 * Link Detection (X552)
1001 if (hw->mac.type == ixgbe_mac_X540 ||
1002 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004 gpie |= IXGBE_SDP0_GPIEN_X540;
1006 if (adapter->msix > 1) {
1007 /* Enable Enhanced MSIX mode */
1008 gpie |= IXGBE_GPIE_MSIX_MODE;
1009 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1012 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1015 if (ifp->if_mtu > ETHERMTU) {
1016 /* aka IXGBE_MAXFRS on 82599 and newer */
1017 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1023 /* Now enable all the queues */
1024 for (int i = 0; i < adapter->num_queues; i++) {
1025 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026 txdctl |= IXGBE_TXDCTL_ENABLE;
1027 /* Set WTHRESH to 8, burst writeback */
1028 txdctl |= (8 << 16);
1030 * When the internal queue falls below PTHRESH (32),
1031 * start prefetching as long as there are at least
1032 * HTHRESH (1) buffers ready. The values are taken
1033 * from the Intel linux driver 3.8.21.
1034 * Prefetching enables tx line rate even with 1 queue.
1036 txdctl |= (32 << 0) | (1 << 8);
1037 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1040 for (int i = 0; i < adapter->num_queues; i++) {
1041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042 if (hw->mac.type == ixgbe_mac_82598EB) {
1048 rxdctl &= ~0x3FFFFF;
1051 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053 for (k = 0; k < 10; k++) {
1054 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055 IXGBE_RXDCTL_ENABLE)
1063 * In netmap mode, we must preserve the buffers made
1064 * available to userspace before the if_init()
1065 * (this is true by default on the TX side, because
1066 * init makes all buffers available to userspace).
1068 * netmap_reset() and the device specific routines
1069 * (e.g. ixgbe_setup_receive_rings()) map these
1070 * buffers at the end of the NIC ring, so here we
1071 * must set the RDT (tail) register to make sure
1072 * they are not overwritten.
1074 * In this driver the NIC ring starts at RDH = 0,
1075 * RDT points to the last slot available for reception (?),
1076 * so RDT = num_rx_desc - 1 means the whole ring is available.
1078 if (ifp->if_capenable & IFCAP_NETMAP) {
1079 struct netmap_adapter *na = NA(adapter->ifp);
1080 struct netmap_kring *kring = &na->rx_rings[i];
1081 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1083 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1085 #endif /* DEV_NETMAP */
1086 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1089 /* Enable Receive engine */
1090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091 if (hw->mac.type == ixgbe_mac_82598EB)
1092 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093 rxctrl |= IXGBE_RXCTRL_RXEN;
1094 ixgbe_enable_rx_dma(hw, rxctrl);
1096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1098 /* Set up MSI/X routing */
1099 if (ixgbe_enable_msix) {
1100 ixgbe_configure_ivars(adapter);
1101 /* Set up auto-mask */
1102 if (hw->mac.type == ixgbe_mac_82598EB)
1103 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1105 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1108 } else { /* Simple settings for Legacy/MSI */
1109 ixgbe_set_ivar(adapter, 0, 0, 0);
1110 ixgbe_set_ivar(adapter, 0, 0, 1);
1111 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1115 /* Init Flow director */
1116 if (hw->mac.type != ixgbe_mac_82598EB) {
1117 u32 hdrm = 32 << fdir_pballoc;
1119 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1125 ** Check on any SFP devices that
1126 ** need to be kick-started
1128 if (hw->phy.type == ixgbe_phy_none) {
1129 int err = hw->phy.ops.identify(hw);
1130 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1132 "Unsupported SFP+ module type was detected.\n");
1137 /* Set moderation on the Link interrupt */
1138 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1140 /* Configure Energy Efficient Ethernet for supported devices */
1141 if (adapter->eee_support)
1142 ixgbe_setup_eee(hw, adapter->eee_enabled);
1144 /* Config/Enable Link */
1145 ixgbe_config_link(adapter);
1147 /* Hardware Packet Buffer & Flow Control setup */
1148 ixgbe_config_delay_values(adapter);
1150 /* Initialize the FC settings */
1153 /* Set up VLAN support and filter */
1154 ixgbe_setup_vlan_hw_support(adapter);
1156 /* Setup DMA Coalescing */
1157 ixgbe_config_dmac(adapter);
1159 /* And now turn on interrupts */
1160 ixgbe_enable_intr(adapter);
1162 /* Now inform the stack we're ready */
1163 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * if_init entry point: take the adapter core lock and run the
 * locked initialization path.  All real work is done in
 * ixgbe_init_locked(); this wrapper only provides locking.
 * NOTE(review): extraction dropped lines here (embedded numbering
 * has gaps) — braces/return type are missing from this view.
 */
1169 ixgbe_init(void *arg)
1171 	struct adapter *adapter = arg;
1173 	IXGBE_CORE_LOCK(adapter);
1174 	ixgbe_init_locked(adapter);
1175 	IXGBE_CORE_UNLOCK(adapter);
/*
 * Compute the flow-control high/low water marks from the current
 * max frame size and program them into hw->fc.  X540/X550-class
 * MACs use the X540 delay formulas; older parts use the generic
 * IXGBE_DV/IXGBE_LOW_DV macros.  Also seeds requested_mode,
 * pause_time and send_xon from the softc.
 */
1180 ixgbe_config_delay_values(struct adapter *adapter)
1182 	struct ixgbe_hw *hw = &adapter->hw;
1183 	u32 rxpb, frame, size, tmp;
1185 	frame = adapter->max_frame_size;
1187 	/* Calculate High Water */
1188 	switch (hw->mac.type) {
1189 	case ixgbe_mac_X540:
1190 	case ixgbe_mac_X550:
1191 	case ixgbe_mac_X550EM_x:
1192 		tmp = IXGBE_DV_X540(frame, frame);
1195 		tmp = IXGBE_DV(frame, frame);
/* High water = RX packet buffer size (KB) minus the delay value (KB). */
1198 	size = IXGBE_BT2KB(tmp);
1199 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1200 	hw->fc.high_water[0] = rxpb - size;
1202 	/* Now calculate Low Water */
1203 	switch (hw->mac.type) {
1204 	case ixgbe_mac_X540:
1205 	case ixgbe_mac_X550:
1206 	case ixgbe_mac_X550EM_x:
1207 		tmp = IXGBE_LOW_DV_X540(frame);
1210 		tmp = IXGBE_LOW_DV(frame);
1213 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1215 	hw->fc.requested_mode = adapter->fc;
1216 	hw->fc.pause_time = IXGBE_FC_PAUSE;
1217 	hw->fc.send_xon = TRUE;
1222 ** MSIX Interrupt Handlers and Tasklets
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1229 struct ixgbe_hw *hw = &adapter->hw;
1230 u64 queue = (u64)(1 << vector);
1233 if (hw->mac.type == ixgbe_mac_82598EB) {
1234 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1237 mask = (queue & 0xFFFFFFFF);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240 mask = (queue >> 32);
1242 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1249 struct ixgbe_hw *hw = &adapter->hw;
1250 u64 queue = (u64)(1 << vector);
1253 if (hw->mac.type == ixgbe_mac_82598EB) {
1254 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1257 mask = (queue & 0xFFFFFFFF);
1259 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260 mask = (queue >> 32);
1262 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Deferred (taskqueue) per-queue service routine: when the interface
 * is running, clean the RX ring and restart TX, then re-enable this
 * queue's interrupt (or the whole interrupt set in the legacy case).
 */
1267 ixgbe_handle_que(void *context, int pending)
1269 	struct ix_queue *que = context;
1270 	struct adapter *adapter = que->adapter;
1271 	struct tx_ring *txr = que->txr;
1272 	struct ifnet *ifp = adapter->ifp;
1275 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1276 		more = ixgbe_rxeof(que);
1279 #ifndef IXGBE_LEGACY_TX
1280 		if (!drbr_empty(ifp, txr->br))
1281 			ixgbe_mq_start_locked(ifp, txr);
1283 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1284 			ixgbe_start_locked(txr, ifp);
1286 		IXGBE_TX_UNLOCK(txr);
1289 	/* Reenable this interrupt */
/* que->res non-NULL means a dedicated MSI-X vector was set up for
 * this queue; otherwise fall back to re-enabling all interrupts. */
1290 	if (que->res != NULL)
1291 		ixgbe_enable_queue(adapter, que->msix);
1293 		ixgbe_enable_intr(adapter);
1298 /*********************************************************************
1300 * Legacy Interrupt Service routine
1302 **********************************************************************/
/*
 * Legacy/MSI interrupt service routine.  Reads and dispatches the
 * interrupt cause (EICR): RX/TX processing, fan failure on copper
 * parts, link status change, and the X550EM external PHY event.
 * Slow-path events are punted to taskqueues; queue work that is
 * still pending is re-queued on the per-queue taskqueue.
 */
1305 ixgbe_legacy_irq(void *arg)
1307 	struct ix_queue *que = arg;
1308 	struct adapter *adapter = que->adapter;
1309 	struct ixgbe_hw *hw = &adapter->hw;
1310 	struct ifnet *ifp = adapter->ifp;
1311 	struct tx_ring *txr = adapter->tx_rings;
1316 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious interrupt (no cause bits): just re-enable and return. */
1319 	if (reg_eicr == 0) {
1320 		ixgbe_enable_intr(adapter);
1324 	more = ixgbe_rxeof(que);
1328 #ifdef IXGBE_LEGACY_TX
1329 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1330 		ixgbe_start_locked(txr, ifp);
1332 	if (!drbr_empty(ifp, txr->br))
1333 		ixgbe_mq_start_locked(ifp, txr);
1335 	IXGBE_TX_UNLOCK(txr);
1337 	/* Check for fan failure */
1338 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1339 	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1340 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1341 		    "REPLACE IMMEDIATELY!!\n");
1342 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1345 	/* Link status change */
1346 	if (reg_eicr & IXGBE_EICR_LSC)
1347 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1349 	/* External PHY interrupt */
1350 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1351 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1352 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: defer to the queue task instead of spinning here. */
1355 		taskqueue_enqueue(que->tq, &que->que_task);
1357 		ixgbe_enable_intr(adapter);
1362 /*********************************************************************
1364 * MSIX Queue Interrupt Service routine
1366 **********************************************************************/
/*
 * MSI-X per-queue interrupt handler.  Masks this queue's vector,
 * services RX and TX, then (unless AIM is disabled) runs Adaptive
 * Interrupt Moderation: derive a new EITR setting from the average
 * packet size seen on this queue since the last interrupt.
 */
1368 ixgbe_msix_que(void *arg)
1370 	struct ix_queue *que = arg;
1371 	struct adapter *adapter = que->adapter;
1372 	struct ifnet *ifp = adapter->ifp;
1373 	struct tx_ring *txr = que->txr;
1374 	struct rx_ring *rxr = que->rxr;
1378 	/* Protect against spurious interrupts */
1379 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1382 	ixgbe_disable_queue(adapter, que->msix);
1385 	more = ixgbe_rxeof(que);
1389 #ifdef IXGBE_LEGACY_TX
/* NOTE(review): the legacy_irq path passes &ifp->if_snd here; the
 * missing '&' below looks like a transcription or source bug — verify
 * against the upstream driver before relying on this line. */
1390 	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1391 		ixgbe_start_locked(txr, ifp);
1393 	if (!drbr_empty(ifp, txr->br))
1394 		ixgbe_mq_start_locked(ifp, txr);
1396 	IXGBE_TX_UNLOCK(txr);
1400 	if (ixgbe_enable_aim == FALSE)
1403 	** Do Adaptive Interrupt Moderation:
1404 	**  - Write out last calculated setting
1405 	**  - Calculate based on average size over
1406 	**    the last interval.
1408 	if (que->eitr_setting)
1409 		IXGBE_WRITE_REG(&adapter->hw,
1410 		    IXGBE_EITR(que->msix), que->eitr_setting);
1412 	que->eitr_setting = 0;
1414 	/* Idle, do nothing */
1415 	if ((txr->bytes == 0) && (rxr->bytes == 0))
/* New ITR = max of TX and RX average packet size over the interval. */
1418 	if ((txr->bytes) && (txr->packets))
1419 		newitr = txr->bytes/txr->packets;
1420 	if ((rxr->bytes) && (rxr->packets))
1421 		newitr = max(newitr,
1422 		    (rxr->bytes / rxr->packets));
1423 	newitr += 24; /* account for hardware frame, crc */
1425 	/* set an upper boundary */
1426 	newitr = min(newitr, 3000);
1428 	/* Be nice to the mid range */
1429 	if ((newitr > 300) && (newitr < 1200))
1430 		newitr = (newitr / 3);
1432 		newitr = (newitr / 2);
/* 82598 packs the interval into both halves of EITR; newer MACs
 * instead need the counter write-disable bit. */
1434 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435 		newitr |= newitr << 16;
1437 		newitr |= IXGBE_EITR_CNT_WDIS;
1439 	/* save for next interrupt */
1440 	que->eitr_setting = newitr;
1450 		taskqueue_enqueue(que->tq, &que->que_task);
1452 		ixgbe_enable_queue(adapter, que->msix);
/*
 * MSI-X "other causes" (link) vector handler.  Reads the cause,
 * clears it, and dispatches: link state change, Flow Director
 * reinit, ECC error, over-temperature, SFP module insert /
 * multispeed-fiber events, 82598AT fan failure, and the X550EM
 * external PHY interrupt.  Finishes by re-enabling EIMS_OTHER.
 */
1458 ixgbe_msix_link(void *arg)
1460 	struct adapter *adapter = arg;
1461 	struct ixgbe_hw *hw = &adapter->hw;
1462 	u32 reg_eicr, mod_mask;
1464 	++adapter->link_irq;
1466 	/* First get the cause */
1467 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1468 	/* Be sure the queue bits are not cleared */
1469 	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1470 	/* Clear interrupt with write */
1471 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1473 	/* Link status change */
1474 	if (reg_eicr & IXGBE_EICR_LSC)
1475 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* The causes below only exist on 82599 and newer MACs. */
1477 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1479 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1480 			/* This is probably overkill :) */
/* Only one Flow Director reinit in flight at a time. */
1481 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1483 			/* Disable the interrupt */
1484 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1485 			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1488 		if (reg_eicr & IXGBE_EICR_ECC) {
1489 			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1490 			    "Please Reboot!!\n");
1491 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1494 		/* Check for over temp condition */
1495 		if (reg_eicr & IXGBE_EICR_TS) {
1496 			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1497 			    "PHY IS SHUT DOWN!!\n");
1498 			device_printf(adapter->dev, "System shutdown required!\n");
1499 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1503 	/* Pluggable optics-related interrupt */
/* Module-presence cause differs per device: X550EM_X_SFP uses SDP0. */
1504 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1505 		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1507 		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1509 	if (ixgbe_is_sfp(hw)) {
1510 		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1511 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1512 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1513 		} else if (reg_eicr & mod_mask) {
1514 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1515 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1519 	/* Check for fan failure */
1520 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1521 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1522 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1523 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1524 		    "REPLACE IMMEDIATELY!!\n");
1527 	/* External PHY interrupt */
1528 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1529 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1530 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1531 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1534 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1538 /*********************************************************************
1540 * Media Ioctl callback
1542 * This routine is called whenever the user queries the status of
1543 * the interface using ifconfig.
1545 **********************************************************************/
/*
 * ifmedia status callback (ifconfig query).  Refreshes link state
 * under the core lock, then maps the supported physical layer and
 * current link_speed onto the corresponding IFM_* active media,
 * appending RX/TX pause flags from the negotiated flow control.
 */
1547 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1549 	struct adapter *adapter = ifp->if_softc;
1550 	struct ixgbe_hw *hw = &adapter->hw;
1553 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1554 	IXGBE_CORE_LOCK(adapter);
1555 	ixgbe_update_link_status(adapter);
1557 	ifmr->ifm_status = IFM_AVALID;
1558 	ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail out early. */
1560 	if (!adapter->link_active) {
1561 		IXGBE_CORE_UNLOCK(adapter);
1565 	ifmr->ifm_status |= IFM_ACTIVE;
1566 	layer = ixgbe_get_supported_physical_layer(hw);
1568 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1569 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1570 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1571 		switch (adapter->link_speed) {
1572 		case IXGBE_LINK_SPEED_10GB_FULL:
1573 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1575 		case IXGBE_LINK_SPEED_1GB_FULL:
1576 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1578 		case IXGBE_LINK_SPEED_100_FULL:
1579 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1582 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1583 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1584 		switch (adapter->link_speed) {
1585 		case IXGBE_LINK_SPEED_10GB_FULL:
1586 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1589 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1590 		switch (adapter->link_speed) {
1591 		case IXGBE_LINK_SPEED_10GB_FULL:
1592 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1594 		case IXGBE_LINK_SPEED_1GB_FULL:
1595 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1598 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1599 		switch (adapter->link_speed) {
1600 		case IXGBE_LINK_SPEED_10GB_FULL:
1601 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1603 		case IXGBE_LINK_SPEED_1GB_FULL:
1604 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1607 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1608 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1609 		switch (adapter->link_speed) {
1610 		case IXGBE_LINK_SPEED_10GB_FULL:
1611 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1613 		case IXGBE_LINK_SPEED_1GB_FULL:
1614 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1617 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1618 		switch (adapter->link_speed) {
1619 		case IXGBE_LINK_SPEED_10GB_FULL:
1620 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
/* Backplane (KR/KX4/KX) media: reported with the closest existing
 * IFM_* types, per the XXX comment below. */
1624 	** XXX: These need to use the proper media types once
1627 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1628 		switch (adapter->link_speed) {
1629 		case IXGBE_LINK_SPEED_10GB_FULL:
1630 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1632 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1633 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1635 		case IXGBE_LINK_SPEED_1GB_FULL:
1636 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1639 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1640 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1641 		switch (adapter->link_speed) {
1642 		case IXGBE_LINK_SPEED_10GB_FULL:
1643 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1645 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1646 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1648 		case IXGBE_LINK_SPEED_1GB_FULL:
1649 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1653 	/* If nothing is recognized... */
1654 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1655 		ifmr->ifm_active |= IFM_UNKNOWN;
1657 #if __FreeBSD_version >= 900025
1658 	/* Display current flow control setting used on link */
1659 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1660 	    hw->fc.current_mode == ixgbe_fc_full)
1661 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1662 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1663 	    hw->fc.current_mode == ixgbe_fc_full)
1664 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1667 	IXGBE_CORE_UNLOCK(adapter);
1672 /*********************************************************************
1674 * Media Ioctl callback
1676 * This routine is called when the user changes speed/duplex using
1677 * media/mediopt option with ifconfig.
1679 **********************************************************************/
/*
 * ifmedia change callback (ifconfig media/mediaopt).  Translates
 * the requested IFM_ subtype into an ixgbe_link_speed mask — the
 * switch cases deliberately fall through so a faster selection also
 * advertises the slower speeds — then restarts link setup and
 * records the advertised speeds in adapter->advertise.
 */
1681 ixgbe_media_change(struct ifnet * ifp)
1683 	struct adapter *adapter = ifp->if_softc;
1684 	struct ifmedia *ifm = &adapter->media;
1685 	struct ixgbe_hw *hw = &adapter->hw;
1686 	ixgbe_link_speed speed = 0;
1688 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1690 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from userland. */
1693 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1697 	** We don't actually need to check against the supported
1698 	** media types of the adapter; ifmedia will take care of
1701 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1704 		speed |= IXGBE_LINK_SPEED_100_FULL;
/* Intentional fallthrough: each case accumulates the lower speeds. */
1706 	case IFM_10G_SR: /* KR, too */
1708 	case IFM_10G_CX4: /* KX4 */
1709 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
1710 	case IFM_10G_TWINAX:
1711 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
1714 		speed |= IXGBE_LINK_SPEED_100_FULL;
1717 	case IFM_1000_CX: /* KX */
1718 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
1721 		speed |= IXGBE_LINK_SPEED_100_FULL;
1727 	hw->mac.autotry_restart = TRUE;
1728 	hw->mac.ops.setup_link(hw, speed, TRUE);
/* Pack advertised speeds into the sysctl encoding: bit2=10G,
 * bit1=1G, bit0=100M. */
1729 	adapter->advertise =
1730 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1731 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1732 	    ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1737 	device_printf(adapter->dev, "Invalid media type!\n");
/*
 * Program the FCTRL register's unicast/multicast promiscuous bits
 * (UPE/MPE) from the interface flags.  Counts the multicast list
 * first so MPE is only left set when the list overflows the
 * hardware filter or IFF_ALLMULTI is requested.
 */
1742 ixgbe_set_promisc(struct adapter *adapter)
1745 	struct ifnet *ifp = adapter->ifp;
1748 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1749 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1750 	if (ifp->if_flags & IFF_ALLMULTI)
1751 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1753 		struct ifmultiaddr *ifma;
/* Older FreeBSD used IF_ADDR_LOCK; newer uses if_maddr_rlock. */
1754 #if __FreeBSD_version < 800000
1757 		if_maddr_rlock(ifp);
1759 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1760 			if (ifma->ifma_addr->sa_family != AF_LINK)
1762 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1766 #if __FreeBSD_version < 800000
1767 		IF_ADDR_UNLOCK(ifp);
1769 		if_maddr_runlock(ifp);
1772 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1773 		reg_rctl &= (~IXGBE_FCTRL_MPE);
1774 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1776 	if (ifp->if_flags & IFF_PROMISC) {
1777 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1778 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1779 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1780 		reg_rctl |= IXGBE_FCTRL_MPE;
1781 		reg_rctl &= ~IXGBE_FCTRL_UPE;
1782 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1788 /*********************************************************************
1791 * This routine is called whenever multicast address list is updated.
1793 **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast address list.  Copies up to MAX_NUM_MULTICAST_ADDRESSES
 * MACs into a flat array, programs FCTRL promiscuity to match the
 * interface flags / list overflow, and hands the array to the shared
 * code via the ixgbe_mc_array_itr iterator.
 */
1797 ixgbe_set_multi(struct adapter *adapter)
1802 	struct ifmultiaddr *ifma;
1804 	struct ifnet *ifp = adapter->ifp;
1806 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1809 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1810 	    MAX_NUM_MULTICAST_ADDRESSES);
1812 #if __FreeBSD_version < 800000
1815 	if_maddr_rlock(ifp);
1817 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1818 		if (ifma->ifma_addr->sa_family != AF_LINK)
1820 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1822 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1823 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1824 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1827 #if __FreeBSD_version < 800000
1828 	IF_ADDR_UNLOCK(ifp);
1830 	if_maddr_runlock(ifp);
1833 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional UPE|MPE set is immediately
 * reshaped by the conditionals below — verify against the upstream
 * driver; lines are missing from this extraction. */
1834 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1835 	if (ifp->if_flags & IFF_PROMISC)
1836 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1838 	    ifp->if_flags & IFF_ALLMULTI) {
1839 		fctrl |= IXGBE_FCTRL_MPE;
1840 		fctrl &= ~IXGBE_FCTRL_UPE;
1842 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1844 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1846 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1848 		ixgbe_update_mc_addr_list(&adapter->hw,
1849 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1856 * This is an iterator function now needed by the multicast
1857 * shared code. It simply feeds the shared code routine the
1858 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback for the shared-code multicast update: returns
 * the current address from the flat array built by ixgbe_set_multi()
 * and advances *update_ptr by one MAC address length.
 */
1861 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1863 	u8 *addr = *update_ptr;
1867 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1868 	*update_ptr = newptr;
1873 /*********************************************************************
1876 * This routine checks for link status,updates statistics,
1877 * and runs the watchdog check.
1879 **********************************************************************/
/*
 * Once-per-second callout (core lock held by caller contract —
 * asserted below).  Probes pluggable optics if needed, refreshes
 * link state and statistics, scans each TX queue for hang detection
 * (per-queue busy counter vs IXGBE_MAX_TX_BUSY), rearms queues that
 * still have work, and triggers a full watchdog reset only when
 * every queue reports hung.
 */
1882 ixgbe_local_timer(void *arg)
1884 	struct adapter *adapter = arg;
1885 	device_t dev = adapter->dev;
1886 	struct ix_queue *que = adapter->queues;
1890 	mtx_assert(&adapter->core_mtx, MA_OWNED);
1892 	/* Check for pluggable optics */
1893 	if (adapter->sfp_probe)
1894 		if (!ixgbe_sfp_probe(adapter))
1895 			goto out; /* Nothing to do */
1897 	ixgbe_update_link_status(adapter);
1898 	ixgbe_update_stats_counters(adapter);
1901 	** Check the TX queues status
1902 	**	- mark hung queues so we don't schedule on them
1903 	**      - watchdog only if all queues show hung
1905 	for (int i = 0; i < adapter->num_queues; i++, que++) {
1906 		/* Keep track of queues with work for soft irq */
1908 			queues |= ((u64)1 << que->me);
1910 		** Each time txeof runs without cleaning, but there
1911 		** are uncleaned descriptors it increments busy. If
1912 		** we get to the MAX we declare it hung.
1914 		if (que->busy == IXGBE_QUEUE_HUNG) {
1916 			/* Mark the queue as inactive */
1917 			adapter->active_queues &= ~((u64)1 << que->me);
1920 			/* Check if we've come back from hung */
1921 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1922 				adapter->active_queues |= ((u64)1 << que->me);
1924 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
1925 			device_printf(dev,"Warning queue %d "
1926 			    "appears to be hung!\n", i);
1927 			que->txr->busy = IXGBE_QUEUE_HUNG;
1933 	/* Only truly watchdog if all queues show hung */
1934 	if (hung == adapter->num_queues)
1936 	else if (queues != 0) { /* Force an IRQ on queues with work */
1937 		ixgbe_rearm_queues(adapter, queues);
1941 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark interface down, count the event, reinit. */
1945 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1946 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1947 	adapter->watchdog_events++;
1948 	ixgbe_init_locked(adapter);
1952 ** Note: this routine updates the OS on the link state
1953 ** the real check of the hardware only happens with
1954 ** a link interrupt.
/*
 * Propagate the cached hardware link state (adapter->link_up, set by
 * the link interrupt path) to the network stack.  On an up
 * transition also reapplies flow control and DMA coalescing config.
 * Note: this does NOT poll the hardware; only transitions of
 * link_active generate if_link_state_change() events.
 */
1957 ixgbe_update_link_status(struct adapter *adapter)
1959 	struct ifnet *ifp = adapter->ifp;
1960 	device_t dev = adapter->dev;
1962 	if (adapter->link_up){
1963 		if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G speed encoding used here. */
1965 				device_printf(dev,"Link is up %d Gbps %s \n",
1966 				    ((adapter->link_speed == 128)? 10:1),
1968 			adapter->link_active = TRUE;
1969 			/* Update any Flow Control changes */
1970 			ixgbe_fc_enable(&adapter->hw);
1971 			/* Update DMA coalescing config */
1972 			ixgbe_config_dmac(adapter);
1973 			if_link_state_change(ifp, LINK_STATE_UP);
1975 	} else { /* Link down */
1976 		if (adapter->link_active == TRUE) {
1978 				device_printf(dev,"Link is Down\n");
1979 			if_link_state_change(ifp, LINK_STATE_DOWN);
1980 			adapter->link_active = FALSE;
1988 /*********************************************************************
1990 * This routine disables all traffic on the adapter by issuing a
1991 * global reset on the MAC and deallocates TX/RX buffers.
1993 **********************************************************************/
/*
 * Stop all traffic (core lock must be held — asserted).  Disables
 * interrupts and the timer callout, marks the interface not running,
 * resets the MAC via the shared code, turns off the TX laser, pushes
 * the link-down state to the stack, and restores RAR[0] in case the
 * user changed the MAC address.
 */
1996 ixgbe_stop(void *arg)
1999 	struct adapter *adapter = arg;
2000 	struct ixgbe_hw *hw = &adapter->hw;
2003 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2005 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2006 	ixgbe_disable_intr(adapter);
2007 	callout_stop(&adapter->timer);
2009 	/* Let the stack know...*/
2010 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs. */
2013 	hw->adapter_stopped = FALSE;
2014 	ixgbe_stop_adapter(hw);
2015 	if (hw->mac.type == ixgbe_mac_82599EB)
2016 		ixgbe_stop_mac_link_on_d3_82599(hw);
2017 	/* Turn off the laser - noop with no optics */
2018 	ixgbe_disable_tx_laser(hw);
2020 	/* Update the stack */
2021 	adapter->link_up = FALSE;
2022 	ixgbe_update_link_status(adapter);
2024 	/* reprogram the RAR[0] in case user changed it. */
2025 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2031 /*********************************************************************
2033 * Determine hardware revision.
2035 **********************************************************************/
/*
 * Read PCI config identity (vendor/device/revision/subsystem IDs)
 * into the shared-code hw struct, enable bus mastering, resolve the
 * MAC type, and set the DMA scatter segment count (82598 vs 82599+).
 */
2037 ixgbe_identify_hardware(struct adapter *adapter)
2039 	device_t dev = adapter->dev;
2040 	struct ixgbe_hw *hw = &adapter->hw;
2042 	/* Save off the information about this board */
2043 	hw->vendor_id = pci_get_vendor(dev);
2044 	hw->device_id = pci_get_device(dev);
2045 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2046 	hw->subsystem_vendor_id =
2047 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2048 	hw->subsystem_device_id =
2049 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2052 	** Make sure BUSMASTER is set
2054 	pci_enable_busmaster(dev);
2056 	/* We need this here to set the num_segs below */
2057 	ixgbe_set_mac_type(hw);
2059 	/* Pick up the 82599 settings */
2060 	if (hw->mac.type != ixgbe_mac_82598EB) {
2061 		hw->phy.smart_speed = ixgbe_smart_speed;
2062 		adapter->num_segs = IXGBE_82599_SCATTER;
2064 		adapter->num_segs = IXGBE_82598_SCATTER;
2069 /*********************************************************************
2071 * Determine optic type
2073 **********************************************************************/
/*
 * Derive the default ifmedia optics type (adapter->optics) from the
 * supported physical layer bitmap.  Checks run from most to least
 * specific; if nothing matches, fall back to IFM_ETHER | IFM_AUTO.
 * (Each branch presumably returns after assigning — the returns are
 * among the lines dropped by extraction.)
 */
2075 ixgbe_setup_optics(struct adapter *adapter)
2077 	struct ixgbe_hw *hw = &adapter->hw;
2080 	layer = ixgbe_get_supported_physical_layer(hw);
2082 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2083 		adapter->optics = IFM_10G_T;
2087 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2088 		adapter->optics = IFM_1000_T;
2092 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2093 		adapter->optics = IFM_1000_SX;
2097 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2098 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2099 		adapter->optics = IFM_10G_LR;
2103 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2104 		adapter->optics = IFM_10G_SR;
2108 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2109 		adapter->optics = IFM_10G_TWINAX;
2113 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2114 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2115 		adapter->optics = IFM_10G_CX4;
2119 	/* If we get here just set the default */
2120 	adapter->optics = IFM_ETHER | IFM_AUTO;
2124 /*********************************************************************
2126 * Setup the Legacy or MSI Interrupt handler
2128 **********************************************************************/
/*
 * Set up a single Legacy or MSI interrupt: allocate the IRQ
 * resource, create the per-queue and link taskqueues with their
 * deferred tasks (link/mod/msf/phy, plus Flow Director when built
 * in), and register ixgbe_legacy_irq as the handler.  On handler
 * registration failure the taskqueues are freed again.
 */
2130 ixgbe_allocate_legacy(struct adapter *adapter)
2132 	device_t dev = adapter->dev;
2133 	struct ix_queue *que = adapter->queues;
2134 #ifndef IXGBE_LEGACY_TX
2135 	struct tx_ring *txr = adapter->tx_rings;
/* MSI was granted exactly one vector: use rid 1 instead of 0. */
2140 	if (adapter->msix == 1)
2143 	/* We allocate a single interrupt resource */
2144 	adapter->res = bus_alloc_resource_any(dev,
2145 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2146 	if (adapter->res == NULL) {
2147 		device_printf(dev, "Unable to allocate bus resource: "
2153 	 * Try allocating a fast interrupt and the associated deferred
2154 	 * processing contexts.
2156 #ifndef IXGBE_LEGACY_TX
2157 	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2159 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2160 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2161 	    taskqueue_thread_enqueue, &que->tq);
2162 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2163 	    device_get_nameunit(adapter->dev));
2165 	/* Tasklets for Link, SFP and Multispeed Fiber */
2166 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2167 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2168 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2169 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2171 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2173 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2174 	    taskqueue_thread_enqueue, &adapter->tq);
2175 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2176 	    device_get_nameunit(adapter->dev));
2178 	if ((error = bus_setup_intr(dev, adapter->res,
2179 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2180 	    que, &adapter->tag)) != 0) {
2181 		device_printf(dev, "Failed to register fast interrupt "
2182 		    "handler: %d\n", error);
2183 		taskqueue_free(que->tq);
2184 		taskqueue_free(adapter->tq);
2189 	/* For simplicity in the handlers */
2190 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2196 /*********************************************************************
2198 * Setup MSIX Interrupt resources and handlers
2200 **********************************************************************/
/*
 * Allocate and wire up MSI-X vectors: one per queue (handler
 * ixgbe_msix_que, bound to a CPU when multiqueue, with its own
 * taskqueue) plus one final vector for link/other causes (handler
 * ixgbe_msix_link) and the shared link/mod/msf/phy/fdir tasklets.
 */
2202 ixgbe_allocate_msix(struct adapter *adapter)
2204 	device_t dev = adapter->dev;
2205 	struct ix_queue *que = adapter->queues;
2206 	struct tx_ring *txr = adapter->tx_rings;
2207 	int error, rid, vector = 0;
2210 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2212 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213 		    RF_SHAREABLE | RF_ACTIVE);
2214 		if (que->res == NULL) {
2215 			device_printf(dev,"Unable to allocate"
2216 			    " bus resource: que interrupt [%d]\n", vector);
2219 		/* Set the handler function */
2220 		error = bus_setup_intr(dev, que->res,
2221 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222 		    ixgbe_msix_que, que, &que->tag);
2225 			device_printf(dev, "Failed to register QUE handler");
2228 #if __FreeBSD_version >= 800504
2229 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2232 		adapter->active_queues |= (u64)(1 << que->msix);
2234 		 * Bind the msix vector, and thus the
2235 		 * rings to the corresponding cpu.
2237 		 * This just happens to match the default RSS round-robin
2238 		 * bucket -> queue -> CPU allocation.
2240 		if (adapter->num_queues > 1)
2243 		if (adapter->num_queues > 1)
2244 			bus_bind_intr(dev, que->res, cpu_id);
2246 #ifndef IXGBE_LEGACY_TX
2247 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2249 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251 		    taskqueue_thread_enqueue, &que->tq);
2252 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253 		    device_get_nameunit(adapter->dev));
/* Final vector: link / other-causes interrupt. */
2258 	adapter->res = bus_alloc_resource_any(dev,
2259 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260 	if (!adapter->res) {
2261 		device_printf(dev,"Unable to allocate"
2262 		    " bus resource: Link interrupt [%d]\n", rid);
2265 	/* Set the link handler function */
2266 	error = bus_setup_intr(dev, adapter->res,
2267 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268 	    ixgbe_msix_link, adapter, &adapter->tag);
2270 		adapter->res = NULL;
2271 		device_printf(dev, "Failed to register LINK handler");
2274 #if __FreeBSD_version >= 800504
2275 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2277 	adapter->vector = vector;
2278 	/* Tasklets for Link, SFP and Multispeed Fiber */
2279 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2284 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2286 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287 	    taskqueue_thread_enqueue, &adapter->tq);
2288 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289 	    device_get_nameunit(adapter->dev));
2295 * Setup Either MSI/X or MSI
/*
 * Decide the interrupt scheme and return the vector count.  Tries
 * MSI-X first: maps the MSI-X table BAR (82598 vs 82599 location),
 * sizes the queue count from mp_ncpus / available messages (capped
 * at 8 when autoconfiguring, overridable via ixgbe_num_queues), and
 * requests one vector per queue plus one for link.  Falls back to
 * MSI, then to a legacy interrupt.
 */
2298 ixgbe_setup_msix(struct adapter *adapter)
2300 	device_t dev = adapter->dev;
2301 	int rid, want, queues, msgs;
2303 	/* Override by tuneable */
2304 	if (ixgbe_enable_msix == 0)
2307 	/* First try MSI/X */
2308 	msgs = pci_msix_count(dev);
2311 	rid = PCIR_BAR(MSIX_82598_BAR);
2312 	adapter->msix_mem = bus_alloc_resource_any(dev,
2313 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2314 	if (adapter->msix_mem == NULL) {
2315 		rid += 4;	/* 82599 maps in higher BAR */
2316 		adapter->msix_mem = bus_alloc_resource_any(dev,
2317 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2319 	if (adapter->msix_mem == NULL) {
2320 		/* May not be enabled */
2321 		device_printf(adapter->dev,
2322 		    "Unable to map MSIX table \n");
2326 	/* Figure out a reasonable auto config value */
2327 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2329 	if (ixgbe_num_queues != 0)
2330 		queues = ixgbe_num_queues;
2331 	/* Set max queues to 8 when autoconfiguring */
2332 	else if ((ixgbe_num_queues == 0) && (queues > 8))
2335 	/* reflect correct sysctl value */
2336 	ixgbe_num_queues = queues;
2339 	** Want one vector (RX/TX pair) per queue
2340 	** plus an additional for Link.
2346 		device_printf(adapter->dev,
2347 		    "MSIX Configuration Problem, "
2348 		    "%d vectors but %d queues wanted!\n",
2352 	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2353 		device_printf(adapter->dev,
2354 		    "Using MSIX interrupts with %d vectors\n", msgs);
2355 		adapter->num_queues = queues;
2359 	** If MSIX alloc failed or provided us with
2360 	** less than needed, free and fall through to MSI
2362 	pci_release_msi(dev);
/* Release the mapped MSI-X table BAR before falling back. */
2365 	if (adapter->msix_mem != NULL) {
2366 		bus_release_resource(dev, SYS_RES_MEMORY,
2367 		    rid, adapter->msix_mem);
2368 		adapter->msix_mem = NULL;
2371 	if (pci_alloc_msi(dev, &msgs) == 0) {
2372 		device_printf(adapter->dev,"Using an MSI interrupt\n");
2375 	device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources -- map BAR(0) register space, record
 * the bus-space tag/handle for register access, set legacy defaults,
 * then let ixgbe_setup_msix() decide the interrupt scheme.
 */
2381 ixgbe_allocate_pci_resources(struct adapter *adapter)
2384 device_t dev = adapter->dev;
2387 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2390 if (!(adapter->pci_mem)) {
2391 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Cache bus-space accessors; hw_addr aliases the bus-space handle. */
2395 adapter->osdep.mem_bus_space_tag =
2396 rman_get_bustag(adapter->pci_mem);
2397 adapter->osdep.mem_bus_space_handle =
2398 rman_get_bushandle(adapter->pci_mem);
2399 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2401 /* Legacy defaults */
2402 adapter->num_queues = 1;
2403 adapter->hw.back = &adapter->osdep;
2406 ** Now setup MSI or MSI/X, should
2407 ** return us the number of supported
2408 ** vectors. (Will be 1 for MSI)
2410 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources -- tear down interrupts and release all
 * bus resources (per-queue IRQs, link/legacy IRQ, MSI state, MSIX
 * table BAR, and the BAR(0) register mapping).  Safe to call from a
 * partially-failed attach: bails early when adapter->res is NULL.
 */
2415 ixgbe_free_pci_resources(struct adapter * adapter)
2417 struct ix_queue *que = adapter->queues;
2418 device_t dev = adapter->dev;
/* MSIX table lives in a MAC-dependent BAR (see ixgbe_setup_msix). */
2421 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2422 memrid = PCIR_BAR(MSIX_82598_BAR);
2424 memrid = PCIR_BAR(MSIX_82599_BAR);
2427 ** There is a slight possibility of a failure mode
2428 ** in attach that will result in entering this function
2429 ** before interrupt resources have been initialized, and
2430 ** in that case we do not want to execute the loops below
2431 ** We can detect this reliably by the state of the adapter
2434 if (adapter->res == NULL)
2438 ** Release all msix queue resources:
2440 for (int i = 0; i < adapter->num_queues; i++, que++) {
2441 rid = que->msix + 1;
2442 if (que->tag != NULL) {
2443 bus_teardown_intr(dev, que->res, que->tag);
2446 if (que->res != NULL)
2447 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2451 /* Clean the Legacy or Link interrupt last */
2452 if (adapter->vector) /* we are doing MSIX */
2453 rid = adapter->vector + 1;
2455 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2457 if (adapter->tag != NULL) {
2458 bus_teardown_intr(dev, adapter->res, adapter->tag);
2459 adapter->tag = NULL;
2461 if (adapter->res != NULL)
2462 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2466 pci_release_msi(dev);
2468 if (adapter->msix_mem != NULL)
2469 bus_release_resource(dev, SYS_RES_MEMORY,
2470 memrid, adapter->msix_mem);
2472 if (adapter->pci_mem != NULL)
2473 bus_release_resource(dev, SYS_RES_MEMORY,
2474 PCIR_BAR(0), adapter->pci_mem);
2479 /*********************************************************************
2481 * Setup networking device structure and register an interface.
 *
 * Allocates the ifnet, wires up driver entry points (init/ioctl/
 * transmit), advertises capabilities, attaches the ethernet layer,
 * and registers supported media types.
2483 **********************************************************************/
2485 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2489 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2491 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2493 device_printf(dev, "can not allocate ifnet structure\n");
2496 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497 if_initbaudrate(ifp, IF_Gbps(10));
2498 ifp->if_init = ixgbe_init;
2499 ifp->if_softc = adapter;
2500 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2501 ifp->if_ioctl = ixgbe_ioctl;
2502 /* TSO parameters */
/* 65518 = 65535 minus the ethernet VLAN header, per if_hw_tsomax convention. -- NOTE(review): confirm derivation */
2503 ifp->if_hw_tsomax = 65518;
2504 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2505 ifp->if_hw_tsomaxsegsize = 2048;
/* Multiqueue transmit unless the legacy if_start path is compiled in. */
2506 #ifndef IXGBE_LEGACY_TX
2507 ifp->if_transmit = ixgbe_mq_start;
2508 ifp->if_qflush = ixgbe_qflush;
2510 ifp->if_start = ixgbe_start;
2511 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2512 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2513 IFQ_SET_READY(&ifp->if_snd);
2516 ether_ifattach(ifp, adapter->hw.mac.addr);
2518 adapter->max_frame_size =
2519 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2522 * Tell the upper layer(s) we support long frames.
2524 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2526 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2527 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2528 ifp->if_capabilities |= IFCAP_LRO;
2529 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2533 ifp->if_capenable = ifp->if_capabilities;
2536 ** Don't turn this on by default, if vlans are
2537 ** created on another pseudo device (eg. lagg)
2538 ** then vlan events are not passed thru, breaking
2539 ** operation, but with HW FILTER off it works. If
2540 ** using vlans directly on the ixgbe driver you can
2541 ** enable this and get full hardware tag filtering.
2543 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2546 * Specify the media types supported by this adapter and register
2547 * callbacks to update media and link information
2549 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2550 ixgbe_media_status);
2552 ixgbe_add_media_types(adapter);
2554 /* Autoselect media by default */
2555 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types -- translate the hardware's supported
 * physical-layer bitmask into ifmedia entries.  Backplane types with
 * no FreeBSD media define (KR/KX4/KX) are mapped onto the closest
 * existing media word, with a console note explaining the aliasing.
 */
2561 ixgbe_add_media_types(struct adapter *adapter)
2563 struct ixgbe_hw *hw = &adapter->hw;
2564 device_t dev = adapter->dev;
2567 layer = ixgbe_get_supported_physical_layer(hw);
2569 /* Media types with matching FreeBSD media defines */
2570 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2571 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2572 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2573 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2574 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2575 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
/* Both passive and active direct-attach copper report as TWINAX. */
2577 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2578 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2579 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2581 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2582 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2583 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2584 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2585 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2586 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2587 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2588 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2591 ** Other (no matching FreeBSD media type):
2592 ** To workaround this, we'll assign these completely
2593 ** inappropriate media types.
2595 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2596 device_printf(dev, "Media supported: 10GbaseKR\n");
2597 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2598 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2600 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2601 device_printf(dev, "Media supported: 10GbaseKX4\n");
2602 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2603 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2605 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2606 device_printf(dev, "Media supported: 1000baseKX\n");
2607 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2608 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2610 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2611 /* Someday, someone will care about you... */
2612 device_printf(dev, "Media supported: 1000baseBX\n");
/* The 82598AT additionally advertises explicit 1G full-duplex entries. */
2615 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2616 ifmedia_add(&adapter->media,
2617 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2618 ifmedia_add(&adapter->media,
2619 IFM_ETHER | IFM_1000_T, 0, NULL);
2622 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link -- bring up the link.  SFP/multispeed-fiber parts
 * defer the work to the mod/msf taskqueue handlers; copper/backplane
 * parts negotiate capabilities and call setup_link directly.
 */
2626 ixgbe_config_link(struct adapter *adapter)
2628 struct ixgbe_hw *hw = &adapter->hw;
2629 u32 autoneg, err = 0;
2630 bool sfp, negotiate;
2632 sfp = ixgbe_is_sfp(hw);
2635 if (hw->phy.multispeed_fiber) {
2636 hw->mac.ops.setup_sfp(hw);
2637 ixgbe_enable_tx_laser(hw);
/* Link setup for fiber happens asynchronously via the MSF tasklet. */
2638 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2640 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2642 if (hw->mac.ops.check_link)
2643 err = ixgbe_check_link(hw, &adapter->link_speed,
2644 &adapter->link_up, FALSE);
/* No advertised speeds configured: fall back to HW capabilities. */
2647 autoneg = hw->phy.autoneg_advertised;
2648 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2649 err = hw->mac.ops.get_link_capabilities(hw,
2650 &autoneg, &negotiate);
2653 if (hw->mac.ops.setup_link)
2654 err = hw->mac.ops.setup_link(hw,
2655 autoneg, adapter->link_up);
2662 /*********************************************************************
2664 * Enable transmit units.
 *
 * Program per-queue TX descriptor ring base/length and head/tail
 * registers, disable head writeback, then (82599+) enable the DMA TX
 * engine and set MTQC with the arbiter temporarily disabled.
2666 **********************************************************************/
2668 ixgbe_initialize_transmit_units(struct adapter *adapter)
2670 struct tx_ring *txr = adapter->tx_rings;
2671 struct ixgbe_hw *hw = &adapter->hw;
2673 /* Setup the Base and Length of the Tx Descriptor Ring */
2675 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2676 u64 tdba = txr->txdma.dma_paddr;
2679 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2680 (tdba & 0x00000000ffffffffULL));
2681 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2682 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2683 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2685 /* Setup the HW Tx Head and Tail descriptor pointers */
2686 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2687 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2689 /* Cache the tail address */
2690 txr->tail = IXGBE_TDT(txr->me);
2692 /* Set the processing limit */
2693 txr->process_limit = ixgbe_tx_process_limit;
/* DCA_TXCTRL register offset differs between 82598 and 82599/X540. */
2695 /* Disable Head Writeback */
2696 switch (hw->mac.type) {
2697 case ixgbe_mac_82598EB:
2698 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2700 case ixgbe_mac_82599EB:
2701 case ixgbe_mac_X540:
2703 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2706 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2707 switch (hw->mac.type) {
2708 case ixgbe_mac_82598EB:
2709 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2711 case ixgbe_mac_82599EB:
2712 case ixgbe_mac_X540:
2714 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2720 if (hw->mac.type != ixgbe_mac_82598EB) {
2721 u32 dmatxctl, rttdcs;
2722 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2723 dmatxctl |= IXGBE_DMATXCTL_TE;
2724 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2725 /* Disable arbiter to set MTQC */
2726 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2727 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2728 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2729 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2730 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2731 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialise_rss_mapping -- seed the RSS hash key with random
 * bytes, fill the redirection table (RETA, plus ERETA on MACs with a
 * larger table) round-robin over the active queues, and enable RSS
 * hashing for the chosen packet types via MRQC.
 */
2738 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2740 struct ixgbe_hw *hw = &adapter->hw;
2742 int i, j, queue_id, table_size;
2744 uint32_t rss_key[10];
2750 /* set up random bits */
2751 arc4rand(&rss_key, sizeof(rss_key), 0);
2753 /* Set multiplier for RETA setup and table size based on MAC */
2756 switch (adapter->hw.mac.type) {
2757 case ixgbe_mac_82598EB:
2760 case ixgbe_mac_X550:
2761 case ixgbe_mac_X550EM_x:
2768 /* Set up the redirection table */
2769 for (i = 0, j = 0; i < table_size; i++, j++) {
2770 if (j == adapter->num_queues) j = 0;
2771 queue_id = (j * index_mult);
2773 * The low 8 bits are for hash value (n+0);
2774 * The next 8 bits are for hash value (n+1), etc.
2777 reta = reta | ( ((uint32_t) queue_id) << 24);
/* Every 4th entry completes a 32-bit register; entries past 32 regs go to ERETA. */
2780 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2782 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2787 /* Now fill our hash function seeds */
2788 for (int i = 0; i < 10; i++)
2789 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2791 /* Perform hash on these packet types */
2793 * Disable UDP - IP fragments aren't currently being handled
2794 * and so we end up with a mix of 2-tuple and 4-tuple
2797 mrqc = IXGBE_MRQC_RSSEN
2798 | IXGBE_MRQC_RSS_FIELD_IPV4
2799 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2801 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2803 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2804 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2805 | IXGBE_MRQC_RSS_FIELD_IPV6
2806 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2808 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2809 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2812 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2816 /*********************************************************************
2818 * Setup receive registers and features.
 *
 * Disables RX while programming per-queue descriptor rings and
 * SRRCTL, configures jumbo/CRC-strip behavior, then sets up RSS and
 * the receive checksum offload register.
2820 **********************************************************************/
2821 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/* Round a buffer size up to the next SRRCTL BSIZEPKT granule. */
2823 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2826 ixgbe_initialize_receive_units(struct adapter *adapter)
2828 struct rx_ring *rxr = adapter->rx_rings;
2829 struct ixgbe_hw *hw = &adapter->hw;
2830 struct ifnet *ifp = adapter->ifp;
2831 u32 bufsz, fctrl, srrctl, rxcsum;
2836 * Make sure receives are disabled while
2837 * setting up the descriptor ring
2839 ixgbe_disable_rx(hw);
2841 /* Enable broadcasts */
2842 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2843 fctrl |= IXGBE_FCTRL_BAM;
2844 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2845 fctrl |= IXGBE_FCTRL_DPF;
2846 fctrl |= IXGBE_FCTRL_PMCF;
2848 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2850 /* Set for Jumbo Frames? */
2851 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2852 if (ifp->if_mtu > ETHERMTU)
2853 hlreg |= IXGBE_HLREG0_JUMBOEN;
2855 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2857 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2858 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2859 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2861 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2862 #endif /* DEV_NETMAP */
2863 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2865 bufsz = (adapter->rx_mbuf_sz +
2866 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2868 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2869 u64 rdba = rxr->rxdma.dma_paddr;
2871 /* Setup the Base and Length of the Rx Descriptor Ring */
2872 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2873 (rdba & 0x00000000ffffffffULL));
2874 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2875 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2876 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2878 /* Set up the SRRCTL register */
2879 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2880 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2881 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2883 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2886 * Set DROP_EN iff we have no flow control and >1 queue.
2887 * Note that srrctl was cleared shortly before during reset,
2888 * so we do not need to clear the bit, but do it just in case
2889 * this code is moved elsewhere.
2891 if (adapter->num_queues > 1 &&
2892 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2893 srrctl |= IXGBE_SRRCTL_DROP_EN;
2895 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2898 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2900 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2901 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2902 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2904 /* Set the processing limit */
2905 rxr->process_limit = ixgbe_rx_process_limit;
2907 /* Set the driver rx tail address */
2908 rxr->tail = IXGBE_RDT(rxr->me);
/* Packet-split header types only exist on 82599 and newer MACs. */
2911 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2912 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2913 IXGBE_PSRTYPE_UDPHDR |
2914 IXGBE_PSRTYPE_IPV4HDR |
2915 IXGBE_PSRTYPE_IPV6HDR;
2916 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2919 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2921 ixgbe_initialise_rss_mapping(adapter);
2923 if (adapter->num_queues > 1) {
2924 /* RSS and RX IPP Checksum are mutually exclusive */
2925 rxcsum |= IXGBE_RXCSUM_PCSD;
2928 if (ifp->if_capenable & IFCAP_RXCSUM)
2929 rxcsum |= IXGBE_RXCSUM_PCSD;
2931 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2932 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2934 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2941 ** This routine is run via an vlan config EVENT,
2942 ** it enables us to use the HW Filter table since
2943 ** we can get the vlan id. This just creates the
2944 ** entry in the soft version of the VFTA, init will
2945 ** repopulate the real table.
2948 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2950 struct adapter *adapter = ifp->if_softc;
/* Events are broadcast to every interface; only handle our own. */
2953 if (ifp->if_softc != arg) /* Not our event */
2956 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2959 IXGBE_CORE_LOCK(adapter);
/* VFTA is 128 x 32-bit words: word index = vtag >> 5, bit = low 5 bits. */
2960 index = (vtag >> 5) & 0x7F;
2962 adapter->shadow_vfta[index] |= (1 << bit);
2963 ++adapter->num_vlans;
2964 ixgbe_setup_vlan_hw_support(adapter);
2965 IXGBE_CORE_UNLOCK(adapter);
2969 ** This routine is run via an vlan
2970 ** unconfig EVENT, remove our entry
2971 ** in the soft vfta.
2974 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2976 struct adapter *adapter = ifp->if_softc;
/* Ignore events for other interfaces and out-of-range VLAN ids. */
2979 if (ifp->if_softc != arg)
2982 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2985 IXGBE_CORE_LOCK(adapter);
2986 index = (vtag >> 5) & 0x7F;
2988 adapter->shadow_vfta[index] &= ~(1 << bit);
2989 --adapter->num_vlans;
2990 /* Re-init to load the changes */
2991 ixgbe_setup_vlan_hw_support(adapter);
2992 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support -- push the soft VFTA state into the
 * hardware after a (soft) reset: enable per-queue VLAN stripping,
 * repopulate the VFTA registers, and enable the filter table when
 * IFCAP_VLAN_HWFILTER is turned on.
 */
2996 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2998 struct ifnet *ifp = adapter->ifp;
2999 struct ixgbe_hw *hw = &adapter->hw;
3000 struct rx_ring *rxr;
3005 ** We get here thru init_locked, meaning
3006 ** a soft reset, this has already cleared
3007 ** the VFTA and other state, so if there
3008 ** have been no vlan's registered do nothing.
3010 if (adapter->num_vlans == 0)
3013 /* Setup the queues for vlans */
3014 for (int i = 0; i < adapter->num_queues; i++) {
3015 rxr = &adapter->rx_rings[i];
3016 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3017 if (hw->mac.type != ixgbe_mac_82598EB) {
3018 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3019 ctrl |= IXGBE_RXDCTL_VME;
3020 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3022 rxr->vtag_strip = TRUE;
3025 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3028 ** A soft reset zero's out the VFTA, so
3029 ** we need to repopulate it now.
3031 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3032 if (adapter->shadow_vfta[i] != 0)
3033 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3034 adapter->shadow_vfta[i]);
3036 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3037 /* Enable the Filter Table if enabled */
3038 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3039 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3040 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL), not per-queue. */
3042 if (hw->mac.type == ixgbe_mac_82598EB)
3043 ctrl |= IXGBE_VLNCTRL_VME;
3044 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr -- build the EIMS enable mask for the MAC type
 * (ECC, thermal-sensor, SFP SDP pins, flow director as applicable),
 * configure EIAC auto-clear under MSI-X, and enable every queue
 * interrupt.
 */
3048 ixgbe_enable_intr(struct adapter *adapter)
3050 struct ixgbe_hw *hw = &adapter->hw;
3051 struct ix_queue *que = adapter->queues;
3054 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3055 /* Enable Fan Failure detection */
3056 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3057 mask |= IXGBE_EIMS_GPI_SDP1;
3059 switch (adapter->hw.mac.type) {
3060 case ixgbe_mac_82599EB:
3061 mask |= IXGBE_EIMS_ECC;
3062 /* Temperature sensor on some adapters */
3063 mask |= IXGBE_EIMS_GPI_SDP0;
3064 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3065 mask |= IXGBE_EIMS_GPI_SDP1;
3066 mask |= IXGBE_EIMS_GPI_SDP2;
3068 mask |= IXGBE_EIMS_FLOW_DIR;
3071 case ixgbe_mac_X540:
3072 /* Detect if Thermal Sensor is enabled */
3073 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3074 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3075 mask |= IXGBE_EIMS_TS;
3076 mask |= IXGBE_EIMS_ECC;
3078 mask |= IXGBE_EIMS_FLOW_DIR;
3081 case ixgbe_mac_X550:
3082 case ixgbe_mac_X550EM_x:
3083 /* MAC thermal sensor is automatically enabled */
3084 mask |= IXGBE_EIMS_TS;
3085 /* Some devices use SDP0 for important information */
3086 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3087 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3088 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3089 mask |= IXGBE_EIMS_ECC;
3091 mask |= IXGBE_EIMS_FLOW_DIR;
3098 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3100 /* With MSI-X we use auto clear */
3101 if (adapter->msix_mem) {
3102 mask = IXGBE_EIMS_ENABLE_MASK;
3103 /* Don't autoclear Link */
3104 mask &= ~IXGBE_EIMS_OTHER;
3105 mask &= ~IXGBE_EIMS_LSC;
3106 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3110 ** Now enable all queues, this is done separately to
3111 ** allow for handling the extended (beyond 32) MSIX
3112 ** vectors that can be used by 82599
3114 for (int i = 0; i < adapter->num_queues; i++, que++)
3115 ixgbe_enable_queue(adapter, que->msix);
3117 IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr -- mask all interrupt causes.  82598 clears the
 * single EIMC register; newer MACs also clear the extended EIMC_EX
 * registers for MSI-X vectors beyond 16.
 */
3123 ixgbe_disable_intr(struct adapter *adapter)
3125 if (adapter->msix_mem)
3126 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3127 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3128 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3131 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3134 IXGBE_WRITE_FLUSH(&adapter->hw);
3139 ** Get the width and transaction speed of
3140 ** the slot this adapter is plugged into.
 **
 ** For the quad-port 82599 SFP (SF_QP) part the shared-code query
 ** reports the on-board bridge, so this walks the PCI tree up to the
 ** real expansion slot and reads the link status register directly.
 ** Warns when the slot's bandwidth is below the adapter's needs.
3143 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3145 device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
3146 struct ixgbe_mac_info *mac = &hw->mac;
3150 /* For most devices simply call the shared code routine */
3151 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3152 ixgbe_get_bus_info(hw);
3153 /* These devices don't use PCI-E */
3154 switch (hw->mac.type) {
3155 case ixgbe_mac_X550EM_x:
3163 ** For the Quad port adapter we need to parse back
3164 ** up the PCI tree to find the speed of the expansion
3165 ** slot into which this adapter is plugged. A bit more work.
3167 dev = device_get_parent(device_get_parent(dev));
3169 device_printf(dev, "parent pcib = %x,%x,%x\n",
3170 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3172 dev = device_get_parent(device_get_parent(dev));
3174 device_printf(dev, "slot pcib = %x,%x,%x\n",
3175 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3177 /* Now get the PCI Express Capabilities offset */
3178 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3179 /* ...and read the Link Status Register */
3180 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3181 switch (link & IXGBE_PCI_LINK_WIDTH) {
3182 case IXGBE_PCI_LINK_WIDTH_1:
3183 hw->bus.width = ixgbe_bus_width_pcie_x1;
3185 case IXGBE_PCI_LINK_WIDTH_2:
3186 hw->bus.width = ixgbe_bus_width_pcie_x2;
3188 case IXGBE_PCI_LINK_WIDTH_4:
3189 hw->bus.width = ixgbe_bus_width_pcie_x4;
3191 case IXGBE_PCI_LINK_WIDTH_8:
3192 hw->bus.width = ixgbe_bus_width_pcie_x8;
3195 hw->bus.width = ixgbe_bus_width_unknown;
3199 switch (link & IXGBE_PCI_LINK_SPEED) {
3200 case IXGBE_PCI_LINK_SPEED_2500:
3201 hw->bus.speed = ixgbe_bus_speed_2500;
3203 case IXGBE_PCI_LINK_SPEED_5000:
3204 hw->bus.speed = ixgbe_bus_speed_5000;
3206 case IXGBE_PCI_LINK_SPEED_8000:
3207 hw->bus.speed = ixgbe_bus_speed_8000;
3210 hw->bus.speed = ixgbe_bus_speed_unknown;
3214 mac->ops.set_lan_id(hw);
3217 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3218 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3219 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3220 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3221 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3222 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3223 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3226 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3227 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3228 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3229 device_printf(dev, "PCI-Express bandwidth available"
3230 " for this card\n is not sufficient for"
3231 " optimal performance.\n");
3232 device_printf(dev, "For optimal performance a x8 "
3233 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3235 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3236 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3237 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3238 device_printf(dev, "PCI-Express bandwidth available"
3239 " for this card\n is not sufficient for"
3240 " optimal performance.\n");
3241 device_printf(dev, "For optimal performance a x8 "
3242 "PCIE Gen3 slot is required.\n");
3250 ** Setup the correct IVAR register for a particular MSIX interrupt
3251 ** (yes this is all very magic and confusing :)
3252 ** - entry is the register array entry
3253 ** - vector is the MSIX vector for this queue
3254 ** - type is RX/TX/MISC
 **
 ** type: 0 = RX, 1 = TX, -1 = misc/other causes.  The IVAR layout
 ** (byte lanes per entry) differs between the 82598 and later MACs.
3257 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3259 struct ixgbe_hw *hw = &adapter->hw;
/* The hardware requires the ALLOC_VAL bit set in each IVAR byte. */
3262 vector |= IXGBE_IVAR_ALLOC_VAL;
3264 switch (hw->mac.type) {
3266 case ixgbe_mac_82598EB:
3268 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3270 entry += (type * 64);
/* Four 8-bit entries per 32-bit IVAR register. */
3271 index = (entry >> 2) & 0x1F;
3272 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3273 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3274 ivar |= (vector << (8 * (entry & 0x3)));
3275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3278 case ixgbe_mac_82599EB:
3279 case ixgbe_mac_X540:
3280 case ixgbe_mac_X550:
3281 case ixgbe_mac_X550EM_x:
3282 if (type == -1) { /* MISC IVAR */
3283 index = (entry & 1) * 8;
3284 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3285 ivar &= ~(0xFF << index);
3286 ivar |= (vector << index);
3287 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3288 } else { /* RX/TX IVARS */
3289 index = (16 * (entry & 1)) + (8 * type);
3290 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3291 ivar &= ~(0xFF << index);
3292 ivar |= (vector << index);
3293 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars -- map every queue's RX and TX causes to its
 * MSI-X vector via ixgbe_set_ivar(), seed EITR from the
 * max-interrupt-rate tunable, and map the link cause last.
 */
3302 ixgbe_configure_ivars(struct adapter *adapter)
3304 struct ix_queue *que = adapter->queues;
/* EITR interval derived from the tunable; low 3 bits must stay clear. */
3307 if (ixgbe_max_interrupt_rate > 0)
3308 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3311 ** Disable DMA coalescing if interrupt moderation is
3318 for (int i = 0; i < adapter->num_queues; i++, que++) {
3319 /* First the RX queue entry */
3320 ixgbe_set_ivar(adapter, i, que->msix, 0);
3321 /* ... and the TX */
3322 ixgbe_set_ivar(adapter, i, que->msix, 1);
3323 /* Set an Initial EITR value */
3324 IXGBE_WRITE_REG(&adapter->hw,
3325 IXGBE_EITR(que->msix), newitr);
3328 /* For the Link interrupt */
3329 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3333 ** ixgbe_sfp_probe - called in the local timer to
3334 ** determine if a port had optics inserted.
 **
 ** Returns TRUE when a newly-inserted supported module was found
 ** (result defaults to FALSE otherwise).  Only probes NL-PHY ports
 ** that currently report no SFP present.
3336 static bool ixgbe_sfp_probe(struct adapter *adapter)
3338 struct ixgbe_hw *hw = &adapter->hw;
3339 device_t dev = adapter->dev;
3340 bool result = FALSE;
3342 if ((hw->phy.type == ixgbe_phy_nl) &&
3343 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3344 s32 ret = hw->phy.ops.identify_sfp(hw);
3347 ret = hw->phy.ops.reset(hw);
/* Unsupported module: stop probing until driver reload. */
3348 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3349 device_printf(dev,"Unsupported SFP+ module detected!");
3350 printf(" Reload driver with supported module.\n");
3351 adapter->sfp_probe = FALSE;
3354 device_printf(dev,"SFP+ module detected!\n");
3355 /* We now have supported optics */
3356 adapter->sfp_probe = FALSE;
3357 /* Set the optics type so system reports correctly */
3358 ixgbe_setup_optics(adapter);
3366 ** Tasklet handler for MSIX Link interrupts
3367 ** - do outside interrupt since it might sleep
 **
 ** Re-reads link state from the hardware and propagates it to the
 ** stack via ixgbe_update_link_status().
3370 ixgbe_handle_link(void *context, int pending)
3372 struct adapter *adapter = context;
3374 ixgbe_check_link(&adapter->hw,
3375 &adapter->link_speed, &adapter->link_up, 0);
3376 ixgbe_update_link_status(adapter);
3380 ** Tasklet for handling SFP module interrupts
 **
 ** Identifies the (re)inserted module, configures it, and chains to
 ** the multispeed-fiber tasklet to renegotiate link.
3383 ixgbe_handle_mod(void *context, int pending)
3385 struct adapter *adapter = context;
3386 struct ixgbe_hw *hw = &adapter->hw;
3387 device_t dev = adapter->dev;
3390 err = hw->phy.ops.identify_sfp(hw);
3391 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3393 "Unsupported SFP+ module type was detected.\n");
3396 err = hw->mac.ops.setup_sfp(hw);
3397 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3399 "Setup failure - unsupported SFP+ module type.\n");
3402 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3408 ** Tasklet for handling MSF (multispeed fiber) interrupts
 **
 ** Re-identifies the optics, renegotiates link capabilities, and
 ** rebuilds the ifmedia list so advertised media match the module.
3411 ixgbe_handle_msf(void *context, int pending)
3413 struct adapter *adapter = context;
3414 struct ixgbe_hw *hw = &adapter->hw;
3419 err = hw->phy.ops.identify_sfp(hw);
3421 ixgbe_setup_optics(adapter);
3422 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3425 autoneg = hw->phy.autoneg_advertised;
3426 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3427 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3428 if (hw->mac.ops.setup_link)
3429 hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Media set may have changed with the new module: rebuild it. */
3431 ifmedia_removeall(&adapter->media);
3432 ixgbe_add_media_types(adapter);
3437 ** Tasklet for handling interrupts from an external PHY
 **
 ** Services the Link Alarm Status Interrupt (LASI); an over-temp
 ** condition is reported loudly since the PHY will throttle itself.
3440 ixgbe_handle_phy(void *context, int pending)
3442 struct adapter *adapter = context;
3443 struct ixgbe_hw *hw = &adapter->hw;
3446 error = hw->phy.ops.handle_lasi(hw)
;
3447 if (error == IXGBE_ERR_OVERTEMP)
3448 device_printf(adapter->dev,
3449 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3450 " PHY will downshift to lower power state!\n");
3452 device_printf(adapter->dev,
3453 "Error handling LASI interrupt: %d\n",
3460 ** Tasklet for reinitializing the Flow Director filter table
 **
 ** Rebuilds the 82599 flow-director tables after a flush, re-enables
 ** the FDIR interrupt cause, and marks the interface running again.
3463 ixgbe_reinit_fdir(void *context, int pending)
3465 struct adapter *adapter = context;
3466 struct ifnet *ifp = adapter->ifp;
3468 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3470 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3471 adapter->fdir_reinit = 0;
3472 /* re-enable flow director interrupts */
3473 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3474 /* Restart the interface */
3475 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3480 /*********************************************************************
3482 * Configure DMA Coalescing
 *
 * X550-class only.  Reprograms the DMAC watchdog/link-speed config
 * when either the sysctl value or the link speed has changed.
3484 **********************************************************************/
3486 ixgbe_config_dmac(struct adapter *adapter)
3488 struct ixgbe_hw *hw = &adapter->hw;
3489 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMA coalescing exists only on X550 and later with a dmac_config op. */
3491 if (hw->mac.type < ixgbe_mac_X550 ||
3492 !hw->mac.ops.dmac_config)
/* XOR test: reconfigure only when watchdog or link speed changed. */
3495 if (dcfg->watchdog_timer ^ adapter->dmac ||
3496 dcfg->link_speed ^ adapter->link_speed) {
3497 dcfg->watchdog_timer = adapter->dmac;
3498 dcfg->fcoe_en = false;
3499 dcfg->link_speed = adapter->link_speed;
3502 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3503 dcfg->watchdog_timer, dcfg->link_speed);
3505 hw->mac.ops.dmac_config(hw);
3510 * Checks whether the adapter supports Energy Efficient Ethernet
3511 * or not, based on device ID.
 *
 * Sets both eee_support and the initial eee_enabled flag from a
 * fixed list of EEE-capable device ids.
3514 ixgbe_check_eee_support(struct adapter *adapter)
3516 struct ixgbe_hw *hw = &adapter->hw;
3518 adapter->eee_support = adapter->eee_enabled =
3519 (hw->device_id == IXGBE_DEV_ID_X550T ||
3520 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3524 * Checks whether the adapter's ports are capable of
3525 * Wake On LAN by reading the adapter's NVM.
3527 * Sets each port's hw->wol_enabled value depending
3528 * on the value read here.
3531 ixgbe_check_wol_support(struct adapter *adapter)
3533 struct ixgbe_hw *hw = &adapter->hw;
3536 /* Find out WoL support for port */
3537 adapter->wol_support = hw->wol_enabled = 0;
3538 ixgbe_get_device_caps(hw, &dev_caps);
/* WoL may be available on all ports or only port 0 (per NVM caps). */
3539 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3540 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3542 adapter->wol_support = hw->wol_enabled = 1;
3544 /* Save initial wake up filter configuration */
3545 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3551 * Prepare the adapter/port for LPLU and/or WoL
 *
 * Must be called with the core lock held.  X550EM baseT parts get
 * the full wakeup/LPLU programming; all other adapters are simply
 * stopped.
3554 ixgbe_setup_low_power_mode(struct adapter *adapter)
3556 struct ixgbe_hw *hw = &adapter->hw;
3557 device_t dev = adapter->dev;
3560 mtx_assert(&adapter->core_mtx, MA_OWNED);
3562 /* Limit power management flow to X550EM baseT */
3563 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3564 && hw->phy.ops.enter_lplu) {
3565 /* Turn off support for APM wakeup. (Using ACPI instead) */
3566 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3567 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3570 * Clear Wake Up Status register to prevent any previous wakeup
3571 * events from waking us up immediately after we suspend.
3573 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3576 * Program the Wakeup Filter Control register with user filter
3579 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3581 /* Enable wakeups and power management in Wakeup Control */
3582 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3583 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3585 /* X550EM baseT adapters need a special LPLU flow */
/* Keep the PHY from being reset while entering LPLU. */
3586 hw->phy.reset_disable = true;
3587 ixgbe_stop(adapter);
3588 error = hw->phy.ops.enter_lplu(hw);
3591 "Error entering LPLU: %d\n", error);
3592 hw->phy.reset_disable = false;
3594 /* Just stop for other adapters */
3595 ixgbe_stop(adapter);
3601 /**********************************************************************
3603 * Update the board statistics counters.
3605 **********************************************************************/
3607 ixgbe_update_stats_counters(struct adapter *adapter)
3609 struct ixgbe_hw *hw = &adapter->hw;
3610 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3611 u64 total_missed_rx = 0;
3613 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3614 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3615 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3616 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3618 for (int i = 0; i < 16; i++) {
3619 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3620 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3621 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3623 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3624 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3625 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3627 /* Hardware workaround, gprc counts missed packets */
3628 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3629 adapter->stats.pf.gprc -= missed_rx;
3631 if (hw->mac.type != ixgbe_mac_82598EB) {
3632 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3633 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3634 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3635 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3636 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3637 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3638 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3639 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3641 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3642 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3643 /* 82598 only has a counter in the high register */
3644 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3645 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3646 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3650 * Workaround: mprc hardware is incorrectly counting
3651 * broadcasts, so for now we subtract those.
3653 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3654 adapter->stats.pf.bprc += bprc;
3655 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3656 if (hw->mac.type == ixgbe_mac_82598EB)
3657 adapter->stats.pf.mprc -= bprc;
3659 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3660 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3661 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3662 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3663 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3664 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3666 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3667 adapter->stats.pf.lxontxc += lxon;
3668 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3669 adapter->stats.pf.lxofftxc += lxoff;
3670 total = lxon + lxoff;
3672 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3673 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3674 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3675 adapter->stats.pf.gptc -= total;
3676 adapter->stats.pf.mptc -= total;
3677 adapter->stats.pf.ptc64 -= total;
3678 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3680 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3681 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3682 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3683 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3684 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3685 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3686 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3687 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3688 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3689 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3690 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3691 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3692 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3693 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3694 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3695 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3696 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3697 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3698 /* Only read FCOE on 82599 */
3699 if (hw->mac.type != ixgbe_mac_82598EB) {
3700 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3701 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3702 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3703 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3704 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3707 /* Fill out the OS statistics structure */
3708 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3709 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3710 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3711 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3712 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3713 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3714 IXGBE_SET_COLLISIONS(adapter, 0);
3715 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3716 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3717 + adapter->stats.pf.rlec);
3720 #if __FreeBSD_version >= 1100036
/*
 * if_get_counter(9) method: return the requested interface counter from
 * the soft copies maintained by ixgbe_update_stats_counters().  OQDROPS
 * is summed live from each TX ring's buf_ring drop count; anything not
 * handled here falls through to the stack's default accounting.
 */
3722 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3724 struct adapter *adapter;
3725 struct tx_ring *txr;
3728 adapter = if_getsoftc(ifp);
3731 case IFCOUNTER_IPACKETS:
3732 return (adapter->ipackets);
3733 case IFCOUNTER_OPACKETS:
3734 return (adapter->opackets);
3735 case IFCOUNTER_IBYTES:
3736 return (adapter->ibytes);
3737 case IFCOUNTER_OBYTES:
3738 return (adapter->obytes);
3739 case IFCOUNTER_IMCASTS:
3740 return (adapter->imcasts);
3741 case IFCOUNTER_OMCASTS:
3742 return (adapter->omcasts);
3743 case IFCOUNTER_COLLISIONS:
3745 case IFCOUNTER_IQDROPS:
3746 return (adapter->iqdrops);
3747 case IFCOUNTER_OQDROPS:
/* Sum buf_ring drops across all TX rings */
3749 txr = adapter->tx_rings;
3750 for (int i = 0; i < adapter->num_queues; i++, txr++)
3751 rv += txr->br->br_drops;
3753 case IFCOUNTER_IERRORS:
3754 return (adapter->ierrors);
3756 return (if_get_counter_default(ifp, cnt));
3761 /** ixgbe_sysctl_tdh_handler - Handler function
3762  *  Retrieves the TDH value from the hardware
/* Read-only sysctl; oid_arg1 carries the tx_ring this queue belongs to. */
3765 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3769 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3772 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3773 error = sysctl_handle_int(oidp, &val, 0, req);
3774 if (error || !req->newptr)
3779 /** ixgbe_sysctl_tdt_handler - Handler function
3780  *  Retrieves the TDT value from the hardware
/* Read-only sysctl; oid_arg1 carries the tx_ring this queue belongs to. */
3783 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3787 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3790 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3791 error = sysctl_handle_int(oidp, &val, 0, req);
3792 if (error || !req->newptr)
3797 /** ixgbe_sysctl_rdh_handler - Handler function
3798  *  Retrieves the RDH value from the hardware
/* Read-only sysctl; oid_arg1 carries the rx_ring this queue belongs to. */
3801 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3805 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3808 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3809 error = sysctl_handle_int(oidp, &val, 0, req);
3810 if (error || !req->newptr)
3815 /** ixgbe_sysctl_rdt_handler - Handler function
3816  *  Retrieves the RDT value from the hardware
/* Read-only sysctl; oid_arg1 carries the rx_ring this queue belongs to. */
3819 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3823 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3826 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3827 error = sysctl_handle_int(oidp, &val, 0, req);
3828 if (error || !req->newptr)
/*
 * Read/write sysctl for a queue's interrupt throttle rate.  Reads the
 * EITR interval for the queue's MSI-X vector and reports it as an
 * interrupts/second rate; on write, converts the requested rate back
 * into an EITR interval.  oid_arg1 carries the ix_queue.
 */
3834 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3837 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3838 unsigned int reg, usec, rate;
3840 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* EITR interval field is bits 3..11 (in units of 2 usec) */
3841 usec = ((reg & 0x0FF8) >> 3);
/* NOTE(review): this divide needs a 'usec > 0' guard to avoid a
 * division by zero when EITR is unthrottled — confirm the guard is
 * present in the full source */
3843 rate = 500000 / usec;
3846 error = sysctl_handle_int(oidp, &rate, 0, req);
3847 if (error || !req->newptr)
3849 reg &= ~0xfff; /* default, no limitation */
3850 ixgbe_max_interrupt_rate = 0;
3851 if (rate > 0 && rate < 500000) {
3854 ixgbe_max_interrupt_rate = rate;
3855 reg |= ((4000000/rate) & 0xff8 );
3857 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * Register the per-device sysctl knobs under the device's sysctl tree:
 * common controls (flow control, AIM, advertised speed, thermal test),
 * plus feature-gated nodes — DMA coalescing on X550-class MACs, EEE on
 * X550T/X550EM_X_KR, WoL on the 10GBaseT parts, and external-PHY
 * temperature sensors on X550EM baseT.
 */
3862 ixgbe_add_device_sysctls(struct adapter *adapter)
3864 device_t dev = adapter->dev;
3865 struct ixgbe_hw *hw = &adapter->hw;
3866 struct sysctl_oid_list *child;
3867 struct sysctl_ctx_list *ctx;
3869 ctx = device_get_sysctl_ctx(dev);
3870 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3872 /* Sysctls for all devices */
3873 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3874 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3875 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3877 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3879 &ixgbe_enable_aim, 1, "Interrupt Moderation");
3881 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3882 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3883 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3885 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3886 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3887 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3889 /* for X550 devices */
3890 if (hw->mac.type >= ixgbe_mac_X550)
3891 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3892 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3893 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3895 /* for X550T and X550EM backplane devices */
3896 if (hw->device_id == IXGBE_DEV_ID_X550T ||
3897 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3898 struct sysctl_oid *eee_node;
3899 struct sysctl_oid_list *eee_list;
3901 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3903 "Energy Efficient Ethernet sysctls");
3904 eee_list = SYSCTL_CHILDREN(eee_node);
3906 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3907 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3908 ixgbe_sysctl_eee_enable, "I",
3909 "Enable or Disable EEE");
3911 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3912 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3913 ixgbe_sysctl_eee_negotiated, "I",
3914 "EEE negotiated on link");
3916 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3917 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3918 ixgbe_sysctl_eee_tx_lpi_status, "I",
3919 "Whether or not TX link is in LPI state");
3921 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3922 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3923 ixgbe_sysctl_eee_rx_lpi_status, "I",
3924 "Whether or not RX link is in LPI state");
3927 /* for certain 10GBaseT devices */
3928 if (hw->device_id == IXGBE_DEV_ID_X550T ||
3929 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3930 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3931 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3932 ixgbe_sysctl_wol_enable, "I",
3933 "Enable/Disable Wake on LAN");
3935 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3936 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3937 ixgbe_sysctl_wufc, "I",
3938 "Enable/Disable Wake Up Filters");
3941 /* for X550EM 10GBaseT devices */
3942 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3943 struct sysctl_oid *phy_node;
3944 struct sysctl_oid_list *phy_list;
3946 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3948 "External PHY sysctls");
3949 phy_list = SYSCTL_CHILDREN(phy_node);
3951 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3952 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3953 ixgbe_sysctl_phy_temp, "I",
3954 "Current External PHY Temperature (Celsius)");
3956 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3957 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3958 ixgbe_sysctl_phy_overtemp_occurred, "I",
3959 "External PHY High Temperature Event Occurred");
3964  * Add sysctl variables, one per statistic, to the system.
/*
 * Builds the sysctl statistics tree: driver-level counters, one node per
 * TX queue, one node per RX queue, and a "mac_stats" node exposing the
 * accumulated ixgbe_hw_stats fields (see ixgbe_update_stats_counters).
 */
3967 ixgbe_add_hw_stats(struct adapter *adapter)
3969 device_t dev = adapter->dev;
3971 struct tx_ring *txr = adapter->tx_rings;
3972 struct rx_ring *rxr = adapter->rx_rings;
3974 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3975 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3976 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3977 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3979 struct sysctl_oid *stat_node, *queue_node;
3980 struct sysctl_oid_list *stat_list, *queue_list;
3982 #define QUEUE_NAME_LEN 32
3983 char namebuf[QUEUE_NAME_LEN];
3985 /* Driver Statistics */
3986 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3987 CTLFLAG_RD, &adapter->dropped_pkts,
3988 "Driver dropped packets");
3989 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3990 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3991 "m_defrag() failed");
3992 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3993 CTLFLAG_RD, &adapter->watchdog_events,
3994 "Watchdog timeouts");
3995 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3996 CTLFLAG_RD, &adapter->link_irq,
3997 "Link MSIX IRQ Handled");
/* Per-TX-queue statistics */
3999 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4000 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4001 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4002 CTLFLAG_RD, NULL, "Queue Name");
4003 queue_list = SYSCTL_CHILDREN(queue_node);
4005 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4006 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4007 sizeof(&adapter->queues[i]),
4008 ixgbe_sysctl_interrupt_rate_handler, "IU",
4010 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4011 CTLFLAG_RD, &(adapter->queues[i].irqs),
4012 "irqs on this queue");
4013 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4014 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4015 ixgbe_sysctl_tdh_handler, "IU",
4016 "Transmit Descriptor Head");
4017 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4018 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4019 ixgbe_sysctl_tdt_handler, "IU",
4020 "Transmit Descriptor Tail");
4021 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4022 CTLFLAG_RD, &txr->tso_tx,
4024 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4025 CTLFLAG_RD, &txr->no_tx_dma_setup,
4026 "Driver tx dma failure in xmit");
4027 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4028 CTLFLAG_RD, &txr->no_desc_avail,
4029 "Queue No Descriptor Available");
4030 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4031 CTLFLAG_RD, &txr->total_packets,
4032 "Queue Packets Transmitted");
4033 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4034 CTLFLAG_RD, &txr->br->br_drops,
4035 "Packets dropped in buf_ring");
/* Per-RX-queue statistics */
4038 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4039 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4040 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4041 CTLFLAG_RD, NULL, "Queue Name");
4042 queue_list = SYSCTL_CHILDREN(queue_node);
4044 struct lro_ctrl *lro = &rxr->lro;
/* NOTE(review): the queue node is created a second time below with the
 * same name and the first queue_node/queue_list is discarded — this
 * repetition looks redundant; confirm against the upstream driver */
4046 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4047 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4048 CTLFLAG_RD, NULL, "Queue Name");
4049 queue_list = SYSCTL_CHILDREN(queue_node);
4051 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4052 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4053 ixgbe_sysctl_rdh_handler, "IU",
4054 "Receive Descriptor Head");
4055 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4056 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4057 ixgbe_sysctl_rdt_handler, "IU",
4058 "Receive Descriptor Tail");
4059 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4060 CTLFLAG_RD, &rxr->rx_packets,
4061 "Queue Packets Received");
4062 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4063 CTLFLAG_RD, &rxr->rx_bytes,
4064 "Queue Bytes Received");
4065 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4066 CTLFLAG_RD, &rxr->rx_copies,
4067 "Copied RX Frames");
4068 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4069 CTLFLAG_RD, &lro->lro_queued, 0,
4071 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4072 CTLFLAG_RD, &lro->lro_flushed, 0,
4076 /* MAC stats get their own sub node */
4078 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4079 CTLFLAG_RD, NULL, "MAC Statistics");
4080 stat_list = SYSCTL_CHILDREN(stat_node);
4082 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4083 CTLFLAG_RD, &stats->crcerrs,
4085 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4086 CTLFLAG_RD, &stats->illerrc,
4087 "Illegal Byte Errors");
4088 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4089 CTLFLAG_RD, &stats->errbc,
4091 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4092 CTLFLAG_RD, &stats->mspdc,
4093 "MAC Short Packets Discarded");
4094 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4095 CTLFLAG_RD, &stats->mlfc,
4096 "MAC Local Faults");
4097 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4098 CTLFLAG_RD, &stats->mrfc,
4099 "MAC Remote Faults");
4100 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4101 CTLFLAG_RD, &stats->rlec,
4102 "Receive Length Errors");
4104 /* Flow Control stats */
4105 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4106 CTLFLAG_RD, &stats->lxontxc,
4107 "Link XON Transmitted");
4108 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4109 CTLFLAG_RD, &stats->lxonrxc,
4110 "Link XON Received");
4111 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4112 CTLFLAG_RD, &stats->lxofftxc,
4113 "Link XOFF Transmitted");
4114 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4115 CTLFLAG_RD, &stats->lxoffrxc,
4116 "Link XOFF Received");
4118 /* Packet Reception Stats */
4119 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4120 CTLFLAG_RD, &stats->tor,
4121 "Total Octets Received");
4122 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4123 CTLFLAG_RD, &stats->gorc,
4124 "Good Octets Received");
4125 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4126 CTLFLAG_RD, &stats->tpr,
4127 "Total Packets Received");
4128 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4129 CTLFLAG_RD, &stats->gprc,
4130 "Good Packets Received");
4131 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4132 CTLFLAG_RD, &stats->mprc,
4133 "Multicast Packets Received");
4134 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4135 CTLFLAG_RD, &stats->bprc,
4136 "Broadcast Packets Received");
4137 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4138 CTLFLAG_RD, &stats->prc64,
4139 "64 byte frames received ");
4140 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4141 CTLFLAG_RD, &stats->prc127,
4142 "65-127 byte frames received");
4143 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4144 CTLFLAG_RD, &stats->prc255,
4145 "128-255 byte frames received");
4146 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4147 CTLFLAG_RD, &stats->prc511,
4148 "256-511 byte frames received");
4149 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4150 CTLFLAG_RD, &stats->prc1023,
4151 "512-1023 byte frames received");
/* NOTE(review): description string says "1023-1522" but the PRC1522
 * bucket covers 1024-1522 byte frames — string left as-is here */
4152 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4153 CTLFLAG_RD, &stats->prc1522,
4154 "1023-1522 byte frames received");
4155 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4156 CTLFLAG_RD, &stats->ruc,
4157 "Receive Undersized");
4158 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4159 CTLFLAG_RD, &stats->rfc,
4160 "Fragmented Packets Received ");
4161 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4162 CTLFLAG_RD, &stats->roc,
4163 "Oversized Packets Received");
4164 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4165 CTLFLAG_RD, &stats->rjc,
4167 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4168 CTLFLAG_RD, &stats->mngprc,
4169 "Management Packets Received");
/* NOTE(review): bound to mngptc (management packets TRANSMITTED) while
 * the name/description say dropped; stats->mngpdc is accumulated in
 * ixgbe_update_stats_counters and looks like the intended field —
 * confirm and fix upstream */
4170 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4171 CTLFLAG_RD, &stats->mngptc,
4172 "Management Packets Dropped");
4173 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4174 CTLFLAG_RD, &stats->xec,
4177 /* Packet Transmission Stats */
4178 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4179 CTLFLAG_RD, &stats->gotc,
4180 "Good Octets Transmitted");
4181 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4182 CTLFLAG_RD, &stats->tpt,
4183 "Total Packets Transmitted");
4184 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4185 CTLFLAG_RD, &stats->gptc,
4186 "Good Packets Transmitted");
4187 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4188 CTLFLAG_RD, &stats->bptc,
4189 "Broadcast Packets Transmitted");
4190 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4191 CTLFLAG_RD, &stats->mptc,
4192 "Multicast Packets Transmitted");
4193 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4194 CTLFLAG_RD, &stats->mngptc,
4195 "Management Packets Transmitted");
4196 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4197 CTLFLAG_RD, &stats->ptc64,
4198 "64 byte frames transmitted ");
4199 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4200 CTLFLAG_RD, &stats->ptc127,
4201 "65-127 byte frames transmitted");
4202 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4203 CTLFLAG_RD, &stats->ptc255,
4204 "128-255 byte frames transmitted");
4205 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4206 CTLFLAG_RD, &stats->ptc511,
4207 "256-511 byte frames transmitted");
4208 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4209 CTLFLAG_RD, &stats->ptc1023,
4210 "512-1023 byte frames transmitted");
4211 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4212 CTLFLAG_RD, &stats->ptc1522,
4213 "1024-1522 byte frames transmitted");
4217 ** Set flow control using sysctl:
4218 ** Flow control values:
/*
 * Sysctl handler: writes the requested ixgbe_fc mode into the hw struct
 * and applies it immediately via ixgbe_fc_enable().  RX packet-drop on
 * full buffers is toggled so multiqueue operation isn't stalled by one
 * full queue when flow control is off (see ixgbe_enable_rx_drop).
 */
4225 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4228 struct adapter *adapter = (struct adapter *) arg1;
4231 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4232 if ((error) || (req->newptr == NULL))
4235 /* Don't bother if it's not changed */
4236 if (adapter->fc == last)
4239 switch (adapter->fc) {
4240 case ixgbe_fc_rx_pause:
4241 case ixgbe_fc_tx_pause:
/* Flow control on: hardware must buffer, so disable RX drop */
4243 adapter->hw.fc.requested_mode = adapter->fc;
4244 if (adapter->num_queues > 1)
4245 ixgbe_disable_rx_drop(adapter);
4248 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4249 if (adapter->num_queues > 1)
4250 ixgbe_enable_rx_drop(adapter);
4256 /* Don't autoneg if forcing a value */
4257 adapter->hw.fc.disable_fc_autoneg = TRUE;
4258 ixgbe_fc_enable(&adapter->hw);
4263 ** Control advertised link speed:
4265 ** 0x1 - advertise 100 Mb
4266 ** 0x2 - advertise 1G
4267 ** 0x4 - advertise 10G
/*
 * Sysctl handler: validates the requested bitmask (copper or multispeed
 * fiber media only; 100Mb only on X540/X550), maps it to ixgbe link
 * speed flags, and restarts link setup with the new advertisement.
 */
4270 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4272 int error = 0, requested;
4273 struct adapter *adapter;
4275 struct ixgbe_hw *hw;
4276 ixgbe_link_speed speed = 0;
4278 adapter = (struct adapter *) arg1;
4282 requested = adapter->advertise;
4283 error = sysctl_handle_int(oidp, &requested, 0, req);
4284 if ((error) || (req->newptr == NULL))
4287 /* Checks to validate new value */
4288 if (adapter->advertise == requested) /* no change */
4291 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4292 (hw->phy.multispeed_fiber))) {
4294 "Advertised speed can only be set on copper or "
4295 "multispeed fiber media types.\n");
4299 if (requested < 0x1 || requested > 0x7) {
4301 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4305 if ((requested & 0x1)
4306 && (hw->mac.type != ixgbe_mac_X540)
4307 && (hw->mac.type != ixgbe_mac_X550)) {
4308 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4312 /* Set new value and report new advertised mode */
4313 if (requested & 0x1)
4314 speed |= IXGBE_LINK_SPEED_100_FULL;
4315 if (requested & 0x2)
4316 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4317 if (requested & 0x4)
4318 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4320 hw->mac.autotry_restart = TRUE;
4321 hw->mac.ops.setup_link(hw, speed, TRUE);
4322 adapter->advertise = requested;
4328  * The following two sysctls are for X550 BaseT devices;
4329  * they deal with the external PHY used in them.
/*
 * Read-only sysctl: returns the external PHY's current temperature,
 * read over MDIO.  Only valid on X550EM baseT (checked below).
 */
4332 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4334 struct adapter *adapter = (struct adapter *) arg1;
4335 struct ixgbe_hw *hw = &adapter->hw;
4338 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4339 device_printf(adapter->dev,
4340 "Device has no supported external thermal sensor.\n");
4344 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4345 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4347 device_printf(adapter->dev,
4348 "Error reading from PHY's current temperature register\n");
4352 /* Shift temp for output */
/* reg is reported via the sysctl as a plain int (degrees Celsius) */
4355 return (sysctl_handle_int(oidp, NULL, reg, req));
4359  * Reports whether the current PHY temperature is over
4360  * the overtemp threshold.
4361  *   - This is reported directly from the PHY
/*
 * Read-only sysctl: returns 1 if the external PHY's overtemp status bit
 * (0x4000) is set, 0 otherwise.  X550EM baseT only.
 */
4364 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4366 struct adapter *adapter = (struct adapter *) arg1;
4367 struct ixgbe_hw *hw = &adapter->hw;
4370 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4371 device_printf(adapter->dev,
4372 "Device has no supported external thermal sensor.\n");
4376 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4377 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4379 device_printf(adapter->dev,
4380 "Error reading from PHY's temperature status register\n");
4384 /* Get occurrence bit */
4385 reg = !!(reg & 0x4000);
4386 return (sysctl_handle_int(oidp, 0, reg, req));
4390 ** Thermal Shutdown Trigger (internal MAC)
4391 **   - Set this to 1 to cause an overtemp event to occur
/*
 * Debug sysctl: writing a nonzero value injects a thermal-sensor (TS)
 * interrupt through EICS, simulating an overtemp event for testing the
 * driver's shutdown path.
 */
4394 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4396 struct adapter *adapter = (struct adapter *) arg1;
4397 struct ixgbe_hw *hw = &adapter->hw;
4398 int error, fire = 0;
4400 error = sysctl_handle_int(oidp, &fire, 0, req);
4401 if ((error) || (req->newptr == NULL))
4405 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4406 reg |= IXGBE_EICR_TS;
4407 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4414 ** Manage DMA Coalescing.
4416 ** 0/1 - off / on (use default value of 1000)
4418 ** Legal timer values are:
4419 ** 50,100,250,500,1000,2000,5000,10000
4421 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler for adapter->dmac: X550-class MACs only.  Writing 1
 * selects the default 1000us watchdog; illegal values are reverted to
 * the previous setting.  A reinit applies the change if the interface
 * is running.
 */
4424 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4426 struct adapter *adapter = (struct adapter *) arg1;
4427 struct ixgbe_hw *hw = &adapter->hw;
4428 struct ifnet *ifp = adapter->ifp;
4432 oldval = adapter->dmac;
4433 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4434 if ((error) || (req->newptr == NULL))
4437 switch (hw->mac.type) {
4438 case ixgbe_mac_X550:
4439 case ixgbe_mac_X550EM_x:
4442 device_printf(adapter->dev,
4443 "DMA Coalescing is only supported on X550 devices\n");
4447 switch (adapter->dmac) {
4451 case 1: /* Enable and use default */
4452 adapter->dmac = 1000;
4462 /* Legal values - allow */
4465 /* Do nothing, illegal value */
4466 adapter->dmac = oldval;
4470 /* Re-initialize hardware if it's already running */
4471 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4472 ixgbe_init(adapter);
4478  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Enabling is refused when the NVM did not advertise WoL support for
 * this port (adapter->wol_support, set in ixgbe_check_wol_support).
 */
4484 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4486 struct adapter *adapter = (struct adapter *) arg1;
4487 struct ixgbe_hw *hw = &adapter->hw;
4488 int new_wol_enabled;
4491 new_wol_enabled = hw->wol_enabled;
4492 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4493 if ((error) || (req->newptr == NULL))
4495 if (new_wol_enabled == hw->wol_enabled)
4498 if (new_wol_enabled > 0 && !adapter->wol_support)
/* Normalize any nonzero value to 1 */
4501 hw->wol_enabled = !!(new_wol_enabled);
4507  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4508  * if supported by the adapter.
/*
 * Enabling is refused when EEE is unsupported on this device
 * (adapter->eee_support, set in ixgbe_check_eee_support).  The adapter
 * is reinitialized to apply the change if it is running.
 */
4514 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4516 struct adapter *adapter = (struct adapter *) arg1;
4517 struct ifnet *ifp = adapter->ifp;
4518 int new_eee_enabled, error = 0;
4520 new_eee_enabled = adapter->eee_enabled;
4521 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4522 if ((error) || (req->newptr == NULL))
4524 if (new_eee_enabled == adapter->eee_enabled)
4527 if (new_eee_enabled > 0 && !adapter->eee_support)
/* Normalize any nonzero value to 1 */
4530 adapter->eee_enabled = !!(new_eee_enabled);
4532 /* Re-initialize hardware if it's already running */
4533 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4534 ixgbe_init(adapter);
4540  * Read-only sysctl indicating whether EEE support was negotiated
/* Reports the EEE negotiation bit from the EEE_STAT register as 0/1. */
4544 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4546 struct adapter *adapter = (struct adapter *) arg1;
4547 struct ixgbe_hw *hw = &adapter->hw;
4550 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4552 return (sysctl_handle_int(oidp, 0, status, req));
4556  * Read-only sysctl indicating whether RX Link is in LPI state.
/* Reports the RX LPI status bit from EEE_STAT as 0/1. */
4559 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4561 struct adapter *adapter = (struct adapter *) arg1;
4562 struct ixgbe_hw *hw = &adapter->hw;
4565 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4566 IXGBE_EEE_RX_LPI_STATUS);
4568 return (sysctl_handle_int(oidp, 0, status, req));
4572  * Read-only sysctl indicating whether TX Link is in LPI state.
/* Reports the TX LPI status bit from EEE_STAT as 0/1. */
4575 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4577 struct adapter *adapter = (struct adapter *) arg1;
4578 struct ixgbe_hw *hw = &adapter->hw;
4581 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4582 IXGBE_EEE_TX_LPI_STATUS);
4584 return (sysctl_handle_int(oidp, 0, status, req));
4588  * Sysctl to enable/disable the types of packets that the
4589  * adapter will wake up on upon receipt.
4590  * WUFC - Wake Up Filter Control
4592  * 0x1  - Link Status Change
4593  * 0x2  - Magic Packet
4594  * 0x4  - Direct Exact
4595  * 0x8  - Directed Multicast
4597  * 0x20 - ARP/IPv4 Request Packet
4598  * 0x40 - Direct IPv4 Packet
4599  * 0x80 - Direct IPv6 Packet
4601  * Setting another flag will cause the sysctl to return an
/*
 * Only the low byte of WUFC is user-controllable; any bits above 0xFF
 * in the new value are rejected.  The updated mask is written to the
 * hardware later, in ixgbe_setup_low_power_mode().
 */
4605 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4607 struct adapter *adapter = (struct adapter *) arg1;
4611 new_wufc = adapter->wufc;
4613 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4614 if ((error) || (req->newptr == NULL))
4616 if (new_wufc == adapter->wufc)
4619 if (new_wufc & 0xffffff00)
/* Preserve the hardware-managed upper bits of the saved mask */
4623 new_wufc |= (0xffffff & adapter->wufc);
4624 adapter->wufc = new_wufc;
4631 ** Enable the hardware to drop packets when the buffer is
4632 ** full. This is useful when multiqueue, so that no single
4633 ** queue being full stalls the entire RX engine. We only
4634 ** enable this when Multiqueue AND when Flow Control is
/* Sets SRRCTL.DROP_EN on every RX queue's split-receive control reg. */
4638 ixgbe_enable_rx_drop(struct adapter *adapter)
4640 struct ixgbe_hw *hw = &adapter->hw;
4642 for (int i = 0; i < adapter->num_queues; i++) {
4643 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4644 srrctl |= IXGBE_SRRCTL_DROP_EN;
4645 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
/*
 * Inverse of ixgbe_enable_rx_drop(): clears SRRCTL.DROP_EN on every RX
 * queue so full buffers stall rather than drop (required when flow
 * control is active — see ixgbe_set_flowcntl).
 */
4650 ixgbe_disable_rx_drop(struct adapter *adapter)
4652 struct ixgbe_hw *hw = &adapter->hw;
4654 for (int i = 0; i < adapter->num_queues; i++) {
4655 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4656 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4657 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4662 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4666 switch (adapter->hw.mac.type) {
4667 case ixgbe_mac_82598EB:
4668 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4669 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4671 case ixgbe_mac_82599EB:
4672 case ixgbe_mac_X540:
4673 case ixgbe_mac_X550:
4674 case ixgbe_mac_X550EM_x:
4675 mask = (queues & 0xFFFFFFFF);
4676 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4677 mask = (queues >> 32);
4678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);