1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
44 * Set this to one to display debug statistics
45 *********************************************************************/
46 int ixgbe_display_debug_stats = 0;
48 /*********************************************************************
50 *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95 /* required last entry */
99 /*********************************************************************
100 * Table of branding strings
101 *********************************************************************/
103 static char *ixgbe_strings[] = {
104 "Intel(R) PRO/10GbE PCI-Express Network Driver"
107 /*********************************************************************
108 * Function prototypes
109 *********************************************************************/
110 static int ixgbe_probe(device_t);
111 static int ixgbe_attach(device_t);
112 static int ixgbe_detach(device_t);
113 static int ixgbe_shutdown(device_t);
114 static int ixgbe_suspend(device_t);
115 static int ixgbe_resume(device_t);
116 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void ixgbe_init(void *);
118 static void ixgbe_init_locked(struct adapter *);
119 static void ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
123 static void ixgbe_add_media_types(struct adapter *);
124 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int ixgbe_media_change(struct ifnet *);
126 static void ixgbe_identify_hardware(struct adapter *);
127 static int ixgbe_allocate_pci_resources(struct adapter *);
128 static void ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int ixgbe_allocate_msix(struct adapter *);
130 static int ixgbe_allocate_legacy(struct adapter *);
131 static int ixgbe_setup_msix(struct adapter *);
132 static void ixgbe_free_pci_resources(struct adapter *);
133 static void ixgbe_local_timer(void *);
134 static int ixgbe_setup_interface(device_t, struct adapter *);
135 static void ixgbe_config_dmac(struct adapter *);
136 static void ixgbe_config_delay_values(struct adapter *);
137 static void ixgbe_config_link(struct adapter *);
138 static void ixgbe_check_eee_support(struct adapter *);
139 static void ixgbe_check_wol_support(struct adapter *);
140 static int ixgbe_setup_low_power_mode(struct adapter *);
141 static void ixgbe_rearm_queues(struct adapter *, u64);
143 static void ixgbe_initialize_transmit_units(struct adapter *);
144 static void ixgbe_initialize_receive_units(struct adapter *);
145 static void ixgbe_enable_rx_drop(struct adapter *);
146 static void ixgbe_disable_rx_drop(struct adapter *);
148 static void ixgbe_enable_intr(struct adapter *);
149 static void ixgbe_disable_intr(struct adapter *);
150 static void ixgbe_update_stats_counters(struct adapter *);
151 static void ixgbe_set_promisc(struct adapter *);
152 static void ixgbe_set_multi(struct adapter *);
153 static void ixgbe_update_link_status(struct adapter *);
154 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void ixgbe_configure_ivars(struct adapter *);
156 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
158 static void ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_add_device_sysctls(struct adapter *);
163 static void ixgbe_add_hw_stats(struct adapter *);
165 /* Sysctl handlers */
166 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
179 /* Support for pluggable optic modules */
180 static bool ixgbe_sfp_probe(struct adapter *);
181 static void ixgbe_setup_optics(struct adapter *);
183 /* Legacy (single vector interrupt handler */
184 static void ixgbe_legacy_irq(void *);
186 /* The MSI/X Interrupt handlers */
187 static void ixgbe_msix_que(void *);
188 static void ixgbe_msix_link(void *);
190 /* Deferred interrupt tasklets */
191 static void ixgbe_handle_que(void *, int);
192 static void ixgbe_handle_link(void *, int);
193 static void ixgbe_handle_msf(void *, int);
194 static void ixgbe_handle_mod(void *, int);
195 static void ixgbe_handle_phy(void *, int);
198 static void ixgbe_reinit_fdir(void *, int);
201 /*********************************************************************
202 * FreeBSD Device Interface Entry Points
203 *********************************************************************/
205 static device_method_t ix_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, ixgbe_probe),
208 DEVMETHOD(device_attach, ixgbe_attach),
209 DEVMETHOD(device_detach, ixgbe_detach),
210 DEVMETHOD(device_shutdown, ixgbe_shutdown),
211 DEVMETHOD(device_suspend, ixgbe_suspend),
212 DEVMETHOD(device_resume, ixgbe_resume),
216 static driver_t ix_driver = {
217 "ix", ix_methods, sizeof(struct adapter),
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
227 ** TUNEABLE PARAMETERS:
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231 "IXGBE driver parameters");
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
239 static int ixgbe_enable_aim = TRUE;
240 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
241 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
242 "Enable adaptive interrupt moderation");
244 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
245 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
246 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
247 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
249 /* How many packets rxeof tries to clean at a time */
250 static int ixgbe_rx_process_limit = 256;
251 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
252 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
253 &ixgbe_rx_process_limit, 0,
254 "Maximum number of received packets to process at a time,"
255 "-1 means unlimited");
257 /* How many packets txeof tries to clean at a time */
258 static int ixgbe_tx_process_limit = 256;
259 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
260 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
261 &ixgbe_tx_process_limit, 0,
262 "Maximum number of sent packets to process at a time,"
263 "-1 means unlimited");
266 ** Smart speed setting, default to on
267 ** this only works as a compile option
268 ** right now as its during attach, set
269 ** this to 'ixgbe_smart_speed_off' to
272 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
275 * MSIX should be the default for best performance,
276 * but this allows it to be forced off for testing.
278 static int ixgbe_enable_msix = 1;
279 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
280 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
281 "Enable MSI-X interrupts");
284 * Number of Queues, can be set to 0,
285 * it then autoconfigures based on the
286 * number of cpus with a max of 8. This
287 * can be overriden manually here.
289 static int ixgbe_num_queues = 0;
290 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
291 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
292 "Number of queues to configure up to a maximum of 8; "
293 "0 indicates autoconfigure");
296 ** Number of TX descriptors per ring,
297 ** setting higher than RX as this seems
298 ** the better performing choice.
300 static int ixgbe_txd = PERFORM_TXD;
301 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
302 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
303 "Number of transmit descriptors per queue");
305 /* Number of RX descriptors per ring */
306 static int ixgbe_rxd = PERFORM_RXD;
307 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
308 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
309 "Number of receive descriptors per queue");
312 ** Defining this on will allow the use
313 ** of unsupported SFP+ modules, note that
314 ** doing so you are on your own :)
316 static int allow_unsupported_sfp = FALSE;
317 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
319 /* Keep running tab on them for sanity check */
320 static int ixgbe_total_ports;
324 ** Flow Director actually 'steals'
325 ** part of the packet buffer as its
326 ** filter pool, this variable controls
328 ** 0 = 64K, 1 = 128K, 2 = 256K
330 static int fdir_pballoc = 1;
335 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
336 * be a reference on how to implement netmap support in a driver.
337 * Additional comments are in ixgbe_netmap.h .
339 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
340 * that extend the standard driver.
342 #include <dev/netmap/ixgbe_netmap.h>
343 #endif /* DEV_NETMAP */
345 /*********************************************************************
346 * Device identification routine
348 * ixgbe_probe determines if the driver should be loaded on
349 * adapter based on PCI vendor/device id of the adapter.
351 * return BUS_PROBE_DEFAULT on success, positive on failure
352 *********************************************************************/
355 ixgbe_probe(device_t dev)
357 ixgbe_vendor_info_t *ent;
359 u16 pci_vendor_id = 0;
360 u16 pci_device_id = 0;
361 u16 pci_subvendor_id = 0;
362 u16 pci_subdevice_id = 0;
363 char adapter_name[256];
365 INIT_DEBUGOUT("ixgbe_probe: begin");
367 pci_vendor_id = pci_get_vendor(dev);
368 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
371 pci_device_id = pci_get_device(dev);
372 pci_subvendor_id = pci_get_subvendor(dev);
373 pci_subdevice_id = pci_get_subdevice(dev);
375 ent = ixgbe_vendor_info_array;
376 while (ent->vendor_id != 0) {
377 if ((pci_vendor_id == ent->vendor_id) &&
378 (pci_device_id == ent->device_id) &&
380 ((pci_subvendor_id == ent->subvendor_id) ||
381 (ent->subvendor_id == 0)) &&
383 ((pci_subdevice_id == ent->subdevice_id) ||
384 (ent->subdevice_id == 0))) {
385 sprintf(adapter_name, "%s, Version - %s",
386 ixgbe_strings[ent->index],
387 ixgbe_driver_version);
388 device_set_desc_copy(dev, adapter_name);
390 return (BUS_PROBE_DEFAULT);
397 /*********************************************************************
398 * Device initialization routine
400 * The attach entry point is called when the driver is being loaded.
401 * This routine identifies the type of hardware, allocates all resources
402 * and initializes the hardware.
404 * return 0 on success, positive on failure
405 *********************************************************************/
408 ixgbe_attach(device_t dev)
410 struct adapter *adapter;
416 INIT_DEBUGOUT("ixgbe_attach: begin");
418 /* Allocate, clear, and link in our adapter structure */
419 adapter = device_get_softc(dev);
420 adapter->dev = adapter->osdep.dev = dev;
424 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
426 /* Set up the timer callout */
427 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
429 /* Determine hardware revision */
430 ixgbe_identify_hardware(adapter);
432 /* Do base PCI setup - map BAR0 */
433 if (ixgbe_allocate_pci_resources(adapter)) {
434 device_printf(dev, "Allocation of PCI resources failed\n");
439 /* Do descriptor calc and sanity checks */
440 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
441 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
442 device_printf(dev, "TXD config issue, using default!\n");
443 adapter->num_tx_desc = DEFAULT_TXD;
445 adapter->num_tx_desc = ixgbe_txd;
448 ** With many RX rings it is easy to exceed the
449 ** system mbuf allocation. Tuning nmbclusters
450 ** can alleviate this.
452 if (nmbclusters > 0) {
454 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
455 if (s > nmbclusters) {
456 device_printf(dev, "RX Descriptors exceed "
457 "system mbuf max, using default instead!\n");
458 ixgbe_rxd = DEFAULT_RXD;
462 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
463 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
464 device_printf(dev, "RXD config issue, using default!\n");
465 adapter->num_rx_desc = DEFAULT_RXD;
467 adapter->num_rx_desc = ixgbe_rxd;
469 /* Allocate our TX/RX Queues */
470 if (ixgbe_allocate_queues(adapter)) {
475 /* Allocate multicast array memory. */
476 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
477 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
478 if (adapter->mta == NULL) {
479 device_printf(dev, "Can not allocate multicast setup array\n");
484 /* Initialize the shared code */
485 hw->allow_unsupported_sfp = allow_unsupported_sfp;
486 error = ixgbe_init_shared_code(hw);
487 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
489 ** No optics in this port, set up
490 ** so the timer routine will probe
491 ** for later insertion.
493 adapter->sfp_probe = TRUE;
495 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
496 device_printf(dev,"Unsupported SFP+ module detected!\n");
500 device_printf(dev,"Unable to initialize the shared code\n");
505 /* Make sure we have a good EEPROM before we read from it */
506 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
507 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
512 error = ixgbe_init_hw(hw);
514 case IXGBE_ERR_EEPROM_VERSION:
515 device_printf(dev, "This device is a pre-production adapter/"
516 "LOM. Please be aware there may be issues associated "
517 "with your hardware.\n If you are experiencing problems "
518 "please contact your Intel or hardware representative "
519 "who provided you with this hardware.\n");
521 case IXGBE_ERR_SFP_NOT_SUPPORTED:
522 device_printf(dev,"Unsupported SFP+ Module\n");
525 case IXGBE_ERR_SFP_NOT_PRESENT:
526 device_printf(dev,"No SFP+ Module found\n");
532 /* Detect and set physical type */
533 ixgbe_setup_optics(adapter);
535 if ((adapter->msix > 1) && (ixgbe_enable_msix))
536 error = ixgbe_allocate_msix(adapter);
538 error = ixgbe_allocate_legacy(adapter);
542 /* Setup OS specific network interface */
543 if (ixgbe_setup_interface(dev, adapter) != 0)
546 /* Initialize statistics */
547 ixgbe_update_stats_counters(adapter);
549 /* Register for VLAN events */
550 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
551 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
552 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
553 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
555 /* Check PCIE slot type/speed/width */
556 ixgbe_get_slot_info(hw);
558 /* Set an initial default flow control value */
559 adapter->fc = ixgbe_fc_full;
561 /* Check for certain supported features */
562 ixgbe_check_wol_support(adapter);
563 ixgbe_check_eee_support(adapter);
566 ixgbe_add_device_sysctls(adapter);
567 ixgbe_add_hw_stats(adapter);
569 /* let hardware know driver is loaded */
570 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
571 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
572 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
575 ixgbe_netmap_attach(adapter);
576 #endif /* DEV_NETMAP */
577 INIT_DEBUGOUT("ixgbe_attach: end");
581 ixgbe_free_transmit_structures(adapter);
582 ixgbe_free_receive_structures(adapter);
584 if (adapter->ifp != NULL)
585 if_free(adapter->ifp);
586 ixgbe_free_pci_resources(adapter);
587 free(adapter->mta, M_DEVBUF);
591 /*********************************************************************
592 * Device removal routine
594 * The detach entry point is called when the driver is being removed.
595 * This routine stops the adapter and deallocates all the resources
596 * that were allocated for driver operation.
598 * return 0 on success, positive on failure
599 *********************************************************************/
602 ixgbe_detach(device_t dev)
604 struct adapter *adapter = device_get_softc(dev);
605 struct ix_queue *que = adapter->queues;
606 struct tx_ring *txr = adapter->tx_rings;
609 INIT_DEBUGOUT("ixgbe_detach: begin");
611 /* Make sure VLANS are not using driver */
612 if (adapter->ifp->if_vlantrunk != NULL) {
613 device_printf(dev,"Vlan in use, detach first\n");
617 /* Stop the adapter */
618 IXGBE_CORE_LOCK(adapter);
619 ixgbe_setup_low_power_mode(adapter);
620 IXGBE_CORE_UNLOCK(adapter);
622 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
624 #ifndef IXGBE_LEGACY_TX
625 taskqueue_drain(que->tq, &txr->txq_task);
627 taskqueue_drain(que->tq, &que->que_task);
628 taskqueue_free(que->tq);
632 /* Drain the Link queue */
634 taskqueue_drain(adapter->tq, &adapter->link_task);
635 taskqueue_drain(adapter->tq, &adapter->mod_task);
636 taskqueue_drain(adapter->tq, &adapter->msf_task);
637 taskqueue_drain(adapter->tq, &adapter->phy_task);
639 taskqueue_drain(adapter->tq, &adapter->fdir_task);
641 taskqueue_free(adapter->tq);
644 /* let hardware know driver is unloading */
645 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
646 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
647 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
649 /* Unregister VLAN events */
650 if (adapter->vlan_attach != NULL)
651 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
652 if (adapter->vlan_detach != NULL)
653 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
655 ether_ifdetach(adapter->ifp);
656 callout_drain(&adapter->timer);
658 netmap_detach(adapter->ifp);
659 #endif /* DEV_NETMAP */
660 ixgbe_free_pci_resources(adapter);
661 bus_generic_detach(dev);
662 if_free(adapter->ifp);
664 ixgbe_free_transmit_structures(adapter);
665 ixgbe_free_receive_structures(adapter);
666 free(adapter->mta, M_DEVBUF);
668 IXGBE_CORE_LOCK_DESTROY(adapter);
672 /*********************************************************************
674 * Shutdown entry point
676 **********************************************************************/
679 ixgbe_shutdown(device_t dev)
681 struct adapter *adapter = device_get_softc(dev);
684 INIT_DEBUGOUT("ixgbe_shutdown: begin");
686 IXGBE_CORE_LOCK(adapter);
687 error = ixgbe_setup_low_power_mode(adapter);
688 IXGBE_CORE_UNLOCK(adapter);
694 * Methods for going from:
695 * D0 -> D3: ixgbe_suspend
696 * D3 -> D0: ixgbe_resume
699 ixgbe_suspend(device_t dev)
701 struct adapter *adapter = device_get_softc(dev);
704 INIT_DEBUGOUT("ixgbe_suspend: begin");
706 IXGBE_CORE_LOCK(adapter);
708 error = ixgbe_setup_low_power_mode(adapter);
710 /* Save state and power down */
712 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
714 IXGBE_CORE_UNLOCK(adapter);
720 ixgbe_resume(device_t dev)
722 struct adapter *adapter = device_get_softc(dev);
723 struct ifnet *ifp = adapter->ifp;
724 struct ixgbe_hw *hw = &adapter->hw;
727 INIT_DEBUGOUT("ixgbe_resume: begin");
729 IXGBE_CORE_LOCK(adapter);
731 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
732 pci_restore_state(dev);
734 /* Read & clear WUS register */
735 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
737 device_printf(dev, "Woken up by (WUS): %#010x\n",
738 IXGBE_READ_REG(hw, IXGBE_WUS));
739 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
740 /* And clear WUFC until next low-power transition */
741 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
744 * Required after D3->D0 transition;
745 * will re-advertise all previous advertised speeds
747 if (ifp->if_flags & IFF_UP)
748 ixgbe_init_locked(adapter);
750 IXGBE_CORE_UNLOCK(adapter);
752 INIT_DEBUGOUT("ixgbe_resume: end");
757 /*********************************************************************
760 * ixgbe_ioctl is called when the user wants to configure the
763 * return 0 on success, positive on failure
764 **********************************************************************/
767 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
769 struct adapter *adapter = ifp->if_softc;
770 struct ifreq *ifr = (struct ifreq *) data;
771 #if defined(INET) || defined(INET6)
772 struct ifaddr *ifa = (struct ifaddr *)data;
773 bool avoid_reset = FALSE;
781 if (ifa->ifa_addr->sa_family == AF_INET)
785 if (ifa->ifa_addr->sa_family == AF_INET6)
788 #if defined(INET) || defined(INET6)
790 ** Calling init results in link renegotiation,
791 ** so we avoid doing it when possible.
794 ifp->if_flags |= IFF_UP;
795 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
797 if (!(ifp->if_flags & IFF_NOARP))
798 arp_ifinit(ifp, ifa);
800 error = ether_ioctl(ifp, command, data);
804 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
805 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
808 IXGBE_CORE_LOCK(adapter);
809 ifp->if_mtu = ifr->ifr_mtu;
810 adapter->max_frame_size =
811 ifp->if_mtu + IXGBE_MTU_HDR;
812 ixgbe_init_locked(adapter);
813 IXGBE_CORE_UNLOCK(adapter);
817 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
818 IXGBE_CORE_LOCK(adapter);
819 if (ifp->if_flags & IFF_UP) {
820 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
821 if ((ifp->if_flags ^ adapter->if_flags) &
822 (IFF_PROMISC | IFF_ALLMULTI)) {
823 ixgbe_set_promisc(adapter);
826 ixgbe_init_locked(adapter);
828 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
830 adapter->if_flags = ifp->if_flags;
831 IXGBE_CORE_UNLOCK(adapter);
835 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
836 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
837 IXGBE_CORE_LOCK(adapter);
838 ixgbe_disable_intr(adapter);
839 ixgbe_set_multi(adapter);
840 ixgbe_enable_intr(adapter);
841 IXGBE_CORE_UNLOCK(adapter);
846 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
847 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
851 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
852 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
853 if (mask & IFCAP_HWCSUM)
854 ifp->if_capenable ^= IFCAP_HWCSUM;
855 if (mask & IFCAP_TSO4)
856 ifp->if_capenable ^= IFCAP_TSO4;
857 if (mask & IFCAP_TSO6)
858 ifp->if_capenable ^= IFCAP_TSO6;
859 if (mask & IFCAP_LRO)
860 ifp->if_capenable ^= IFCAP_LRO;
861 if (mask & IFCAP_VLAN_HWTAGGING)
862 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
863 if (mask & IFCAP_VLAN_HWFILTER)
864 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
865 if (mask & IFCAP_VLAN_HWTSO)
866 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
867 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
868 IXGBE_CORE_LOCK(adapter);
869 ixgbe_init_locked(adapter);
870 IXGBE_CORE_UNLOCK(adapter);
872 VLAN_CAPABILITIES(ifp);
875 #if __FreeBSD_version >= 1100036
878 struct ixgbe_hw *hw = &adapter->hw;
881 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
882 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
885 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
889 if (i2c.len > sizeof(i2c.data)) {
894 for (i = 0; i < i2c.len; i++)
895 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
896 i2c.dev_addr, &i2c.data[i]);
897 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
902 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
903 error = ether_ioctl(ifp, command, data);
910 /*********************************************************************
913 * This routine is used in two ways. It is used by the stack as
914 * init entry point in network interface structure. It is also used
915 * by the driver as a hw/sw initialization routine to get to a
918 * return 0 on success, positive on failure
919 **********************************************************************/
920 #define IXGBE_MHADD_MFS_SHIFT 16
923 ixgbe_init_locked(struct adapter *adapter)
925 struct ifnet *ifp = adapter->ifp;
926 device_t dev = adapter->dev;
927 struct ixgbe_hw *hw = &adapter->hw;
928 u32 k, txdctl, mhadd, gpie;
931 mtx_assert(&adapter->core_mtx, MA_OWNED);
932 INIT_DEBUGOUT("ixgbe_init_locked: begin");
933 hw->adapter_stopped = FALSE;
934 ixgbe_stop_adapter(hw);
935 callout_stop(&adapter->timer);
937 /* reprogram the RAR[0] in case user changed it. */
938 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
940 /* Get the latest mac address, User can use a LAA */
941 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
942 IXGBE_ETH_LENGTH_OF_ADDRESS);
943 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
944 hw->addr_ctrl.rar_used_count = 1;
946 /* Set the various hardware offload abilities */
947 ifp->if_hwassist = 0;
948 if (ifp->if_capenable & IFCAP_TSO)
949 ifp->if_hwassist |= CSUM_TSO;
950 if (ifp->if_capenable & IFCAP_TXCSUM) {
951 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
952 #if __FreeBSD_version >= 800000
953 if (hw->mac.type != ixgbe_mac_82598EB)
954 ifp->if_hwassist |= CSUM_SCTP;
958 /* Prepare transmit descriptors and buffers */
959 if (ixgbe_setup_transmit_structures(adapter)) {
960 device_printf(dev, "Could not setup transmit structures\n");
966 ixgbe_initialize_transmit_units(adapter);
968 /* Setup Multicast table */
969 ixgbe_set_multi(adapter);
972 ** Determine the correct mbuf pool
973 ** for doing jumbo frames
975 if (adapter->max_frame_size <= 2048)
976 adapter->rx_mbuf_sz = MCLBYTES;
977 else if (adapter->max_frame_size <= 4096)
978 adapter->rx_mbuf_sz = MJUMPAGESIZE;
979 else if (adapter->max_frame_size <= 9216)
980 adapter->rx_mbuf_sz = MJUM9BYTES;
982 adapter->rx_mbuf_sz = MJUM16BYTES;
984 /* Prepare receive descriptors and buffers */
985 if (ixgbe_setup_receive_structures(adapter)) {
986 device_printf(dev, "Could not setup receive structures\n");
991 /* Configure RX settings */
992 ixgbe_initialize_receive_units(adapter);
994 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
996 /* Enable Fan Failure Interrupt */
997 gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
999 /* Add for Module detection */
1000 if (hw->mac.type == ixgbe_mac_82599EB)
1001 gpie |= IXGBE_SDP2_GPIEN;
1004 * Thermal Failure Detection (X540)
1005 * Link Detection (X552)
1007 if (hw->mac.type == ixgbe_mac_X540 ||
1008 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1009 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1010 gpie |= IXGBE_SDP0_GPIEN_X540;
1012 if (adapter->msix > 1) {
1013 /* Enable Enhanced MSIX mode */
1014 gpie |= IXGBE_GPIE_MSIX_MODE;
1015 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1018 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1021 if (ifp->if_mtu > ETHERMTU) {
1022 /* aka IXGBE_MAXFRS on 82599 and newer */
1023 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1024 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1025 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1026 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1029 /* Now enable all the queues */
1030 for (int i = 0; i < adapter->num_queues; i++) {
1031 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1032 txdctl |= IXGBE_TXDCTL_ENABLE;
1033 /* Set WTHRESH to 8, burst writeback */
1034 txdctl |= (8 << 16);
1036 * When the internal queue falls below PTHRESH (32),
1037 * start prefetching as long as there are at least
1038 * HTHRESH (1) buffers ready. The values are taken
1039 * from the Intel linux driver 3.8.21.
1040 * Prefetching enables tx line rate even with 1 queue.
1042 txdctl |= (32 << 0) | (1 << 8);
1043 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1046 for (int i = 0; i < adapter->num_queues; i++) {
1047 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1048 if (hw->mac.type == ixgbe_mac_82598EB) {
1054 rxdctl &= ~0x3FFFFF;
1057 rxdctl |= IXGBE_RXDCTL_ENABLE;
1058 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1059 for (k = 0; k < 10; k++) {
1060 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1061 IXGBE_RXDCTL_ENABLE)
1069 * In netmap mode, we must preserve the buffers made
1070 * available to userspace before the if_init()
1071 * (this is true by default on the TX side, because
1072 * init makes all buffers available to userspace).
1074 * netmap_reset() and the device specific routines
1075 * (e.g. ixgbe_setup_receive_rings()) map these
1076 * buffers at the end of the NIC ring, so here we
1077 * must set the RDT (tail) register to make sure
1078 * they are not overwritten.
1080 * In this driver the NIC ring starts at RDH = 0,
1081 * RDT points to the last slot available for reception (?),
1082 * so RDT = num_rx_desc - 1 means the whole ring is available.
1084 if (ifp->if_capenable & IFCAP_NETMAP) {
1085 struct netmap_adapter *na = NA(adapter->ifp);
1086 struct netmap_kring *kring = &na->rx_rings[i];
1087 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1089 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1091 #endif /* DEV_NETMAP */
1092 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1095 /* Enable Receive engine */
1096 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1097 if (hw->mac.type == ixgbe_mac_82598EB)
1098 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1099 rxctrl |= IXGBE_RXCTRL_RXEN;
1100 ixgbe_enable_rx_dma(hw, rxctrl);
1102 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1104 /* Set up MSI/X routing */
1105 if (ixgbe_enable_msix) {
1106 ixgbe_configure_ivars(adapter);
1107 /* Set up auto-mask */
1108 if (hw->mac.type == ixgbe_mac_82598EB)
1109 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1111 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1112 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1114 } else { /* Simple settings for Legacy/MSI */
1115 ixgbe_set_ivar(adapter, 0, 0, 0);
1116 ixgbe_set_ivar(adapter, 0, 0, 1);
1117 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1121 /* Init Flow director */
1122 if (hw->mac.type != ixgbe_mac_82598EB) {
1123 u32 hdrm = 32 << fdir_pballoc;
1125 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1126 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1131 ** Check on any SFP devices that
1132 ** need to be kick-started
1134 if (hw->phy.type == ixgbe_phy_none) {
1135 int err = hw->phy.ops.identify(hw);
1136 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1138 "Unsupported SFP+ module type was detected.\n");
1143 /* Set moderation on the Link interrupt */
1144 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1146 /* Configure Energy Efficient Ethernet for supported devices */
1147 if (adapter->eee_support)
1148 ixgbe_setup_eee(hw, adapter->eee_enabled);
1150 /* Config/Enable Link */
1151 ixgbe_config_link(adapter);
1153 /* Hardware Packet Buffer & Flow Control setup */
1154 ixgbe_config_delay_values(adapter);
1156 /* Initialize the FC settings */
1159 /* Set up VLAN support and filter */
1160 ixgbe_setup_vlan_hw_support(adapter);
1162 /* Setup DMA Coalescing */
1163 ixgbe_config_dmac(adapter);
1165 /* And now turn on interrupts */
1166 ixgbe_enable_intr(adapter);
1168 /* Now inform the stack we're ready */
1169 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1175 ixgbe_init(void *arg)
1177 struct adapter *adapter = arg;
1179 IXGBE_CORE_LOCK(adapter);
1180 ixgbe_init_locked(adapter);
1181 IXGBE_CORE_UNLOCK(adapter);
1186 ixgbe_config_delay_values(struct adapter *adapter)
1188 struct ixgbe_hw *hw = &adapter->hw;
1189 u32 rxpb, frame, size, tmp;
1191 frame = adapter->max_frame_size;
1193 /* Calculate High Water */
1194 switch (hw->mac.type) {
1195 case ixgbe_mac_X540:
1196 case ixgbe_mac_X550:
1197 case ixgbe_mac_X550EM_x:
1198 tmp = IXGBE_DV_X540(frame, frame);
1201 tmp = IXGBE_DV(frame, frame);
1204 size = IXGBE_BT2KB(tmp);
1205 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1206 hw->fc.high_water[0] = rxpb - size;
1208 /* Now calculate Low Water */
1209 switch (hw->mac.type) {
1210 case ixgbe_mac_X540:
1211 case ixgbe_mac_X550:
1212 case ixgbe_mac_X550EM_x:
1213 tmp = IXGBE_LOW_DV_X540(frame);
1216 tmp = IXGBE_LOW_DV(frame);
1219 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1221 hw->fc.requested_mode = adapter->fc;
1222 hw->fc.pause_time = IXGBE_FC_PAUSE;
1223 hw->fc.send_xon = TRUE;
1228 ** MSIX Interrupt Handlers and Tasklets
1233 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1235 struct ixgbe_hw *hw = &adapter->hw;
1236 u64 queue = (u64)(1 << vector);
1239 if (hw->mac.type == ixgbe_mac_82598EB) {
1240 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1241 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1243 mask = (queue & 0xFFFFFFFF);
1245 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1246 mask = (queue >> 32);
1248 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1253 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1255 struct ixgbe_hw *hw = &adapter->hw;
1256 u64 queue = (u64)(1 << vector);
1259 if (hw->mac.type == ixgbe_mac_82598EB) {
1260 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1261 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1263 mask = (queue & 0xFFFFFFFF);
1265 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1266 mask = (queue >> 32);
1268 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1273 ixgbe_handle_que(void *context, int pending)
1275 struct ix_queue *que = context;
1276 struct adapter *adapter = que->adapter;
1277 struct tx_ring *txr = que->txr;
1278 struct ifnet *ifp = adapter->ifp;
1281 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1282 more = ixgbe_rxeof(que);
1285 #ifndef IXGBE_LEGACY_TX
1286 if (!drbr_empty(ifp, txr->br))
1287 ixgbe_mq_start_locked(ifp, txr);
1289 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1290 ixgbe_start_locked(txr, ifp);
1292 IXGBE_TX_UNLOCK(txr);
1295 /* Reenable this interrupt */
1296 if (que->res != NULL)
1297 ixgbe_enable_queue(adapter, que->msix);
1299 ixgbe_enable_intr(adapter);
1304 /*********************************************************************
1306 * Legacy Interrupt Service routine
1308 **********************************************************************/
1311 ixgbe_legacy_irq(void *arg)
1313 struct ix_queue *que = arg;
1314 struct adapter *adapter = que->adapter;
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 struct ifnet *ifp = adapter->ifp;
1317 struct tx_ring *txr = adapter->tx_rings;
1322 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1325 if (reg_eicr == 0) {
1326 ixgbe_enable_intr(adapter);
1330 more = ixgbe_rxeof(que);
1334 #ifdef IXGBE_LEGACY_TX
1335 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1336 ixgbe_start_locked(txr, ifp);
1338 if (!drbr_empty(ifp, txr->br))
1339 ixgbe_mq_start_locked(ifp, txr);
1341 IXGBE_TX_UNLOCK(txr);
1343 /* Check for fan failure */
1344 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1345 (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1346 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1347 "REPLACE IMMEDIATELY!!\n");
1348 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1351 /* Link status change */
1352 if (reg_eicr & IXGBE_EICR_LSC)
1353 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1355 /* External PHY interrupt */
1356 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1357 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1358 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1361 taskqueue_enqueue(que->tq, &que->que_task);
1363 ixgbe_enable_intr(adapter);
1368 /*********************************************************************
1370 * MSIX Queue Interrupt Service routine
1372 **********************************************************************/
1374 ixgbe_msix_que(void *arg)
1376 struct ix_queue *que = arg;
1377 struct adapter *adapter = que->adapter;
1378 struct ifnet *ifp = adapter->ifp;
1379 struct tx_ring *txr = que->txr;
1380 struct rx_ring *rxr = que->rxr;
1384 /* Protect against spurious interrupts */
1385 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1388 ixgbe_disable_queue(adapter, que->msix);
1391 more = ixgbe_rxeof(que);
1395 #ifdef IXGBE_LEGACY_TX
1396 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1397 ixgbe_start_locked(txr, ifp);
1399 if (!drbr_empty(ifp, txr->br))
1400 ixgbe_mq_start_locked(ifp, txr);
1402 IXGBE_TX_UNLOCK(txr);
1406 if (ixgbe_enable_aim == FALSE)
1409 ** Do Adaptive Interrupt Moderation:
1410 ** - Write out last calculated setting
1411 ** - Calculate based on average size over
1412 ** the last interval.
1414 if (que->eitr_setting)
1415 IXGBE_WRITE_REG(&adapter->hw,
1416 IXGBE_EITR(que->msix), que->eitr_setting);
1418 que->eitr_setting = 0;
1420 /* Idle, do nothing */
1421 if ((txr->bytes == 0) && (rxr->bytes == 0))
1424 if ((txr->bytes) && (txr->packets))
1425 newitr = txr->bytes/txr->packets;
1426 if ((rxr->bytes) && (rxr->packets))
1427 newitr = max(newitr,
1428 (rxr->bytes / rxr->packets));
1429 newitr += 24; /* account for hardware frame, crc */
1431 /* set an upper boundary */
1432 newitr = min(newitr, 3000);
1434 /* Be nice to the mid range */
1435 if ((newitr > 300) && (newitr < 1200))
1436 newitr = (newitr / 3);
1438 newitr = (newitr / 2);
1440 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1441 newitr |= newitr << 16;
1443 newitr |= IXGBE_EITR_CNT_WDIS;
1445 /* save for next interrupt */
1446 que->eitr_setting = newitr;
1456 taskqueue_enqueue(que->tq, &que->que_task);
1458 ixgbe_enable_queue(adapter, que->msix);
1464 ixgbe_msix_link(void *arg)
1466 struct adapter *adapter = arg;
1467 struct ixgbe_hw *hw = &adapter->hw;
1468 u32 reg_eicr, mod_mask;
1470 ++adapter->link_irq;
1472 /* First get the cause */
1473 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1474 /* Be sure the queue bits are not cleared */
1475 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1476 /* Clear interrupt with write */
1477 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1479 /* Link status change */
1480 if (reg_eicr & IXGBE_EICR_LSC)
1481 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1483 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1485 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1486 /* This is probably overkill :) */
1487 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1489 /* Disable the interrupt */
1490 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1491 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1494 if (reg_eicr & IXGBE_EICR_ECC) {
1495 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1496 "Please Reboot!!\n");
1497 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1500 /* Check for over temp condition */
1501 if (reg_eicr & IXGBE_EICR_TS) {
1502 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1503 "PHY IS SHUT DOWN!!\n");
1504 device_printf(adapter->dev, "System shutdown required!\n");
1505 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1509 /* Pluggable optics-related interrupt */
1510 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1511 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1513 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1515 if (ixgbe_is_sfp(hw)) {
1516 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1517 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1518 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1519 } else if (reg_eicr & mod_mask) {
1520 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1521 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1525 /* Check for fan failure */
1526 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1527 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1528 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1529 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1530 "REPLACE IMMEDIATELY!!\n");
1533 /* External PHY interrupt */
1534 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1535 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1536 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1537 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1544 /*********************************************************************
1546 * Media Ioctl callback
1548 * This routine is called whenever the user queries the status of
1549 * the interface using ifconfig.
1551 **********************************************************************/
1553 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1555 struct adapter *adapter = ifp->if_softc;
1556 struct ixgbe_hw *hw = &adapter->hw;
1559 INIT_DEBUGOUT("ixgbe_media_status: begin");
1560 IXGBE_CORE_LOCK(adapter);
1561 ixgbe_update_link_status(adapter);
1563 ifmr->ifm_status = IFM_AVALID;
1564 ifmr->ifm_active = IFM_ETHER;
1566 if (!adapter->link_active) {
1567 IXGBE_CORE_UNLOCK(adapter);
1571 ifmr->ifm_status |= IFM_ACTIVE;
1572 layer = ixgbe_get_supported_physical_layer(hw);
1574 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1575 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1576 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1577 switch (adapter->link_speed) {
1578 case IXGBE_LINK_SPEED_10GB_FULL:
1579 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1581 case IXGBE_LINK_SPEED_1GB_FULL:
1582 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1584 case IXGBE_LINK_SPEED_100_FULL:
1585 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1588 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1589 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1590 switch (adapter->link_speed) {
1591 case IXGBE_LINK_SPEED_10GB_FULL:
1592 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1595 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1596 switch (adapter->link_speed) {
1597 case IXGBE_LINK_SPEED_10GB_FULL:
1598 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1600 case IXGBE_LINK_SPEED_1GB_FULL:
1601 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1604 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1605 switch (adapter->link_speed) {
1606 case IXGBE_LINK_SPEED_10GB_FULL:
1607 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1609 case IXGBE_LINK_SPEED_1GB_FULL:
1610 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1613 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1614 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1615 switch (adapter->link_speed) {
1616 case IXGBE_LINK_SPEED_10GB_FULL:
1617 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1619 case IXGBE_LINK_SPEED_1GB_FULL:
1620 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1623 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1624 switch (adapter->link_speed) {
1625 case IXGBE_LINK_SPEED_10GB_FULL:
1626 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1630 ** XXX: These need to use the proper media types once
1633 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1634 switch (adapter->link_speed) {
1635 case IXGBE_LINK_SPEED_10GB_FULL:
1636 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1638 case IXGBE_LINK_SPEED_2_5GB_FULL:
1639 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1641 case IXGBE_LINK_SPEED_1GB_FULL:
1642 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1645 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1646 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1647 switch (adapter->link_speed) {
1648 case IXGBE_LINK_SPEED_10GB_FULL:
1649 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1651 case IXGBE_LINK_SPEED_2_5GB_FULL:
1652 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1654 case IXGBE_LINK_SPEED_1GB_FULL:
1655 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1659 /* If nothing is recognized... */
1660 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1661 ifmr->ifm_active |= IFM_UNKNOWN;
1663 #if __FreeBSD_version >= 900025
1664 /* Display current flow control setting used on link */
1665 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1666 hw->fc.current_mode == ixgbe_fc_full)
1667 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1668 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1669 hw->fc.current_mode == ixgbe_fc_full)
1670 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1673 IXGBE_CORE_UNLOCK(adapter);
1678 /*********************************************************************
1680 * Media Ioctl callback
1682 * This routine is called when the user changes speed/duplex using
1683 * media/mediopt option with ifconfig.
1685 **********************************************************************/
1687 ixgbe_media_change(struct ifnet * ifp)
1689 struct adapter *adapter = ifp->if_softc;
1690 struct ifmedia *ifm = &adapter->media;
1691 struct ixgbe_hw *hw = &adapter->hw;
1692 ixgbe_link_speed speed = 0;
1694 INIT_DEBUGOUT("ixgbe_media_change: begin");
1696 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1699 if (hw->phy.media_type == ixgbe_media_type_backplane)
1703 ** We don't actually need to check against the supported
1704 ** media types of the adapter; ifmedia will take care of
1707 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1710 speed |= IXGBE_LINK_SPEED_100_FULL;
1712 case IFM_10G_SR: /* KR, too */
1714 case IFM_10G_CX4: /* KX4 */
1715 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1716 case IFM_10G_TWINAX:
1717 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1720 speed |= IXGBE_LINK_SPEED_100_FULL;
1723 case IFM_1000_CX: /* KX */
1724 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1727 speed |= IXGBE_LINK_SPEED_100_FULL;
1733 hw->mac.autotry_restart = TRUE;
1734 hw->mac.ops.setup_link(hw, speed, TRUE);
1735 adapter->advertise =
1736 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1737 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1738 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1743 device_printf(adapter->dev, "Invalid media type!\n");
1748 ixgbe_set_promisc(struct adapter *adapter)
1751 struct ifnet *ifp = adapter->ifp;
1754 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1755 reg_rctl &= (~IXGBE_FCTRL_UPE);
1756 if (ifp->if_flags & IFF_ALLMULTI)
1757 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1759 struct ifmultiaddr *ifma;
1760 #if __FreeBSD_version < 800000
1763 if_maddr_rlock(ifp);
1765 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1766 if (ifma->ifma_addr->sa_family != AF_LINK)
1768 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1772 #if __FreeBSD_version < 800000
1773 IF_ADDR_UNLOCK(ifp);
1775 if_maddr_runlock(ifp);
1778 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1779 reg_rctl &= (~IXGBE_FCTRL_MPE);
1780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1782 if (ifp->if_flags & IFF_PROMISC) {
1783 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1785 } else if (ifp->if_flags & IFF_ALLMULTI) {
1786 reg_rctl |= IXGBE_FCTRL_MPE;
1787 reg_rctl &= ~IXGBE_FCTRL_UPE;
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1794 /*********************************************************************
1797 * This routine is called whenever multicast address list is updated.
1799 **********************************************************************/
1800 #define IXGBE_RAR_ENTRIES 16
1803 ixgbe_set_multi(struct adapter *adapter)
1808 struct ifmultiaddr *ifma;
1810 struct ifnet *ifp = adapter->ifp;
1812 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1815 bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1816 MAX_NUM_MULTICAST_ADDRESSES);
1818 #if __FreeBSD_version < 800000
1821 if_maddr_rlock(ifp);
1823 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1824 if (ifma->ifma_addr->sa_family != AF_LINK)
1826 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1828 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1829 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1830 IXGBE_ETH_LENGTH_OF_ADDRESS);
1833 #if __FreeBSD_version < 800000
1834 IF_ADDR_UNLOCK(ifp);
1836 if_maddr_runlock(ifp);
1839 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1840 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1841 if (ifp->if_flags & IFF_PROMISC)
1842 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1844 ifp->if_flags & IFF_ALLMULTI) {
1845 fctrl |= IXGBE_FCTRL_MPE;
1846 fctrl &= ~IXGBE_FCTRL_UPE;
1848 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1850 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1852 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1854 ixgbe_update_mc_addr_list(&adapter->hw,
1855 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1862 * This is an iterator function now needed by the multicast
1863 * shared code. It simply feeds the shared code routine the
1864 * addresses in the array of ixgbe_set_multi() one by one.
1867 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1869 u8 *addr = *update_ptr;
1873 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1874 *update_ptr = newptr;
1879 /*********************************************************************
1882 * This routine checks for link status,updates statistics,
1883 * and runs the watchdog check.
1885 **********************************************************************/
1888 ixgbe_local_timer(void *arg)
1890 struct adapter *adapter = arg;
1891 device_t dev = adapter->dev;
1892 struct ix_queue *que = adapter->queues;
1896 mtx_assert(&adapter->core_mtx, MA_OWNED);
1898 /* Check for pluggable optics */
1899 if (adapter->sfp_probe)
1900 if (!ixgbe_sfp_probe(adapter))
1901 goto out; /* Nothing to do */
1903 ixgbe_update_link_status(adapter);
1904 ixgbe_update_stats_counters(adapter);
1907 ** Check the TX queues status
1908 ** - mark hung queues so we don't schedule on them
1909 ** - watchdog only if all queues show hung
1911 for (int i = 0; i < adapter->num_queues; i++, que++) {
1912 /* Keep track of queues with work for soft irq */
1914 queues |= ((u64)1 << que->me);
1916 ** Each time txeof runs without cleaning, but there
1917 ** are uncleaned descriptors it increments busy. If
1918 ** we get to the MAX we declare it hung.
1920 if (que->busy == IXGBE_QUEUE_HUNG) {
1922 /* Mark the queue as inactive */
1923 adapter->active_queues &= ~((u64)1 << que->me);
1926 /* Check if we've come back from hung */
1927 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1928 adapter->active_queues |= ((u64)1 << que->me);
1930 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1931 device_printf(dev,"Warning queue %d "
1932 "appears to be hung!\n", i);
1933 que->txr->busy = IXGBE_QUEUE_HUNG;
1939 /* Only truly watchdog if all queues show hung */
1940 if (hung == adapter->num_queues)
1942 else if (queues != 0) { /* Force an IRQ on queues with work */
1943 ixgbe_rearm_queues(adapter, queues);
1947 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1951 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1952 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1953 adapter->watchdog_events++;
1954 ixgbe_init_locked(adapter);
1958 ** Note: this routine updates the OS on the link state
1959 ** the real check of the hardware only happens with
1960 ** a link interrupt.
1963 ixgbe_update_link_status(struct adapter *adapter)
1965 struct ifnet *ifp = adapter->ifp;
1966 device_t dev = adapter->dev;
1968 if (adapter->link_up){
1969 if (adapter->link_active == FALSE) {
1971 device_printf(dev,"Link is up %d Gbps %s \n",
1972 ((adapter->link_speed == 128)? 10:1),
1974 adapter->link_active = TRUE;
1975 /* Update any Flow Control changes */
1976 ixgbe_fc_enable(&adapter->hw);
1977 /* Update DMA coalescing config */
1978 ixgbe_config_dmac(adapter);
1979 if_link_state_change(ifp, LINK_STATE_UP);
1981 } else { /* Link down */
1982 if (adapter->link_active == TRUE) {
1984 device_printf(dev,"Link is Down\n");
1985 if_link_state_change(ifp, LINK_STATE_DOWN);
1986 adapter->link_active = FALSE;
1994 /*********************************************************************
1996 * This routine disables all traffic on the adapter by issuing a
1997 * global reset on the MAC and deallocates TX/RX buffers.
1999 **********************************************************************/
2002 ixgbe_stop(void *arg)
2005 struct adapter *adapter = arg;
2006 struct ixgbe_hw *hw = &adapter->hw;
2009 mtx_assert(&adapter->core_mtx, MA_OWNED);
2011 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2012 ixgbe_disable_intr(adapter);
2013 callout_stop(&adapter->timer);
2015 /* Let the stack know...*/
2016 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2019 hw->adapter_stopped = FALSE;
2020 ixgbe_stop_adapter(hw);
2021 if (hw->mac.type == ixgbe_mac_82599EB)
2022 ixgbe_stop_mac_link_on_d3_82599(hw);
2023 /* Turn off the laser - noop with no optics */
2024 ixgbe_disable_tx_laser(hw);
2026 /* Update the stack */
2027 adapter->link_up = FALSE;
2028 ixgbe_update_link_status(adapter);
2030 /* reprogram the RAR[0] in case user changed it. */
2031 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2037 /*********************************************************************
2039 * Determine hardware revision.
2041 **********************************************************************/
2043 ixgbe_identify_hardware(struct adapter *adapter)
2045 device_t dev = adapter->dev;
2046 struct ixgbe_hw *hw = &adapter->hw;
2048 /* Save off the information about this board */
2049 hw->vendor_id = pci_get_vendor(dev);
2050 hw->device_id = pci_get_device(dev);
2051 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2052 hw->subsystem_vendor_id =
2053 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2054 hw->subsystem_device_id =
2055 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2058 ** Make sure BUSMASTER is set
2060 pci_enable_busmaster(dev);
2062 /* We need this here to set the num_segs below */
2063 ixgbe_set_mac_type(hw);
2065 /* Pick up the 82599 settings */
2066 if (hw->mac.type != ixgbe_mac_82598EB) {
2067 hw->phy.smart_speed = ixgbe_smart_speed;
2068 adapter->num_segs = IXGBE_82599_SCATTER;
2070 adapter->num_segs = IXGBE_82598_SCATTER;
2075 /*********************************************************************
2077 * Determine optic type
2079 **********************************************************************/
2081 ixgbe_setup_optics(struct adapter *adapter)
2083 struct ixgbe_hw *hw = &adapter->hw;
2086 layer = ixgbe_get_supported_physical_layer(hw);
2088 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2089 adapter->optics = IFM_10G_T;
2093 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2094 adapter->optics = IFM_1000_T;
2098 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2099 adapter->optics = IFM_1000_SX;
2103 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2104 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2105 adapter->optics = IFM_10G_LR;
2109 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2110 adapter->optics = IFM_10G_SR;
2114 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2115 adapter->optics = IFM_10G_TWINAX;
2119 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2120 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2121 adapter->optics = IFM_10G_CX4;
2125 /* If we get here just set the default */
2126 adapter->optics = IFM_ETHER | IFM_AUTO;
2130 /*********************************************************************
2132 * Setup the Legacy or MSI Interrupt handler
2134 **********************************************************************/
2136 ixgbe_allocate_legacy(struct adapter *adapter)
2138 device_t dev = adapter->dev;
2139 struct ix_queue *que = adapter->queues;
2140 #ifndef IXGBE_LEGACY_TX
2141 struct tx_ring *txr = adapter->tx_rings;
2146 if (adapter->msix == 1)
2149 /* We allocate a single interrupt resource */
2150 adapter->res = bus_alloc_resource_any(dev,
2151 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2152 if (adapter->res == NULL) {
2153 device_printf(dev, "Unable to allocate bus resource: "
2159 * Try allocating a fast interrupt and the associated deferred
2160 * processing contexts.
2162 #ifndef IXGBE_LEGACY_TX
2163 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2165 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2166 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2167 taskqueue_thread_enqueue, &que->tq);
2168 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2169 device_get_nameunit(adapter->dev));
2171 /* Tasklets for Link, SFP and Multispeed Fiber */
2172 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2173 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2174 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2175 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2177 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2179 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2180 taskqueue_thread_enqueue, &adapter->tq);
2181 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2182 device_get_nameunit(adapter->dev));
2184 if ((error = bus_setup_intr(dev, adapter->res,
2185 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2186 que, &adapter->tag)) != 0) {
2187 device_printf(dev, "Failed to register fast interrupt "
2188 "handler: %d\n", error);
2189 taskqueue_free(que->tq);
2190 taskqueue_free(adapter->tq);
2195 /* For simplicity in the handlers */
2196 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2202 /*********************************************************************
2204 * Setup MSIX Interrupt resources and handlers
2206 **********************************************************************/
2208 ixgbe_allocate_msix(struct adapter *adapter)
2210 device_t dev = adapter->dev;
2211 struct ix_queue *que = adapter->queues;
2212 struct tx_ring *txr = adapter->tx_rings;
2213 int error, rid, vector = 0;
2216 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2218 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2219 RF_SHAREABLE | RF_ACTIVE);
2220 if (que->res == NULL) {
2221 device_printf(dev,"Unable to allocate"
2222 " bus resource: que interrupt [%d]\n", vector);
2225 /* Set the handler function */
2226 error = bus_setup_intr(dev, que->res,
2227 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2228 ixgbe_msix_que, que, &que->tag);
2231 device_printf(dev, "Failed to register QUE handler");
2234 #if __FreeBSD_version >= 800504
2235 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2238 adapter->active_queues |= (u64)(1 << que->msix);
2240 * Bind the msix vector, and thus the
2241 * rings to the corresponding cpu.
2243 * This just happens to match the default RSS round-robin
2244 * bucket -> queue -> CPU allocation.
2246 if (adapter->num_queues > 1)
2249 if (adapter->num_queues > 1)
2250 bus_bind_intr(dev, que->res, cpu_id);
2252 #ifndef IXGBE_LEGACY_TX
2253 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2255 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2256 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2257 taskqueue_thread_enqueue, &que->tq);
2258 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2259 device_get_nameunit(adapter->dev));
2264 adapter->res = bus_alloc_resource_any(dev,
2265 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2266 if (!adapter->res) {
2267 device_printf(dev,"Unable to allocate"
2268 " bus resource: Link interrupt [%d]\n", rid);
2271 /* Set the link handler function */
2272 error = bus_setup_intr(dev, adapter->res,
2273 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2274 ixgbe_msix_link, adapter, &adapter->tag);
2276 adapter->res = NULL;
2277 device_printf(dev, "Failed to register LINK handler");
2280 #if __FreeBSD_version >= 800504
2281 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2283 adapter->vector = vector;
2284 /* Tasklets for Link, SFP and Multispeed Fiber */
2285 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2286 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2287 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2288 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2290 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2292 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2293 taskqueue_thread_enqueue, &adapter->tq);
2294 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2295 device_get_nameunit(adapter->dev));
2301 * Setup Either MSI/X or MSI
2304 ixgbe_setup_msix(struct adapter *adapter)
2306 device_t dev = adapter->dev;
2307 int rid, want, queues, msgs;
2309 /* Override by tuneable */
2310 if (ixgbe_enable_msix == 0)
2313 /* First try MSI/X */
2314 msgs = pci_msix_count(dev);
2317 rid = PCIR_BAR(MSIX_82598_BAR);
2318 adapter->msix_mem = bus_alloc_resource_any(dev,
2319 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2320 if (adapter->msix_mem == NULL) {
2321 rid += 4; /* 82599 maps in higher BAR */
2322 adapter->msix_mem = bus_alloc_resource_any(dev,
2323 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2325 if (adapter->msix_mem == NULL) {
2326 /* May not be enabled */
2327 device_printf(adapter->dev,
2328 "Unable to map MSIX table \n");
2332 /* Figure out a reasonable auto config value */
2333 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2335 if (ixgbe_num_queues != 0)
2336 queues = ixgbe_num_queues;
2337 /* Set max queues to 8 when autoconfiguring */
2338 else if ((ixgbe_num_queues == 0) && (queues > 8))
2341 /* reflect correct sysctl value */
2342 ixgbe_num_queues = queues;
2345 ** Want one vector (RX/TX pair) per queue
2346 ** plus an additional for Link.
2352 device_printf(adapter->dev,
2353 "MSIX Configuration Problem, "
2354 "%d vectors but %d queues wanted!\n",
2358 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2359 device_printf(adapter->dev,
2360 "Using MSIX interrupts with %d vectors\n", msgs);
2361 adapter->num_queues = queues;
2365 ** If MSIX alloc failed or provided us with
2366 ** less than needed, free and fall through to MSI
2368 pci_release_msi(dev);
2371 if (adapter->msix_mem != NULL) {
2372 bus_release_resource(dev, SYS_RES_MEMORY,
2373 rid, adapter->msix_mem);
2374 adapter->msix_mem = NULL;
2377 if (pci_alloc_msi(dev, &msgs) == 0) {
2378 device_printf(adapter->dev,"Using an MSI interrupt\n");
2381 device_printf(adapter->dev,"Using a Legacy interrupt\n");
2387 ixgbe_allocate_pci_resources(struct adapter *adapter)
2390 device_t dev = adapter->dev;
2393 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2396 if (!(adapter->pci_mem)) {
2397 device_printf(dev,"Unable to allocate bus resource: memory\n");
2401 adapter->osdep.mem_bus_space_tag =
2402 rman_get_bustag(adapter->pci_mem);
2403 adapter->osdep.mem_bus_space_handle =
2404 rman_get_bushandle(adapter->pci_mem);
2405 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2407 /* Legacy defaults */
2408 adapter->num_queues = 1;
2409 adapter->hw.back = &adapter->osdep;
2412 ** Now setup MSI or MSI/X, should
2413 ** return us the number of supported
2414 ** vectors. (Will be 1 for MSI)
2416 adapter->msix = ixgbe_setup_msix(adapter);
2421 ixgbe_free_pci_resources(struct adapter * adapter)
2423 struct ix_queue *que = adapter->queues;
2424 device_t dev = adapter->dev;
2427 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2428 memrid = PCIR_BAR(MSIX_82598_BAR);
2430 memrid = PCIR_BAR(MSIX_82599_BAR);
2433 ** There is a slight possibility of a failure mode
2434 ** in attach that will result in entering this function
2435 ** before interrupt resources have been initialized, and
2436 ** in that case we do not want to execute the loops below
2437 ** We can detect this reliably by the state of the adapter
2440 if (adapter->res == NULL)
2444 ** Release all msix queue resources:
2446 for (int i = 0; i < adapter->num_queues; i++, que++) {
2447 rid = que->msix + 1;
2448 if (que->tag != NULL) {
2449 bus_teardown_intr(dev, que->res, que->tag);
2452 if (que->res != NULL)
2453 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2457 /* Clean the Legacy or Link interrupt last */
2458 if (adapter->vector) /* we are doing MSIX */
2459 rid = adapter->vector + 1;
2461 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2463 if (adapter->tag != NULL) {
2464 bus_teardown_intr(dev, adapter->res, adapter->tag);
2465 adapter->tag = NULL;
2467 if (adapter->res != NULL)
2468 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2472 pci_release_msi(dev);
2474 if (adapter->msix_mem != NULL)
2475 bus_release_resource(dev, SYS_RES_MEMORY,
2476 memrid, adapter->msix_mem);
2478 if (adapter->pci_mem != NULL)
2479 bus_release_resource(dev, SYS_RES_MEMORY,
2480 PCIR_BAR(0), adapter->pci_mem);
2485 /*********************************************************************
2487 * Setup networking device structure and register an interface.
2489 **********************************************************************/
2491 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2495 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2497 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2499 device_printf(dev, "can not allocate ifnet structure\n");
2502 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2503 if_initbaudrate(ifp, IF_Gbps(10));
2504 ifp->if_init = ixgbe_init;
2505 ifp->if_softc = adapter;
2506 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2507 ifp->if_ioctl = ixgbe_ioctl;
2508 /* TSO parameters */
2509 ifp->if_hw_tsomax = 65518;
2510 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2511 ifp->if_hw_tsomaxsegsize = 2048;
2512 #ifndef IXGBE_LEGACY_TX
2513 ifp->if_transmit = ixgbe_mq_start;
2514 ifp->if_qflush = ixgbe_qflush;
2516 ifp->if_start = ixgbe_start;
2517 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2518 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2519 IFQ_SET_READY(&ifp->if_snd);
2522 ether_ifattach(ifp, adapter->hw.mac.addr);
2524 adapter->max_frame_size =
2525 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2528 * Tell the upper layer(s) we support long frames.
2530 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2532 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2533 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2534 ifp->if_capabilities |= IFCAP_LRO;
2535 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2539 ifp->if_capenable = ifp->if_capabilities;
2542 ** Don't turn this on by default, if vlans are
2543 ** created on another pseudo device (eg. lagg)
2544 ** then vlan events are not passed thru, breaking
2545 ** operation, but with HW FILTER off it works. If
2546 ** using vlans directly on the ixgbe driver you can
2547 ** enable this and get full hardware tag filtering.
2549 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2552 * Specify the media types supported by this adapter and register
2553 * callbacks to update media and link information
2555 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2556 ixgbe_media_status);
2558 ixgbe_add_media_types(adapter);
2560 /* Autoselect media by default */
2561 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2567 ixgbe_add_media_types(struct adapter *adapter)
2569 struct ixgbe_hw *hw = &adapter->hw;
2570 device_t dev = adapter->dev;
2573 layer = ixgbe_get_supported_physical_layer(hw);
2575 /* Media types with matching FreeBSD media defines */
2576 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2577 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2578 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2579 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2580 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2581 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2583 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2584 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2585 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2587 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2588 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2589 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2590 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2591 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2592 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2593 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2594 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2597 ** Other (no matching FreeBSD media type):
2598 ** To workaround this, we'll assign these completely
2599 ** inappropriate media types.
2601 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2602 device_printf(dev, "Media supported: 10GbaseKR\n");
2603 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2604 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2606 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2607 device_printf(dev, "Media supported: 10GbaseKX4\n");
2608 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2609 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2611 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2612 device_printf(dev, "Media supported: 1000baseKX\n");
2613 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2614 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2616 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2617 /* Someday, someone will care about you... */
2618 device_printf(dev, "Media supported: 1000baseBX\n");
2621 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2622 ifmedia_add(&adapter->media,
2623 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2624 ifmedia_add(&adapter->media,
2625 IFM_ETHER | IFM_1000_T, 0, NULL);
2628 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2632 ixgbe_config_link(struct adapter *adapter)
2634 struct ixgbe_hw *hw = &adapter->hw;
2635 u32 autoneg, err = 0;
2636 bool sfp, negotiate;
2638 sfp = ixgbe_is_sfp(hw);
2641 if (hw->phy.multispeed_fiber) {
2642 hw->mac.ops.setup_sfp(hw);
2643 ixgbe_enable_tx_laser(hw);
2644 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2646 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2648 if (hw->mac.ops.check_link)
2649 err = ixgbe_check_link(hw, &adapter->link_speed,
2650 &adapter->link_up, FALSE);
2653 autoneg = hw->phy.autoneg_advertised;
2654 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2655 err = hw->mac.ops.get_link_capabilities(hw,
2656 &autoneg, &negotiate);
2659 if (hw->mac.ops.setup_link)
2660 err = hw->mac.ops.setup_link(hw,
2661 autoneg, adapter->link_up);
2668 /*********************************************************************
2670 * Enable transmit units.
2672 **********************************************************************/
2674 ixgbe_initialize_transmit_units(struct adapter *adapter)
2676 struct tx_ring *txr = adapter->tx_rings;
2677 struct ixgbe_hw *hw = &adapter->hw;
2679 /* Setup the Base and Length of the Tx Descriptor Ring */
2681 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2682 u64 tdba = txr->txdma.dma_paddr;
2685 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2686 (tdba & 0x00000000ffffffffULL));
2687 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2688 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2689 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2691 /* Setup the HW Tx Head and Tail descriptor pointers */
2692 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2693 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2695 /* Cache the tail address */
2696 txr->tail = IXGBE_TDT(txr->me);
2698 /* Set the processing limit */
2699 txr->process_limit = ixgbe_tx_process_limit;
2701 /* Disable Head Writeback */
2702 switch (hw->mac.type) {
2703 case ixgbe_mac_82598EB:
2704 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2706 case ixgbe_mac_82599EB:
2707 case ixgbe_mac_X540:
2709 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2712 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2713 switch (hw->mac.type) {
2714 case ixgbe_mac_82598EB:
2715 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2717 case ixgbe_mac_82599EB:
2718 case ixgbe_mac_X540:
2720 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2726 if (hw->mac.type != ixgbe_mac_82598EB) {
2727 u32 dmatxctl, rttdcs;
2728 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2729 dmatxctl |= IXGBE_DMATXCTL_TE;
2730 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2731 /* Disable arbiter to set MTQC */
2732 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2733 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2734 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2735 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2736 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2737 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2744 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2746 struct ixgbe_hw *hw = &adapter->hw;
2748 int i, j, queue_id, table_size;
2750 uint32_t rss_key[10];
2756 /* set up random bits */
2757 arc4rand(&rss_key, sizeof(rss_key), 0);
2759 /* Set multiplier for RETA setup and table size based on MAC */
2762 switch (adapter->hw.mac.type) {
2763 case ixgbe_mac_82598EB:
2766 case ixgbe_mac_X550:
2767 case ixgbe_mac_X550EM_x:
2774 /* Set up the redirection table */
2775 for (i = 0, j = 0; i < table_size; i++, j++) {
2776 if (j == adapter->num_queues) j = 0;
2777 queue_id = (j * index_mult);
2779 * The low 8 bits are for hash value (n+0);
2780 * The next 8 bits are for hash value (n+1), etc.
2783 reta = reta | ( ((uint32_t) queue_id) << 24);
2786 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2788 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2793 /* Now fill our hash function seeds */
2794 for (int i = 0; i < 10; i++)
2795 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2797 /* Perform hash on these packet types */
2799 * Disable UDP - IP fragments aren't currently being handled
2800 * and so we end up with a mix of 2-tuple and 4-tuple
2803 mrqc = IXGBE_MRQC_RSSEN
2804 | IXGBE_MRQC_RSS_FIELD_IPV4
2805 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2807 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2809 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2810 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2811 | IXGBE_MRQC_RSS_FIELD_IPV6
2812 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2814 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2815 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2818 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2822 /*********************************************************************
2824 * Setup receive registers and features.
2826 **********************************************************************/
2827 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2829 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2832 ixgbe_initialize_receive_units(struct adapter *adapter)
2834 struct rx_ring *rxr = adapter->rx_rings;
2835 struct ixgbe_hw *hw = &adapter->hw;
2836 struct ifnet *ifp = adapter->ifp;
2837 u32 bufsz, fctrl, srrctl, rxcsum;
2842 * Make sure receives are disabled while
2843 * setting up the descriptor ring
2845 ixgbe_disable_rx(hw);
2847 /* Enable broadcasts */
2848 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2849 fctrl |= IXGBE_FCTRL_BAM;
2850 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2851 fctrl |= IXGBE_FCTRL_DPF;
2852 fctrl |= IXGBE_FCTRL_PMCF;
2854 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2856 /* Set for Jumbo Frames? */
2857 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2858 if (ifp->if_mtu > ETHERMTU)
2859 hlreg |= IXGBE_HLREG0_JUMBOEN;
2861 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2863 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2864 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2865 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2867 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2868 #endif /* DEV_NETMAP */
2869 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2871 bufsz = (adapter->rx_mbuf_sz +
2872 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2874 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2875 u64 rdba = rxr->rxdma.dma_paddr;
2877 /* Setup the Base and Length of the Rx Descriptor Ring */
2878 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2879 (rdba & 0x00000000ffffffffULL));
2880 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2881 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2882 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2884 /* Set up the SRRCTL register */
2885 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2886 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2887 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2889 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2892 * Set DROP_EN iff we have no flow control and >1 queue.
2893 * Note that srrctl was cleared shortly before during reset,
2894 * so we do not need to clear the bit, but do it just in case
2895 * this code is moved elsewhere.
2897 if (adapter->num_queues > 1 &&
2898 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2899 srrctl |= IXGBE_SRRCTL_DROP_EN;
2901 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2904 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2906 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2907 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2908 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2910 /* Set the processing limit */
2911 rxr->process_limit = ixgbe_rx_process_limit;
2913 /* Set the driver rx tail address */
2914 rxr->tail = IXGBE_RDT(rxr->me);
2917 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2918 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2919 IXGBE_PSRTYPE_UDPHDR |
2920 IXGBE_PSRTYPE_IPV4HDR |
2921 IXGBE_PSRTYPE_IPV6HDR;
2922 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2925 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2927 ixgbe_initialise_rss_mapping(adapter);
2929 if (adapter->num_queues > 1) {
2930 /* RSS and RX IPP Checksum are mutually exclusive */
2931 rxcsum |= IXGBE_RXCSUM_PCSD;
2934 if (ifp->if_capenable & IFCAP_RXCSUM)
2935 rxcsum |= IXGBE_RXCSUM_PCSD;
2937 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2938 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2940 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2947 ** This routine is run via an vlan config EVENT,
2948 ** it enables us to use the HW Filter table since
2949 ** we can get the vlan id. This just creates the
2950 ** entry in the soft version of the VFTA, init will
2951 ** repopulate the real table.
2954 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2956 struct adapter *adapter = ifp->if_softc;
2959 if (ifp->if_softc != arg) /* Not our event */
2962 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2965 IXGBE_CORE_LOCK(adapter);
2966 index = (vtag >> 5) & 0x7F;
2968 adapter->shadow_vfta[index] |= (1 << bit);
2969 ++adapter->num_vlans;
2970 ixgbe_setup_vlan_hw_support(adapter);
2971 IXGBE_CORE_UNLOCK(adapter);
2975 ** This routine is run via an vlan
2976 ** unconfig EVENT, remove our entry
2977 ** in the soft vfta.
2980 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2982 struct adapter *adapter = ifp->if_softc;
2985 if (ifp->if_softc != arg)
2988 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2991 IXGBE_CORE_LOCK(adapter);
2992 index = (vtag >> 5) & 0x7F;
2994 adapter->shadow_vfta[index] &= ~(1 << bit);
2995 --adapter->num_vlans;
2996 /* Re-init to load the changes */
2997 ixgbe_setup_vlan_hw_support(adapter);
2998 IXGBE_CORE_UNLOCK(adapter);
3002 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3004 struct ifnet *ifp = adapter->ifp;
3005 struct ixgbe_hw *hw = &adapter->hw;
3006 struct rx_ring *rxr;
3011 ** We get here thru init_locked, meaning
3012 ** a soft reset, this has already cleared
3013 ** the VFTA and other state, so if there
3014 ** have been no vlan's registered do nothing.
3016 if (adapter->num_vlans == 0)
3019 /* Setup the queues for vlans */
3020 for (int i = 0; i < adapter->num_queues; i++) {
3021 rxr = &adapter->rx_rings[i];
3022 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3023 if (hw->mac.type != ixgbe_mac_82598EB) {
3024 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3025 ctrl |= IXGBE_RXDCTL_VME;
3026 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3028 rxr->vtag_strip = TRUE;
3031 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3034 ** A soft reset zero's out the VFTA, so
3035 ** we need to repopulate it now.
3037 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3038 if (adapter->shadow_vfta[i] != 0)
3039 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3040 adapter->shadow_vfta[i]);
3042 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3043 /* Enable the Filter Table if enabled */
3044 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3045 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3046 ctrl |= IXGBE_VLNCTRL_VFE;
3048 if (hw->mac.type == ixgbe_mac_82598EB)
3049 ctrl |= IXGBE_VLNCTRL_VME;
3050 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3054 ixgbe_enable_intr(struct adapter *adapter)
3056 struct ixgbe_hw *hw = &adapter->hw;
3057 struct ix_queue *que = adapter->queues;
3060 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3061 /* Enable Fan Failure detection */
3062 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3063 mask |= IXGBE_EIMS_GPI_SDP1;
3065 switch (adapter->hw.mac.type) {
3066 case ixgbe_mac_82599EB:
3067 mask |= IXGBE_EIMS_ECC;
3068 /* Temperature sensor on some adapters */
3069 mask |= IXGBE_EIMS_GPI_SDP0;
3070 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3071 mask |= IXGBE_EIMS_GPI_SDP1;
3072 mask |= IXGBE_EIMS_GPI_SDP2;
3074 mask |= IXGBE_EIMS_FLOW_DIR;
3077 case ixgbe_mac_X540:
3078 /* Detect if Thermal Sensor is enabled */
3079 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3080 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3081 mask |= IXGBE_EIMS_TS;
3082 mask |= IXGBE_EIMS_ECC;
3084 mask |= IXGBE_EIMS_FLOW_DIR;
3087 case ixgbe_mac_X550:
3088 case ixgbe_mac_X550EM_x:
3089 /* MAC thermal sensor is automatically enabled */
3090 mask |= IXGBE_EIMS_TS;
3091 /* Some devices use SDP0 for important information */
3092 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3093 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3094 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3095 mask |= IXGBE_EIMS_ECC;
3097 mask |= IXGBE_EIMS_FLOW_DIR;
3104 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3106 /* With MSI-X we use auto clear */
3107 if (adapter->msix_mem) {
3108 mask = IXGBE_EIMS_ENABLE_MASK;
3109 /* Don't autoclear Link */
3110 mask &= ~IXGBE_EIMS_OTHER;
3111 mask &= ~IXGBE_EIMS_LSC;
3112 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3116 ** Now enable all queues, this is done separately to
3117 ** allow for handling the extended (beyond 32) MSIX
3118 ** vectors that can be used by 82599
3120 for (int i = 0; i < adapter->num_queues; i++, que++)
3121 ixgbe_enable_queue(adapter, que->msix);
3123 IXGBE_WRITE_FLUSH(hw);
3129 ixgbe_disable_intr(struct adapter *adapter)
3131 if (adapter->msix_mem)
3132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3133 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3134 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3136 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3138 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3140 IXGBE_WRITE_FLUSH(&adapter->hw);
3145 ** Get the width and transaction speed of
3146 ** the slot this adapter is plugged into.
3149 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3151 device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
3152 struct ixgbe_mac_info *mac = &hw->mac;
3156 /* For most devices simply call the shared code routine */
3157 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3158 ixgbe_get_bus_info(hw);
3159 /* These devices don't use PCI-E */
3160 switch (hw->mac.type) {
3161 case ixgbe_mac_X550EM_x:
3169 ** For the Quad port adapter we need to parse back
3170 ** up the PCI tree to find the speed of the expansion
3171 ** slot into which this adapter is plugged. A bit more work.
3173 dev = device_get_parent(device_get_parent(dev));
3175 device_printf(dev, "parent pcib = %x,%x,%x\n",
3176 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3178 dev = device_get_parent(device_get_parent(dev));
3180 device_printf(dev, "slot pcib = %x,%x,%x\n",
3181 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3183 /* Now get the PCI Express Capabilities offset */
3184 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3185 /* ...and read the Link Status Register */
3186 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3187 switch (link & IXGBE_PCI_LINK_WIDTH) {
3188 case IXGBE_PCI_LINK_WIDTH_1:
3189 hw->bus.width = ixgbe_bus_width_pcie_x1;
3191 case IXGBE_PCI_LINK_WIDTH_2:
3192 hw->bus.width = ixgbe_bus_width_pcie_x2;
3194 case IXGBE_PCI_LINK_WIDTH_4:
3195 hw->bus.width = ixgbe_bus_width_pcie_x4;
3197 case IXGBE_PCI_LINK_WIDTH_8:
3198 hw->bus.width = ixgbe_bus_width_pcie_x8;
3201 hw->bus.width = ixgbe_bus_width_unknown;
3205 switch (link & IXGBE_PCI_LINK_SPEED) {
3206 case IXGBE_PCI_LINK_SPEED_2500:
3207 hw->bus.speed = ixgbe_bus_speed_2500;
3209 case IXGBE_PCI_LINK_SPEED_5000:
3210 hw->bus.speed = ixgbe_bus_speed_5000;
3212 case IXGBE_PCI_LINK_SPEED_8000:
3213 hw->bus.speed = ixgbe_bus_speed_8000;
3216 hw->bus.speed = ixgbe_bus_speed_unknown;
3220 mac->ops.set_lan_id(hw);
3223 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3224 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3225 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3226 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3227 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3228 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3229 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3232 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3233 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3234 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3235 device_printf(dev, "PCI-Express bandwidth available"
3236 " for this card\n is not sufficient for"
3237 " optimal performance.\n");
3238 device_printf(dev, "For optimal performance a x8 "
3239 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3241 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3242 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3243 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3244 device_printf(dev, "PCI-Express bandwidth available"
3245 " for this card\n is not sufficient for"
3246 " optimal performance.\n");
3247 device_printf(dev, "For optimal performance a x8 "
3248 "PCIE Gen3 slot is required.\n");
3256 ** Setup the correct IVAR register for a particular MSIX interrupt
3257 ** (yes this is all very magic and confusing :)
3258 ** - entry is the register array entry
3259 ** - vector is the MSIX vector for this queue
3260 ** - type is RX/TX/MISC
3263 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3265 struct ixgbe_hw *hw = &adapter->hw;
3268 vector |= IXGBE_IVAR_ALLOC_VAL;
3270 switch (hw->mac.type) {
3272 case ixgbe_mac_82598EB:
3274 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3276 entry += (type * 64);
3277 index = (entry >> 2) & 0x1F;
3278 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3279 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3280 ivar |= (vector << (8 * (entry & 0x3)));
3281 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3284 case ixgbe_mac_82599EB:
3285 case ixgbe_mac_X540:
3286 case ixgbe_mac_X550:
3287 case ixgbe_mac_X550EM_x:
3288 if (type == -1) { /* MISC IVAR */
3289 index = (entry & 1) * 8;
3290 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3291 ivar &= ~(0xFF << index);
3292 ivar |= (vector << index);
3293 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3294 } else { /* RX/TX IVARS */
3295 index = (16 * (entry & 1)) + (8 * type);
3296 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3297 ivar &= ~(0xFF << index);
3298 ivar |= (vector << index);
3299 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3308 ixgbe_configure_ivars(struct adapter *adapter)
3310 struct ix_queue *que = adapter->queues;
3313 if (ixgbe_max_interrupt_rate > 0)
3314 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3317 ** Disable DMA coalescing if interrupt moderation is
3324 for (int i = 0; i < adapter->num_queues; i++, que++) {
3325 /* First the RX queue entry */
3326 ixgbe_set_ivar(adapter, i, que->msix, 0);
3327 /* ... and the TX */
3328 ixgbe_set_ivar(adapter, i, que->msix, 1);
3329 /* Set an Initial EITR value */
3330 IXGBE_WRITE_REG(&adapter->hw,
3331 IXGBE_EITR(que->msix), newitr);
3334 /* For the Link interrupt */
3335 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3339 ** ixgbe_sfp_probe - called in the local timer to
3340 ** determine if a port had optics inserted.
3342 static bool ixgbe_sfp_probe(struct adapter *adapter)
3344 struct ixgbe_hw *hw = &adapter->hw;
3345 device_t dev = adapter->dev;
3346 bool result = FALSE;
3348 if ((hw->phy.type == ixgbe_phy_nl) &&
3349 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3350 s32 ret = hw->phy.ops.identify_sfp(hw);
3353 ret = hw->phy.ops.reset(hw);
3354 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3355 device_printf(dev,"Unsupported SFP+ module detected!");
3356 printf(" Reload driver with supported module.\n");
3357 adapter->sfp_probe = FALSE;
3360 device_printf(dev,"SFP+ module detected!\n");
3361 /* We now have supported optics */
3362 adapter->sfp_probe = FALSE;
3363 /* Set the optics type so system reports correctly */
3364 ixgbe_setup_optics(adapter);
3372 ** Tasklet handler for MSIX Link interrupts
3373 ** - do outside interrupt since it might sleep
3376 ixgbe_handle_link(void *context, int pending)
3378 struct adapter *adapter = context;
3380 ixgbe_check_link(&adapter->hw,
3381 &adapter->link_speed, &adapter->link_up, 0);
3382 ixgbe_update_link_status(adapter);
3386 ** Tasklet for handling SFP module interrupts
3389 ixgbe_handle_mod(void *context, int pending)
3391 struct adapter *adapter = context;
3392 struct ixgbe_hw *hw = &adapter->hw;
3393 device_t dev = adapter->dev;
3396 err = hw->phy.ops.identify_sfp(hw);
3397 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3399 "Unsupported SFP+ module type was detected.\n");
3402 err = hw->mac.ops.setup_sfp(hw);
3403 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3405 "Setup failure - unsupported SFP+ module type.\n");
3408 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3414 ** Tasklet for handling MSF (multispeed fiber) interrupts
3417 ixgbe_handle_msf(void *context, int pending)
3419 struct adapter *adapter = context;
3420 struct ixgbe_hw *hw = &adapter->hw;
3425 err = hw->phy.ops.identify_sfp(hw);
3427 ixgbe_setup_optics(adapter);
3428 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3431 autoneg = hw->phy.autoneg_advertised;
3432 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3433 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3434 if (hw->mac.ops.setup_link)
3435 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3437 ifmedia_removeall(&adapter->media);
3438 ixgbe_add_media_types(adapter);
3443 ** Tasklet for handling interrupts from an external PHY
3446 ixgbe_handle_phy(void *context, int pending)
3448 struct adapter *adapter = context;
3449 struct ixgbe_hw *hw = &adapter->hw;
3452 error = hw->phy.ops.handle_lasi(hw);
3453 if (error == IXGBE_ERR_OVERTEMP)
3454 device_printf(adapter->dev,
3455 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3456 " PHY will downshift to lower power state!\n");
3458 device_printf(adapter->dev,
3459 "Error handling LASI interrupt: %d\n",
3466 ** Tasklet for reinitializing the Flow Director filter table
3469 ixgbe_reinit_fdir(void *context, int pending)
3471 struct adapter *adapter = context;
3472 struct ifnet *ifp = adapter->ifp;
3474 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3476 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3477 adapter->fdir_reinit = 0;
3478 /* re-enable flow director interrupts */
3479 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3480 /* Restart the interface */
3481 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3486 /*********************************************************************
3488 * Configure DMA Coalescing
3490 **********************************************************************/
3492 ixgbe_config_dmac(struct adapter *adapter)
3494 struct ixgbe_hw *hw = &adapter->hw;
3495 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3497 if (hw->mac.type < ixgbe_mac_X550 ||
3498 !hw->mac.ops.dmac_config)
3501 if (dcfg->watchdog_timer ^ adapter->dmac ||
3502 dcfg->link_speed ^ adapter->link_speed) {
3503 dcfg->watchdog_timer = adapter->dmac;
3504 dcfg->fcoe_en = false;
3505 dcfg->link_speed = adapter->link_speed;
3508 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3509 dcfg->watchdog_timer, dcfg->link_speed);
3511 hw->mac.ops.dmac_config(hw);
3516 * Checks whether the adapter supports Energy Efficient Ethernet
3517 * or not, based on device ID.
3520 ixgbe_check_eee_support(struct adapter *adapter)
3522 struct ixgbe_hw *hw = &adapter->hw;
3524 adapter->eee_support = adapter->eee_enabled =
3525 (hw->device_id == IXGBE_DEV_ID_X550T ||
3526 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3530 * Checks whether the adapter's ports are capable of
3531 * Wake On LAN by reading the adapter's NVM.
3533 * Sets each port's hw->wol_enabled value depending
3534 * on the value read here.
3537 ixgbe_check_wol_support(struct adapter *adapter)
3539 struct ixgbe_hw *hw = &adapter->hw;
3542 /* Find out WoL support for port */
3543 adapter->wol_support = hw->wol_enabled = 0;
3544 ixgbe_get_device_caps(hw, &dev_caps);
3545 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3546 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3548 adapter->wol_support = hw->wol_enabled = 1;
3550 /* Save initial wake up filter configuration */
3551 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3557 * Prepare the adapter/port for LPLU and/or WoL
3560 ixgbe_setup_low_power_mode(struct adapter *adapter)
3562 struct ixgbe_hw *hw = &adapter->hw;
3563 device_t dev = adapter->dev;
3566 mtx_assert(&adapter->core_mtx, MA_OWNED);
3568 /* Limit power management flow to X550EM baseT */
3569 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3570 && hw->phy.ops.enter_lplu) {
3571 /* Turn off support for APM wakeup. (Using ACPI instead) */
3572 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3573 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3576 * Clear Wake Up Status register to prevent any previous wakeup
3577 * events from waking us up immediately after we suspend.
3579 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3582 * Program the Wakeup Filter Control register with user filter
3585 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3587 /* Enable wakeups and power management in Wakeup Control */
3588 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3589 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3591 /* X550EM baseT adapters need a special LPLU flow */
3592 hw->phy.reset_disable = true;
3593 ixgbe_stop(adapter);
3594 error = hw->phy.ops.enter_lplu(hw);
3597 "Error entering LPLU: %d\n", error);
3598 hw->phy.reset_disable = false;
3600 /* Just stop for other adapters */
3601 ixgbe_stop(adapter);
3607 /**********************************************************************
3609 * Update the board statistics counters.
3611 **********************************************************************/
3613 ixgbe_update_stats_counters(struct adapter *adapter)
3615 struct ixgbe_hw *hw = &adapter->hw;
3616 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3617 u64 total_missed_rx = 0;
3619 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3620 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3621 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3622 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3624 for (int i = 0; i < 16; i++) {
3625 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3626 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3627 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3629 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3630 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3631 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3633 /* Hardware workaround, gprc counts missed packets */
3634 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3635 adapter->stats.pf.gprc -= missed_rx;
3637 if (hw->mac.type != ixgbe_mac_82598EB) {
3638 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3639 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3640 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3641 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3642 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3643 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3644 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3645 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3647 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3648 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3649 /* 82598 only has a counter in the high register */
3650 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3651 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3652 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3656 * Workaround: mprc hardware is incorrectly counting
3657 * broadcasts, so for now we subtract those.
3659 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3660 adapter->stats.pf.bprc += bprc;
3661 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3662 if (hw->mac.type == ixgbe_mac_82598EB)
3663 adapter->stats.pf.mprc -= bprc;
3665 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3666 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3667 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3668 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3669 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3670 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3672 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3673 adapter->stats.pf.lxontxc += lxon;
3674 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3675 adapter->stats.pf.lxofftxc += lxoff;
3676 total = lxon + lxoff;
3678 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3679 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3680 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3681 adapter->stats.pf.gptc -= total;
3682 adapter->stats.pf.mptc -= total;
3683 adapter->stats.pf.ptc64 -= total;
3684 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3686 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3687 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3688 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3689 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3690 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3691 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3692 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3693 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3694 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3695 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3696 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3697 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3698 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3699 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3700 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3701 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3702 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3703 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3704 /* Only read FCOE on 82599 */
3705 if (hw->mac.type != ixgbe_mac_82598EB) {
3706 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3707 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3708 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3709 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3710 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3713 /* Fill out the OS statistics structure */
3714 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3715 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3716 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3717 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3718 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3719 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3720 IXGBE_SET_COLLISIONS(adapter, 0);
3721 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3722 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3723 + adapter->stats.pf.rlec);
3726 #if __FreeBSD_version >= 1100036
3728 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3730 struct adapter *adapter;
3731 struct tx_ring *txr;
3734 adapter = if_getsoftc(ifp);
3737 case IFCOUNTER_IPACKETS:
3738 return (adapter->ipackets);
3739 case IFCOUNTER_OPACKETS:
3740 return (adapter->opackets);
3741 case IFCOUNTER_IBYTES:
3742 return (adapter->ibytes);
3743 case IFCOUNTER_OBYTES:
3744 return (adapter->obytes);
3745 case IFCOUNTER_IMCASTS:
3746 return (adapter->imcasts);
3747 case IFCOUNTER_OMCASTS:
3748 return (adapter->omcasts);
3749 case IFCOUNTER_COLLISIONS:
3751 case IFCOUNTER_IQDROPS:
3752 return (adapter->iqdrops);
3753 case IFCOUNTER_OQDROPS:
3755 txr = adapter->tx_rings;
3756 for (int i = 0; i < adapter->num_queues; i++, txr++)
3757 rv += txr->br->br_drops;
3759 case IFCOUNTER_IERRORS:
3760 return (adapter->ierrors);
3762 return (if_get_counter_default(ifp, cnt));
3767 /** ixgbe_sysctl_tdh_handler - Handler function
3768 * Retrieves the TDH value from the hardware
3771 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3775 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3778 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3779 error = sysctl_handle_int(oidp, &val, 0, req);
3780 if (error || !req->newptr)
3785 /** ixgbe_sysctl_tdt_handler - Handler function
3786 * Retrieves the TDT value from the hardware
3789 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3793 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3796 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3797 error = sysctl_handle_int(oidp, &val, 0, req);
3798 if (error || !req->newptr)
3803 /** ixgbe_sysctl_rdh_handler - Handler function
3804 * Retrieves the RDH value from the hardware
3807 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3811 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3814 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3815 error = sysctl_handle_int(oidp, &val, 0, req);
3816 if (error || !req->newptr)
3821 /** ixgbe_sysctl_rdt_handler - Handler function
3822 * Retrieves the RDT value from the hardware
3825 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3829 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3832 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3833 error = sysctl_handle_int(oidp, &val, 0, req);
3834 if (error || !req->newptr)
3840 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3843 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3844 unsigned int reg, usec, rate;
3846 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3847 usec = ((reg & 0x0FF8) >> 3);
3849 rate = 500000 / usec;
3852 error = sysctl_handle_int(oidp, &rate, 0, req);
3853 if (error || !req->newptr)
3855 reg &= ~0xfff; /* default, no limitation */
3856 ixgbe_max_interrupt_rate = 0;
3857 if (rate > 0 && rate < 500000) {
3860 ixgbe_max_interrupt_rate = rate;
3861 reg |= ((4000000/rate) & 0xff8 );
3863 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3868 ixgbe_add_device_sysctls(struct adapter *adapter)
3870 device_t dev = adapter->dev;
3871 struct ixgbe_hw *hw = &adapter->hw;
3872 struct sysctl_oid_list *child;
3873 struct sysctl_ctx_list *ctx;
3875 ctx = device_get_sysctl_ctx(dev);
3876 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3878 /* Sysctls for all devices */
3879 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3880 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3881 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3883 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3885 &ixgbe_enable_aim, 1, "Interrupt Moderation");
3887 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3888 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3891 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3892 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3893 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3895 /* for X550 devices */
3896 if (hw->mac.type >= ixgbe_mac_X550)
3897 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3898 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3899 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3901 /* for X550T and X550EM backplane devices */
3902 if (hw->device_id == IXGBE_DEV_ID_X550T ||
3903 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3904 struct sysctl_oid *eee_node;
3905 struct sysctl_oid_list *eee_list;
3907 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3909 "Energy Efficient Ethernet sysctls");
3910 eee_list = SYSCTL_CHILDREN(eee_node);
3912 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3913 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3914 ixgbe_sysctl_eee_enable, "I",
3915 "Enable or Disable EEE");
3917 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3918 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3919 ixgbe_sysctl_eee_negotiated, "I",
3920 "EEE negotiated on link");
3922 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3923 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3924 ixgbe_sysctl_eee_tx_lpi_status, "I",
3925 "Whether or not TX link is in LPI state");
3927 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3928 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3929 ixgbe_sysctl_eee_rx_lpi_status, "I",
3930 "Whether or not RX link is in LPI state");
3933 /* for certain 10GBaseT devices */
3934 if (hw->device_id == IXGBE_DEV_ID_X550T ||
3935 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3936 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3937 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3938 ixgbe_sysctl_wol_enable, "I",
3939 "Enable/Disable Wake on LAN");
3941 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3942 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3943 ixgbe_sysctl_wufc, "I",
3944 "Enable/Disable Wake Up Filters");
3947 /* for X550EM 10GBaseT devices */
3948 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3949 struct sysctl_oid *phy_node;
3950 struct sysctl_oid_list *phy_list;
3952 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3954 "External PHY sysctls");
3955 phy_list = SYSCTL_CHILDREN(phy_node);
3957 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3958 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3959 ixgbe_sysctl_phy_temp, "I",
3960 "Current External PHY Temperature (Celsius)");
3962 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3963 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3964 ixgbe_sysctl_phy_overtemp_occurred, "I",
3965 "External PHY High Temperature Event Occurred");
3970 * Add sysctl variables, one per statistic, to the system.
3973 ixgbe_add_hw_stats(struct adapter *adapter)
3975 device_t dev = adapter->dev;
3977 struct tx_ring *txr = adapter->tx_rings;
3978 struct rx_ring *rxr = adapter->rx_rings;
3980 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3981 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3982 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3983 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3985 struct sysctl_oid *stat_node, *queue_node;
3986 struct sysctl_oid_list *stat_list, *queue_list;
3988 #define QUEUE_NAME_LEN 32
3989 char namebuf[QUEUE_NAME_LEN];
3991 /* Driver Statistics */
3992 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3993 CTLFLAG_RD, &adapter->dropped_pkts,
3994 "Driver dropped packets");
3995 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3996 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3997 "m_defrag() failed");
3998 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3999 CTLFLAG_RD, &adapter->watchdog_events,
4000 "Watchdog timeouts");
4001 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4002 CTLFLAG_RD, &adapter->link_irq,
4003 "Link MSIX IRQ Handled");
4005 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4006 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4007 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4008 CTLFLAG_RD, NULL, "Queue Name");
4009 queue_list = SYSCTL_CHILDREN(queue_node);
4011 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4012 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4013 sizeof(&adapter->queues[i]),
4014 ixgbe_sysctl_interrupt_rate_handler, "IU",
4016 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4017 CTLFLAG_RD, &(adapter->queues[i].irqs),
4018 "irqs on this queue");
4019 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4020 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4021 ixgbe_sysctl_tdh_handler, "IU",
4022 "Transmit Descriptor Head");
4023 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4024 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4025 ixgbe_sysctl_tdt_handler, "IU",
4026 "Transmit Descriptor Tail");
4027 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4028 CTLFLAG_RD, &txr->tso_tx,
4030 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4031 CTLFLAG_RD, &txr->no_tx_dma_setup,
4032 "Driver tx dma failure in xmit");
4033 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4034 CTLFLAG_RD, &txr->no_desc_avail,
4035 "Queue No Descriptor Available");
4036 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4037 CTLFLAG_RD, &txr->total_packets,
4038 "Queue Packets Transmitted");
4039 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4040 CTLFLAG_RD, &txr->br->br_drops,
4041 "Packets dropped in buf_ring");
4044 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4045 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4046 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4047 CTLFLAG_RD, NULL, "Queue Name");
4048 queue_list = SYSCTL_CHILDREN(queue_node);
4050 struct lro_ctrl *lro = &rxr->lro;
4052 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4053 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4054 CTLFLAG_RD, NULL, "Queue Name");
4055 queue_list = SYSCTL_CHILDREN(queue_node);
4057 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4058 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4059 ixgbe_sysctl_rdh_handler, "IU",
4060 "Receive Descriptor Head");
4061 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4062 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4063 ixgbe_sysctl_rdt_handler, "IU",
4064 "Receive Descriptor Tail");
4065 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4066 CTLFLAG_RD, &rxr->rx_packets,
4067 "Queue Packets Received");
4068 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4069 CTLFLAG_RD, &rxr->rx_bytes,
4070 "Queue Bytes Received");
4071 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4072 CTLFLAG_RD, &rxr->rx_copies,
4073 "Copied RX Frames");
4074 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4075 CTLFLAG_RD, &lro->lro_queued, 0,
4077 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4078 CTLFLAG_RD, &lro->lro_flushed, 0,
4082 /* MAC stats get the own sub node */
4084 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4085 CTLFLAG_RD, NULL, "MAC Statistics");
4086 stat_list = SYSCTL_CHILDREN(stat_node);
4088 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4089 CTLFLAG_RD, &stats->crcerrs,
4091 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4092 CTLFLAG_RD, &stats->illerrc,
4093 "Illegal Byte Errors");
4094 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4095 CTLFLAG_RD, &stats->errbc,
4097 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4098 CTLFLAG_RD, &stats->mspdc,
4099 "MAC Short Packets Discarded");
4100 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4101 CTLFLAG_RD, &stats->mlfc,
4102 "MAC Local Faults");
4103 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4104 CTLFLAG_RD, &stats->mrfc,
4105 "MAC Remote Faults");
4106 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4107 CTLFLAG_RD, &stats->rlec,
4108 "Receive Length Errors");
4110 /* Flow Control stats */
4111 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4112 CTLFLAG_RD, &stats->lxontxc,
4113 "Link XON Transmitted");
4114 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4115 CTLFLAG_RD, &stats->lxonrxc,
4116 "Link XON Received");
4117 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4118 CTLFLAG_RD, &stats->lxofftxc,
4119 "Link XOFF Transmitted");
4120 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4121 CTLFLAG_RD, &stats->lxoffrxc,
4122 "Link XOFF Received");
4124 /* Packet Reception Stats */
4125 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4126 CTLFLAG_RD, &stats->tor,
4127 "Total Octets Received");
4128 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4129 CTLFLAG_RD, &stats->gorc,
4130 "Good Octets Received");
4131 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4132 CTLFLAG_RD, &stats->tpr,
4133 "Total Packets Received");
4134 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4135 CTLFLAG_RD, &stats->gprc,
4136 "Good Packets Received");
4137 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4138 CTLFLAG_RD, &stats->mprc,
4139 "Multicast Packets Received");
4140 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4141 CTLFLAG_RD, &stats->bprc,
4142 "Broadcast Packets Received");
4143 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4144 CTLFLAG_RD, &stats->prc64,
4145 "64 byte frames received ");
4146 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4147 CTLFLAG_RD, &stats->prc127,
4148 "65-127 byte frames received");
4149 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4150 CTLFLAG_RD, &stats->prc255,
4151 "128-255 byte frames received");
4152 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4153 CTLFLAG_RD, &stats->prc511,
4154 "256-511 byte frames received");
4155 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4156 CTLFLAG_RD, &stats->prc1023,
4157 "512-1023 byte frames received");
4158 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4159 CTLFLAG_RD, &stats->prc1522,
4160 "1023-1522 byte frames received");
4161 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4162 CTLFLAG_RD, &stats->ruc,
4163 "Receive Undersized");
4164 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4165 CTLFLAG_RD, &stats->rfc,
4166 "Fragmented Packets Received ");
4167 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4168 CTLFLAG_RD, &stats->roc,
4169 "Oversized Packets Received");
4170 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4171 CTLFLAG_RD, &stats->rjc,
4173 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4174 CTLFLAG_RD, &stats->mngprc,
4175 "Management Packets Received");
4176 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4177 CTLFLAG_RD, &stats->mngptc,
4178 "Management Packets Dropped");
4179 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4180 CTLFLAG_RD, &stats->xec,
4183 /* Packet Transmission Stats */
4184 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4185 CTLFLAG_RD, &stats->gotc,
4186 "Good Octets Transmitted");
4187 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4188 CTLFLAG_RD, &stats->tpt,
4189 "Total Packets Transmitted");
4190 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4191 CTLFLAG_RD, &stats->gptc,
4192 "Good Packets Transmitted");
4193 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4194 CTLFLAG_RD, &stats->bptc,
4195 "Broadcast Packets Transmitted");
4196 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4197 CTLFLAG_RD, &stats->mptc,
4198 "Multicast Packets Transmitted");
4199 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4200 CTLFLAG_RD, &stats->mngptc,
4201 "Management Packets Transmitted");
4202 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4203 CTLFLAG_RD, &stats->ptc64,
4204 "64 byte frames transmitted ");
4205 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4206 CTLFLAG_RD, &stats->ptc127,
4207 "65-127 byte frames transmitted");
4208 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4209 CTLFLAG_RD, &stats->ptc255,
4210 "128-255 byte frames transmitted");
4211 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4212 CTLFLAG_RD, &stats->ptc511,
4213 "256-511 byte frames transmitted");
4214 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4215 CTLFLAG_RD, &stats->ptc1023,
4216 "512-1023 byte frames transmitted");
4217 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4218 CTLFLAG_RD, &stats->ptc1522,
4219 "1024-1522 byte frames transmitted");
4223 ** Set flow control using sysctl:
4224 ** Flow control values:
4231 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4234 struct adapter *adapter = (struct adapter *) arg1;
4237 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4238 if ((error) || (req->newptr == NULL))
4241 /* Don't bother if it's not changed */
4242 if (adapter->fc == last)
4245 switch (adapter->fc) {
4246 case ixgbe_fc_rx_pause:
4247 case ixgbe_fc_tx_pause:
4249 adapter->hw.fc.requested_mode = adapter->fc;
4250 if (adapter->num_queues > 1)
4251 ixgbe_disable_rx_drop(adapter);
4254 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4255 if (adapter->num_queues > 1)
4256 ixgbe_enable_rx_drop(adapter);
4262 /* Don't autoneg if forcing a value */
4263 adapter->hw.fc.disable_fc_autoneg = TRUE;
4264 ixgbe_fc_enable(&adapter->hw);
4269 ** Control advertised link speed:
4271 ** 0x1 - advertise 100 Mb
4272 ** 0x2 - advertise 1G
4273 ** 0x4 - advertise 10G
4276 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4278 int error = 0, requested;
4279 struct adapter *adapter;
4281 struct ixgbe_hw *hw;
4282 ixgbe_link_speed speed = 0;
4284 adapter = (struct adapter *) arg1;
4288 requested = adapter->advertise;
4289 error = sysctl_handle_int(oidp, &requested, 0, req);
4290 if ((error) || (req->newptr == NULL))
4293 /* Checks to validate new value */
4294 if (adapter->advertise == requested) /* no change */
4297 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4298 (hw->phy.multispeed_fiber))) {
4300 "Advertised speed can only be set on copper or "
4301 "multispeed fiber media types.\n");
4305 if (requested < 0x1 || requested > 0x7) {
4307 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4311 if ((requested & 0x1)
4312 && (hw->mac.type != ixgbe_mac_X540)
4313 && (hw->mac.type != ixgbe_mac_X550)) {
4314 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4318 /* Set new value and report new advertised mode */
4319 if (requested & 0x1)
4320 speed |= IXGBE_LINK_SPEED_100_FULL;
4321 if (requested & 0x2)
4322 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4323 if (requested & 0x4)
4324 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4326 hw->mac.autotry_restart = TRUE;
4327 hw->mac.ops.setup_link(hw, speed, TRUE);
4328 adapter->advertise = requested;
4334 * The following two sysctls are for X550 BaseT devices;
4335 * they deal with the external PHY used in them.
4338 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4340 struct adapter *adapter = (struct adapter *) arg1;
4341 struct ixgbe_hw *hw = &adapter->hw;
4344 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4345 device_printf(adapter->dev,
4346 "Device has no supported external thermal sensor.\n");
4350 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4351 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4353 device_printf(adapter->dev,
4354 "Error reading from PHY's current temperature register\n");
4358 /* Shift temp for output */
4361 return (sysctl_handle_int(oidp, NULL, reg, req));
4365 * Reports whether the current PHY temperature is over
4366 * the overtemp threshold.
4367 * - This is reported directly from the PHY
4370 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4372 struct adapter *adapter = (struct adapter *) arg1;
4373 struct ixgbe_hw *hw = &adapter->hw;
4376 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4377 device_printf(adapter->dev,
4378 "Device has no supported external thermal sensor.\n");
4382 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4383 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4385 device_printf(adapter->dev,
4386 "Error reading from PHY's temperature status register\n");
4390 /* Get occurrence bit */
4391 reg = !!(reg & 0x4000);
4392 return (sysctl_handle_int(oidp, 0, reg, req));
4396 ** Thermal Shutdown Trigger (internal MAC)
4397 ** - Set this to 1 to cause an overtemp event to occur
4400 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4402 struct adapter *adapter = (struct adapter *) arg1;
4403 struct ixgbe_hw *hw = &adapter->hw;
4404 int error, fire = 0;
4406 error = sysctl_handle_int(oidp, &fire, 0, req);
4407 if ((error) || (req->newptr == NULL))
4411 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4412 reg |= IXGBE_EICR_TS;
4413 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4420 ** Manage DMA Coalescing.
4422 ** 0/1 - off / on (use default value of 1000)
4424 ** Legal timer values are:
4425 ** 50,100,250,500,1000,2000,5000,10000
4427 ** Turning off interrupt moderation will also turn this off.
4430 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4432 struct adapter *adapter = (struct adapter *) arg1;
4433 struct ixgbe_hw *hw = &adapter->hw;
4434 struct ifnet *ifp = adapter->ifp;
4438 oldval = adapter->dmac;
4439 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4440 if ((error) || (req->newptr == NULL))
4443 switch (hw->mac.type) {
4444 case ixgbe_mac_X550:
4445 case ixgbe_mac_X550EM_x:
4448 device_printf(adapter->dev,
4449 "DMA Coalescing is only supported on X550 devices\n");
4453 switch (adapter->dmac) {
4457 case 1: /* Enable and use default */
4458 adapter->dmac = 1000;
4468 /* Legal values - allow */
4471 /* Do nothing, illegal value */
4472 adapter->dmac = oldval;
4476 /* Re-initialize hardware if it's already running */
4477 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4478 ixgbe_init(adapter);
4484 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4490 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4492 struct adapter *adapter = (struct adapter *) arg1;
4493 struct ixgbe_hw *hw = &adapter->hw;
4494 int new_wol_enabled;
4497 new_wol_enabled = hw->wol_enabled;
4498 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4499 if ((error) || (req->newptr == NULL))
4501 if (new_wol_enabled == hw->wol_enabled)
4504 if (new_wol_enabled > 0 && !adapter->wol_support)
4507 hw->wol_enabled = !!(new_wol_enabled);
4513 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4514 * if supported by the adapter.
4520 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4522 struct adapter *adapter = (struct adapter *) arg1;
4523 struct ifnet *ifp = adapter->ifp;
4524 int new_eee_enabled, error = 0;
4526 new_eee_enabled = adapter->eee_enabled;
4527 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4528 if ((error) || (req->newptr == NULL))
4530 if (new_eee_enabled == adapter->eee_enabled)
4533 if (new_eee_enabled > 0 && !adapter->eee_support)
4536 adapter->eee_enabled = !!(new_eee_enabled);
4538 /* Re-initialize hardware if it's already running */
4539 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4540 ixgbe_init(adapter);
4546 * Read-only sysctl indicating whether EEE support was negotiated
4550 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4552 struct adapter *adapter = (struct adapter *) arg1;
4553 struct ixgbe_hw *hw = &adapter->hw;
4556 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4558 return (sysctl_handle_int(oidp, 0, status, req));
4562 * Read-only sysctl indicating whether RX Link is in LPI state.
4565 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4567 struct adapter *adapter = (struct adapter *) arg1;
4568 struct ixgbe_hw *hw = &adapter->hw;
4571 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4572 IXGBE_EEE_RX_LPI_STATUS);
4574 return (sysctl_handle_int(oidp, 0, status, req));
4578 * Read-only sysctl indicating whether TX Link is in LPI state.
4581 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4583 struct adapter *adapter = (struct adapter *) arg1;
4584 struct ixgbe_hw *hw = &adapter->hw;
4587 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4588 IXGBE_EEE_TX_LPI_STATUS);
4590 return (sysctl_handle_int(oidp, 0, status, req));
4594 * Sysctl to enable/disable the types of packets that the
4595 * adapter will wake up on upon receipt.
4596 * WUFC - Wake Up Filter Control
4598 * 0x1 - Link Status Change
4599 * 0x2 - Magic Packet
4600 * 0x4 - Direct Exact
4601 * 0x8 - Directed Multicast
4603 * 0x20 - ARP/IPv4 Request Packet
4604 * 0x40 - Direct IPv4 Packet
4605 * 0x80 - Direct IPv6 Packet
4607 * Setting another flag will cause the sysctl to return an
4611 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4613 struct adapter *adapter = (struct adapter *) arg1;
4617 new_wufc = adapter->wufc;
4619 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4620 if ((error) || (req->newptr == NULL))
4622 if (new_wufc == adapter->wufc)
4625 if (new_wufc & 0xffffff00)
4629 new_wufc |= (0xffffff & adapter->wufc);
4630 adapter->wufc = new_wufc;
4637 ** Enable the hardware to drop packets when the buffer is
4638 ** full. This is useful when multiqueue,so that no single
4639 ** queue being full stalls the entire RX engine. We only
4640 ** enable this when Multiqueue AND when Flow Control is
4644 ixgbe_enable_rx_drop(struct adapter *adapter)
4646 struct ixgbe_hw *hw = &adapter->hw;
4648 for (int i = 0; i < adapter->num_queues; i++) {
4649 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4650 srrctl |= IXGBE_SRRCTL_DROP_EN;
4651 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4656 ixgbe_disable_rx_drop(struct adapter *adapter)
4658 struct ixgbe_hw *hw = &adapter->hw;
4660 for (int i = 0; i < adapter->num_queues; i++) {
4661 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4662 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4663 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4668 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4672 switch (adapter->hw.mac.type) {
4673 case ixgbe_mac_82598EB:
4674 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4675 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4677 case ixgbe_mac_82599EB:
4678 case ixgbe_mac_X540:
4679 case ixgbe_mac_X550:
4680 case ixgbe_mac_X550EM_x:
4681 mask = (queues & 0xFFFFFFFF);
4682 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4683 mask = (queues >> 32);
4684 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);