1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
49 /*********************************************************************
50 * Set this to one to display debug statistics
51 *********************************************************************/
/* Non-zero enables the extra debug statistics output described above. */
52 int ixgbe_display_debug_stats = 0;
54 /*********************************************************************
56 *********************************************************************/
/* Driver version string, appended to the device description in ixgbe_probe(). */
57 char ixgbe_driver_version[] = "2.8.3";
59 /*********************************************************************
62 * Used by probe to select devices to load on
63 * Last field stores an index into ixgbe_strings
64 * Last entry must be all 0s
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67 *********************************************************************/
69 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
101 /* required last entry */
105 /*********************************************************************
106 * Table of branding strings
107 *********************************************************************/
109 static char *ixgbe_strings[] = {
110 "Intel(R) PRO/10GbE PCI-Express Network Driver"
113 /*********************************************************************
114 * Function prototypes
115 *********************************************************************/
116 static int ixgbe_probe(device_t);
117 static int ixgbe_attach(device_t);
118 static int ixgbe_detach(device_t);
119 static int ixgbe_shutdown(device_t);
120 static int ixgbe_suspend(device_t);
121 static int ixgbe_resume(device_t);
122 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
123 static void ixgbe_init(void *);
124 static void ixgbe_init_locked(struct adapter *);
125 static void ixgbe_stop(void *);
126 #if __FreeBSD_version >= 1100036
127 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
129 static void ixgbe_add_media_types(struct adapter *);
130 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
131 static int ixgbe_media_change(struct ifnet *);
132 static void ixgbe_identify_hardware(struct adapter *);
133 static int ixgbe_allocate_pci_resources(struct adapter *);
134 static void ixgbe_get_slot_info(struct ixgbe_hw *);
135 static int ixgbe_allocate_msix(struct adapter *);
136 static int ixgbe_allocate_legacy(struct adapter *);
137 static int ixgbe_setup_msix(struct adapter *);
138 static void ixgbe_free_pci_resources(struct adapter *);
139 static void ixgbe_local_timer(void *);
140 static int ixgbe_setup_interface(device_t, struct adapter *);
141 static void ixgbe_config_dmac(struct adapter *);
142 static void ixgbe_config_delay_values(struct adapter *);
143 static void ixgbe_config_link(struct adapter *);
144 static void ixgbe_check_eee_support(struct adapter *);
145 static void ixgbe_check_wol_support(struct adapter *);
146 static int ixgbe_setup_low_power_mode(struct adapter *);
147 static void ixgbe_rearm_queues(struct adapter *, u64);
149 static void ixgbe_initialize_transmit_units(struct adapter *);
150 static void ixgbe_initialize_receive_units(struct adapter *);
151 static void ixgbe_enable_rx_drop(struct adapter *);
152 static void ixgbe_disable_rx_drop(struct adapter *);
154 static void ixgbe_enable_intr(struct adapter *);
155 static void ixgbe_disable_intr(struct adapter *);
156 static void ixgbe_update_stats_counters(struct adapter *);
157 static void ixgbe_set_promisc(struct adapter *);
158 static void ixgbe_set_multi(struct adapter *);
159 static void ixgbe_update_link_status(struct adapter *);
160 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
161 static void ixgbe_configure_ivars(struct adapter *);
162 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
164 static void ixgbe_setup_vlan_hw_support(struct adapter *);
165 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
166 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
168 static void ixgbe_add_device_sysctls(struct adapter *);
169 static void ixgbe_add_hw_stats(struct adapter *);
171 /* Sysctl handlers */
172 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
173 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
185 /* Support for pluggable optic modules */
186 static bool ixgbe_sfp_probe(struct adapter *);
187 static void ixgbe_setup_optics(struct adapter *);
189 /* Legacy (single vector) interrupt handler */
190 static void ixgbe_legacy_irq(void *);
192 /* The MSI/X Interrupt handlers */
193 static void ixgbe_msix_que(void *);
194 static void ixgbe_msix_link(void *);
196 /* Deferred interrupt tasklets */
197 static void ixgbe_handle_que(void *, int);
198 static void ixgbe_handle_link(void *, int);
199 static void ixgbe_handle_msf(void *, int);
200 static void ixgbe_handle_mod(void *, int);
201 static void ixgbe_handle_phy(void *, int);
204 static void ixgbe_reinit_fdir(void *, int);
207 /*********************************************************************
208 * FreeBSD Device Interface Entry Points
209 *********************************************************************/
211 static device_method_t ix_methods[] = {
212 /* Device interface */
213 DEVMETHOD(device_probe, ixgbe_probe),
214 DEVMETHOD(device_attach, ixgbe_attach),
215 DEVMETHOD(device_detach, ixgbe_detach),
216 DEVMETHOD(device_shutdown, ixgbe_shutdown),
217 DEVMETHOD(device_suspend, ixgbe_suspend),
218 DEVMETHOD(device_resume, ixgbe_resume),
222 static driver_t ix_driver = {
223 "ix", ix_methods, sizeof(struct adapter),
/* Register the "ix" driver on the PCI bus and declare its module dependencies. */
226 devclass_t ix_devclass;
227 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
229 MODULE_DEPEND(ix, pci, 1, 1, 1);
230 MODULE_DEPEND(ix, ether, 1, 1, 1);
233 ** TUNEABLE PARAMETERS:
/* Root sysctl node hw.ix: parent of the driver tunables declared below. */
236 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
237     "IXGBE driver parameters");
240 ** AIM: Adaptive Interrupt Moderation
241 ** which means that the interrupt rate
242 ** is varied over time based on the
243 ** traffic for that interrupt vector
/* AIM on/off switch; CTLFLAG_RWTUN makes it both a loader tunable and a
** run-time writable sysctl (hw.ix.enable_aim). */
245 static int ixgbe_enable_aim = TRUE;
246 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
247     "Enable adaptive interrupt moderation");
/* Ceiling on per-vector interrupts per second; boot-time tunable only
** (CTLFLAG_RDTUN: read-only once the system is up). */
249 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
250 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
251     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
253 /* How many packets rxeof tries to clean at a time */
254 static int ixgbe_rx_process_limit = 256;
255 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
256 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
257     &ixgbe_rx_process_limit, 0,
/* Adjacent literals concatenate; the trailing space keeps the description
** readable: "... at a time, -1 means unlimited". */
258     "Maximum number of received packets to process at a time, "
259     "-1 means unlimited");
261 /* How many packets txeof tries to clean at a time */
262 static int ixgbe_tx_process_limit = 256;
263 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
264 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
265     &ixgbe_tx_process_limit, 0,
/* Adjacent literals concatenate; the trailing space keeps the description
** readable: "... at a time, -1 means unlimited". */
266     "Maximum number of sent packets to process at a time, "
267     "-1 means unlimited");
270 ** Smart speed setting, default to on
271 ** this only works as a compile option
272 ** right now as it's during attach, set
273 ** this to 'ixgbe_smart_speed_off' to
/* Compile-time setting only (see comment above): no tunable or sysctl is
** registered for this variable. */
276 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
279 * MSIX should be the default for best performance,
280 * but this allows it to be forced off for testing.
/* Boot-time tunable: set hw.ix.enable_msix=0 to force legacy/MSI interrupts. */
282 static int ixgbe_enable_msix = 1;
283 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
284     "Enable MSI-X interrupts");
287 * Number of Queues, can be set to 0,
288 * it then autoconfigures based on the
289 * number of cpus with a max of 8. This
290 * can be overridden manually here.
/* Queue-count override; 0 (the default) autoconfigures from the CPU count
** per the comment above. Boot-time tunable only. */
292 static int ixgbe_num_queues = 0;
293 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
294     "Number of queues to configure, 0 indicates autoconfigure");
297 ** Number of TX descriptors per ring,
298 ** setting higher than RX as this seems
299 ** the better performing choice.
/* TX descriptors per ring; validated against MIN_TXD/MAX_TXD and DBA_ALIGN
** in ixgbe_attach(), which falls back to DEFAULT_TXD on a bad value. */
301 static int ixgbe_txd = PERFORM_TXD;
302 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
303     "Number of transmit descriptors per queue");
305 /* Number of RX descriptors per ring */
/* Validated against MIN_RXD/MAX_RXD, DBA_ALIGN and nmbclusters in
** ixgbe_attach(), which falls back to DEFAULT_RXD on a bad value. */
306 static int ixgbe_rxd = PERFORM_RXD;
307 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
308     "Number of receive descriptors per queue");
311 ** Defining this on will allow the use
312 ** of unsupported SFP+ modules, note that
313 ** doing so you are on your own :)
/* Boot-time tunable only (no sysctl is registered); the value is copied into
** hw->allow_unsupported_sfp during ixgbe_attach(). */
315 static int allow_unsupported_sfp = FALSE;
316 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
318 /* Keep running tab on them for sanity check */
/* Used by ixgbe_attach() to scale the RX-descriptor vs. nmbclusters check. */
319 static int ixgbe_total_ports;
323 ** Flow Director actually 'steals'
324 ** part of the packet buffer as its
325 ** filter pool, this variable controls
327 ** 0 = 64K, 1 = 128K, 2 = 256K
/* Flow Director filter-pool size, encoded per the comment above (default 1
** = 128K); fed to setup_rxpba()/ixgbe_init_fdir_signature_82599() at init. */
329 static int fdir_pballoc = 1;
334 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
335 * be a reference on how to implement netmap support in a driver.
336 * Additional comments are in ixgbe_netmap.h .
338 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
339 * that extend the standard driver.
341 #include <dev/netmap/ixgbe_netmap.h>
342 #endif /* DEV_NETMAP */
344 /*********************************************************************
345 * Device identification routine
347 * ixgbe_probe determines if the driver should be loaded on
348 * adapter based on PCI vendor/device id of the adapter.
350 * return BUS_PROBE_DEFAULT on success, positive on failure
351 *********************************************************************/
354 ixgbe_probe(device_t dev)
356 ixgbe_vendor_info_t *ent;
358 u16 pci_vendor_id = 0;
359 u16 pci_device_id = 0;
360 u16 pci_subvendor_id = 0;
361 u16 pci_subdevice_id = 0;
362 char adapter_name[256];
364 INIT_DEBUGOUT("ixgbe_probe: begin");
366 pci_vendor_id = pci_get_vendor(dev);
367 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
370 pci_device_id = pci_get_device(dev);
371 pci_subvendor_id = pci_get_subvendor(dev);
372 pci_subdevice_id = pci_get_subdevice(dev);
374 ent = ixgbe_vendor_info_array;
375 while (ent->vendor_id != 0) {
376 if ((pci_vendor_id == ent->vendor_id) &&
377 (pci_device_id == ent->device_id) &&
379 ((pci_subvendor_id == ent->subvendor_id) ||
380 (ent->subvendor_id == 0)) &&
382 ((pci_subdevice_id == ent->subdevice_id) ||
383 (ent->subdevice_id == 0))) {
384 sprintf(adapter_name, "%s, Version - %s",
385 ixgbe_strings[ent->index],
386 ixgbe_driver_version);
387 device_set_desc_copy(dev, adapter_name);
389 return (BUS_PROBE_DEFAULT);
396 /*********************************************************************
397 * Device initialization routine
399 * The attach entry point is called when the driver is being loaded.
400 * This routine identifies the type of hardware, allocates all resources
401 * and initializes the hardware.
403 * return 0 on success, positive on failure
404 *********************************************************************/
407 ixgbe_attach(device_t dev)
409 struct adapter *adapter;
415 INIT_DEBUGOUT("ixgbe_attach: begin");
417 /* Allocate, clear, and link in our adapter structure */
418 adapter = device_get_softc(dev);
419 adapter->dev = adapter->osdep.dev = dev;
423 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
425 /* Set up the timer callout */
426 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
428 /* Determine hardware revision */
429 ixgbe_identify_hardware(adapter);
431 /* Do base PCI setup - map BAR0 */
432 if (ixgbe_allocate_pci_resources(adapter)) {
433 device_printf(dev, "Allocation of PCI resources failed\n");
438 /* Do descriptor calc and sanity checks */
439 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
440 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
441 device_printf(dev, "TXD config issue, using default!\n");
442 adapter->num_tx_desc = DEFAULT_TXD;
444 adapter->num_tx_desc = ixgbe_txd;
447 ** With many RX rings it is easy to exceed the
448 ** system mbuf allocation. Tuning nmbclusters
449 ** can alleviate this.
451 if (nmbclusters > 0) {
453 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
454 if (s > nmbclusters) {
455 device_printf(dev, "RX Descriptors exceed "
456 "system mbuf max, using default instead!\n");
457 ixgbe_rxd = DEFAULT_RXD;
461 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
462 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
463 device_printf(dev, "RXD config issue, using default!\n");
464 adapter->num_rx_desc = DEFAULT_RXD;
466 adapter->num_rx_desc = ixgbe_rxd;
468 /* Allocate our TX/RX Queues */
469 if (ixgbe_allocate_queues(adapter)) {
474 /* Allocate multicast array memory. */
475 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
476 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
477 if (adapter->mta == NULL) {
478 device_printf(dev, "Can not allocate multicast setup array\n");
483 /* Initialize the shared code */
484 hw->allow_unsupported_sfp = allow_unsupported_sfp;
485 error = ixgbe_init_shared_code(hw);
486 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
488 ** No optics in this port, set up
489 ** so the timer routine will probe
490 ** for later insertion.
492 adapter->sfp_probe = TRUE;
494 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
495 device_printf(dev,"Unsupported SFP+ module detected!\n");
499 device_printf(dev,"Unable to initialize the shared code\n");
504 /* Make sure we have a good EEPROM before we read from it */
505 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
506 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
511 error = ixgbe_init_hw(hw);
513 case IXGBE_ERR_EEPROM_VERSION:
514 device_printf(dev, "This device is a pre-production adapter/"
515 "LOM. Please be aware there may be issues associated "
516 "with your hardware.\n If you are experiencing problems "
517 "please contact your Intel or hardware representative "
518 "who provided you with this hardware.\n");
520 case IXGBE_ERR_SFP_NOT_SUPPORTED:
521 device_printf(dev,"Unsupported SFP+ Module\n");
524 case IXGBE_ERR_SFP_NOT_PRESENT:
525 device_printf(dev,"No SFP+ Module found\n");
531 /* Detect and set physical type */
532 ixgbe_setup_optics(adapter);
534 if ((adapter->msix > 1) && (ixgbe_enable_msix))
535 error = ixgbe_allocate_msix(adapter);
537 error = ixgbe_allocate_legacy(adapter);
541 /* Setup OS specific network interface */
542 if (ixgbe_setup_interface(dev, adapter) != 0)
545 /* Initialize statistics */
546 ixgbe_update_stats_counters(adapter);
548 /* Register for VLAN events */
549 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
550 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
551 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
552 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
554 /* Check PCIE slot type/speed/width */
555 ixgbe_get_slot_info(hw);
558 /* Set an initial default flow control value */
559 adapter->fc = ixgbe_fc_full;
561 /* Check for certain supported features */
562 ixgbe_check_wol_support(adapter);
563 ixgbe_check_eee_support(adapter);
566 ixgbe_add_device_sysctls(adapter);
567 ixgbe_add_hw_stats(adapter);
569 /* let hardware know driver is loaded */
570 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
571 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
572 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
575 ixgbe_netmap_attach(adapter);
576 #endif /* DEV_NETMAP */
577 INIT_DEBUGOUT("ixgbe_attach: end");
581 ixgbe_free_transmit_structures(adapter);
582 ixgbe_free_receive_structures(adapter);
584 if (adapter->ifp != NULL)
585 if_free(adapter->ifp);
586 ixgbe_free_pci_resources(adapter);
587 free(adapter->mta, M_DEVBUF);
591 /*********************************************************************
592 * Device removal routine
594 * The detach entry point is called when the driver is being removed.
595 * This routine stops the adapter and deallocates all the resources
596 * that were allocated for driver operation.
598 * return 0 on success, positive on failure
599 *********************************************************************/
602 ixgbe_detach(device_t dev)
604 struct adapter *adapter = device_get_softc(dev);
605 struct ix_queue *que = adapter->queues;
606 struct tx_ring *txr = adapter->tx_rings;
609 INIT_DEBUGOUT("ixgbe_detach: begin");
611 /* Make sure VLANS are not using driver */
612 if (adapter->ifp->if_vlantrunk != NULL) {
613 device_printf(dev,"Vlan in use, detach first\n");
617 /* Stop the adapter */
618 IXGBE_CORE_LOCK(adapter);
619 ixgbe_setup_low_power_mode(adapter);
620 IXGBE_CORE_UNLOCK(adapter);
622 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
624 #ifndef IXGBE_LEGACY_TX
625 taskqueue_drain(que->tq, &txr->txq_task);
627 taskqueue_drain(que->tq, &que->que_task);
628 taskqueue_free(que->tq);
632 /* Drain the Link queue */
634 taskqueue_drain(adapter->tq, &adapter->link_task);
635 taskqueue_drain(adapter->tq, &adapter->mod_task);
636 taskqueue_drain(adapter->tq, &adapter->msf_task);
637 taskqueue_drain(adapter->tq, &adapter->phy_task);
639 taskqueue_drain(adapter->tq, &adapter->fdir_task);
641 taskqueue_free(adapter->tq);
644 /* let hardware know driver is unloading */
645 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
646 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
647 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
649 /* Unregister VLAN events */
650 if (adapter->vlan_attach != NULL)
651 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
652 if (adapter->vlan_detach != NULL)
653 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
655 ether_ifdetach(adapter->ifp);
656 callout_drain(&adapter->timer);
658 netmap_detach(adapter->ifp);
659 #endif /* DEV_NETMAP */
660 ixgbe_free_pci_resources(adapter);
661 bus_generic_detach(dev);
662 if_free(adapter->ifp);
664 ixgbe_free_transmit_structures(adapter);
665 ixgbe_free_receive_structures(adapter);
666 free(adapter->mta, M_DEVBUF);
668 IXGBE_CORE_LOCK_DESTROY(adapter);
672 /*********************************************************************
674 * Shutdown entry point
676 **********************************************************************/
679 ixgbe_shutdown(device_t dev)
681 struct adapter *adapter = device_get_softc(dev);
684 INIT_DEBUGOUT("ixgbe_shutdown: begin");
686 IXGBE_CORE_LOCK(adapter);
687 error = ixgbe_setup_low_power_mode(adapter);
688 IXGBE_CORE_UNLOCK(adapter);
694 * Methods for going from:
695 * D0 -> D3: ixgbe_suspend
696 * D3 -> D0: ixgbe_resume
699 ixgbe_suspend(device_t dev)
701 struct adapter *adapter = device_get_softc(dev);
704 INIT_DEBUGOUT("ixgbe_suspend: begin");
706 IXGBE_CORE_LOCK(adapter);
708 error = ixgbe_setup_low_power_mode(adapter);
710 /* Save state and power down */
712 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
714 IXGBE_CORE_UNLOCK(adapter);
720 ixgbe_resume(device_t dev)
722 struct adapter *adapter = device_get_softc(dev);
723 struct ifnet *ifp = adapter->ifp;
724 struct ixgbe_hw *hw = &adapter->hw;
727 INIT_DEBUGOUT("ixgbe_resume: begin");
729 IXGBE_CORE_LOCK(adapter);
731 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
732 pci_restore_state(dev);
734 /* Read & clear WUS register */
735 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
737 device_printf(dev, "Woken up by (WUS): %#010x\n",
738 IXGBE_READ_REG(hw, IXGBE_WUS));
739 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
740 /* And clear WUFC until next low-power transition */
741 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
744 * Required after D3->D0 transition;
745 * will re-advertise all previous advertised speeds
747 if (ifp->if_flags & IFF_UP)
748 ixgbe_init_locked(adapter);
750 IXGBE_CORE_UNLOCK(adapter);
752 INIT_DEBUGOUT("ixgbe_resume: end");
757 /*********************************************************************
760 * ixgbe_ioctl is called when the user wants to configure the
763 * return 0 on success, positive on failure
764 **********************************************************************/
767 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
769 struct adapter *adapter = ifp->if_softc;
770 struct ifreq *ifr = (struct ifreq *) data;
771 #if defined(INET) || defined(INET6)
772 struct ifaddr *ifa = (struct ifaddr *)data;
773 bool avoid_reset = FALSE;
781 if (ifa->ifa_addr->sa_family == AF_INET)
785 if (ifa->ifa_addr->sa_family == AF_INET6)
788 #if defined(INET) || defined(INET6)
790 ** Calling init results in link renegotiation,
791 ** so we avoid doing it when possible.
794 ifp->if_flags |= IFF_UP;
795 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
797 if (!(ifp->if_flags & IFF_NOARP))
798 arp_ifinit(ifp, ifa);
800 error = ether_ioctl(ifp, command, data);
804 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
805 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
808 IXGBE_CORE_LOCK(adapter);
809 ifp->if_mtu = ifr->ifr_mtu;
810 adapter->max_frame_size =
811 ifp->if_mtu + IXGBE_MTU_HDR;
812 ixgbe_init_locked(adapter);
813 IXGBE_CORE_UNLOCK(adapter);
817 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
818 IXGBE_CORE_LOCK(adapter);
819 if (ifp->if_flags & IFF_UP) {
820 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
821 if ((ifp->if_flags ^ adapter->if_flags) &
822 (IFF_PROMISC | IFF_ALLMULTI)) {
823 ixgbe_set_promisc(adapter);
826 ixgbe_init_locked(adapter);
828 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
830 adapter->if_flags = ifp->if_flags;
831 IXGBE_CORE_UNLOCK(adapter);
835 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
836 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
837 IXGBE_CORE_LOCK(adapter);
838 ixgbe_disable_intr(adapter);
839 ixgbe_set_multi(adapter);
840 ixgbe_enable_intr(adapter);
841 IXGBE_CORE_UNLOCK(adapter);
846 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
847 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
851 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
852 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
853 if (mask & IFCAP_HWCSUM)
854 ifp->if_capenable ^= IFCAP_HWCSUM;
855 if (mask & IFCAP_TSO4)
856 ifp->if_capenable ^= IFCAP_TSO4;
857 if (mask & IFCAP_TSO6)
858 ifp->if_capenable ^= IFCAP_TSO6;
859 if (mask & IFCAP_LRO)
860 ifp->if_capenable ^= IFCAP_LRO;
861 if (mask & IFCAP_VLAN_HWTAGGING)
862 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
863 if (mask & IFCAP_VLAN_HWFILTER)
864 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
865 if (mask & IFCAP_VLAN_HWTSO)
866 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
867 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
868 IXGBE_CORE_LOCK(adapter);
869 ixgbe_init_locked(adapter);
870 IXGBE_CORE_UNLOCK(adapter);
872 VLAN_CAPABILITIES(ifp);
875 #if __FreeBSD_version >= 1100036
878 struct ixgbe_hw *hw = &adapter->hw;
881 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
882 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
885 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
889 if (i2c.len > sizeof(i2c.data)) {
894 for (i = 0; i < i2c.len; i++)
895 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
896 i2c.dev_addr, &i2c.data[i]);
897 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
902 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
903 error = ether_ioctl(ifp, command, data);
910 /*********************************************************************
913 * This routine is used in two ways. It is used by the stack as
914 * init entry point in network interface structure. It is also used
915 * by the driver as a hw/sw initialization routine to get to a
918 * return 0 on success, positive on failure
919 **********************************************************************/
920 #define IXGBE_MHADD_MFS_SHIFT 16
923 ixgbe_init_locked(struct adapter *adapter)
925 struct ifnet *ifp = adapter->ifp;
926 device_t dev = adapter->dev;
927 struct ixgbe_hw *hw = &adapter->hw;
928 u32 k, txdctl, mhadd, gpie;
931 mtx_assert(&adapter->core_mtx, MA_OWNED);
932 INIT_DEBUGOUT("ixgbe_init_locked: begin");
933 hw->adapter_stopped = FALSE;
934 ixgbe_stop_adapter(hw);
935 callout_stop(&adapter->timer);
937 /* reprogram the RAR[0] in case user changed it. */
938 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
940 /* Get the latest mac address, User can use a LAA */
941 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
942 IXGBE_ETH_LENGTH_OF_ADDRESS);
943 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
944 hw->addr_ctrl.rar_used_count = 1;
946 /* Set the various hardware offload abilities */
947 ifp->if_hwassist = 0;
948 if (ifp->if_capenable & IFCAP_TSO)
949 ifp->if_hwassist |= CSUM_TSO;
950 if (ifp->if_capenable & IFCAP_TXCSUM) {
951 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
952 #if __FreeBSD_version >= 800000
953 if (hw->mac.type != ixgbe_mac_82598EB)
954 ifp->if_hwassist |= CSUM_SCTP;
958 /* Prepare transmit descriptors and buffers */
959 if (ixgbe_setup_transmit_structures(adapter)) {
960 device_printf(dev, "Could not setup transmit structures\n");
966 ixgbe_initialize_transmit_units(adapter);
968 /* Setup Multicast table */
969 ixgbe_set_multi(adapter);
972 ** Determine the correct mbuf pool
973 ** for doing jumbo frames
975 if (adapter->max_frame_size <= 2048)
976 adapter->rx_mbuf_sz = MCLBYTES;
977 else if (adapter->max_frame_size <= 4096)
978 adapter->rx_mbuf_sz = MJUMPAGESIZE;
979 else if (adapter->max_frame_size <= 9216)
980 adapter->rx_mbuf_sz = MJUM9BYTES;
982 adapter->rx_mbuf_sz = MJUM16BYTES;
984 /* Prepare receive descriptors and buffers */
985 if (ixgbe_setup_receive_structures(adapter)) {
986 device_printf(dev, "Could not setup receive structures\n");
991 /* Configure RX settings */
992 ixgbe_initialize_receive_units(adapter);
994 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
996 /* Enable Fan Failure Interrupt */
997 gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
999 /* Add for Module detection */
1000 if (hw->mac.type == ixgbe_mac_82599EB)
1001 gpie |= IXGBE_SDP2_GPIEN;
1004 * Thermal Failure Detection (X540)
1005 * Link Detection (X552)
1007 if (hw->mac.type == ixgbe_mac_X540 ||
1008 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1009 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1010 gpie |= IXGBE_SDP0_GPIEN_X540;
1012 if (adapter->msix > 1) {
1013 /* Enable Enhanced MSIX mode */
1014 gpie |= IXGBE_GPIE_MSIX_MODE;
1015 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1018 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1021 if (ifp->if_mtu > ETHERMTU) {
1022 /* aka IXGBE_MAXFRS on 82599 and newer */
1023 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1024 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1025 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1026 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1029 /* Now enable all the queues */
1030 for (int i = 0; i < adapter->num_queues; i++) {
1031 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1032 txdctl |= IXGBE_TXDCTL_ENABLE;
1033 /* Set WTHRESH to 8, burst writeback */
1034 txdctl |= (8 << 16);
1036 * When the internal queue falls below PTHRESH (32),
1037 * start prefetching as long as there are at least
1038 * HTHRESH (1) buffers ready. The values are taken
1039 * from the Intel linux driver 3.8.21.
1040 * Prefetching enables tx line rate even with 1 queue.
1042 txdctl |= (32 << 0) | (1 << 8);
1043 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1046 for (int i = 0; i < adapter->num_queues; i++) {
1047 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1048 if (hw->mac.type == ixgbe_mac_82598EB) {
1054 rxdctl &= ~0x3FFFFF;
1057 rxdctl |= IXGBE_RXDCTL_ENABLE;
1058 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1059 for (k = 0; k < 10; k++) {
1060 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1061 IXGBE_RXDCTL_ENABLE)
1069 * In netmap mode, we must preserve the buffers made
1070 * available to userspace before the if_init()
1071 * (this is true by default on the TX side, because
1072 * init makes all buffers available to userspace).
1074 * netmap_reset() and the device specific routines
1075 * (e.g. ixgbe_setup_receive_rings()) map these
1076 * buffers at the end of the NIC ring, so here we
1077 * must set the RDT (tail) register to make sure
1078 * they are not overwritten.
1080 * In this driver the NIC ring starts at RDH = 0,
1081 * RDT points to the last slot available for reception (?),
1082 * so RDT = num_rx_desc - 1 means the whole ring is available.
1084 if (ifp->if_capenable & IFCAP_NETMAP) {
1085 struct netmap_adapter *na = NA(adapter->ifp);
1086 struct netmap_kring *kring = &na->rx_rings[i];
1087 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1089 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1091 #endif /* DEV_NETMAP */
1092 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1095 /* Enable Receive engine */
1096 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1097 if (hw->mac.type == ixgbe_mac_82598EB)
1098 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1099 rxctrl |= IXGBE_RXCTRL_RXEN;
1100 ixgbe_enable_rx_dma(hw, rxctrl);
1102 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1104 /* Set up MSI/X routing */
1105 if (ixgbe_enable_msix) {
1106 ixgbe_configure_ivars(adapter);
1107 /* Set up auto-mask */
1108 if (hw->mac.type == ixgbe_mac_82598EB)
1109 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1111 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1112 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1114 } else { /* Simple settings for Legacy/MSI */
1115 ixgbe_set_ivar(adapter, 0, 0, 0);
1116 ixgbe_set_ivar(adapter, 0, 0, 1);
1117 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1121 /* Init Flow director */
1122 if (hw->mac.type != ixgbe_mac_82598EB) {
1123 u32 hdrm = 32 << fdir_pballoc;
1125 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1126 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1131 ** Check on any SFP devices that
1132 ** need to be kick-started
1134 if (hw->phy.type == ixgbe_phy_none) {
1135 int err = hw->phy.ops.identify(hw);
1136 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1138 "Unsupported SFP+ module type was detected.\n");
1143 /* Set moderation on the Link interrupt */
1144 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1146 /* Configure Energy Efficient Ethernet for supported devices */
1147 if (adapter->eee_support)
1148 ixgbe_setup_eee(hw, adapter->eee_enabled);
1150 /* Config/Enable Link */
1151 ixgbe_config_link(adapter);
1153 /* Hardware Packet Buffer & Flow Control setup */
1154 ixgbe_config_delay_values(adapter);
1156 /* Initialize the FC settings */
1159 /* Set up VLAN support and filter */
1160 ixgbe_setup_vlan_hw_support(adapter);
1162 /* Setup DMA Coalescing */
1163 ixgbe_config_dmac(adapter);
1165 /* And now turn on interrupts */
1166 ixgbe_enable_intr(adapter);
1168 /* Now inform the stack we're ready */
1169 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * if_init entry point.
 *
 * Thin locked wrapper: takes the core mutex and hands off to
 * ixgbe_init_locked(), which performs the actual initialization.
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1186 ixgbe_config_delay_values(struct adapter *adapter)
1188 struct ixgbe_hw *hw = &adapter->hw;
1189 u32 rxpb, frame, size, tmp;
1191 frame = adapter->max_frame_size;
1193 /* Calculate High Water */
1194 switch (hw->mac.type) {
1195 case ixgbe_mac_X540:
1196 case ixgbe_mac_X550:
1197 case ixgbe_mac_X550EM_x:
1198 tmp = IXGBE_DV_X540(frame, frame);
1201 tmp = IXGBE_DV(frame, frame);
1204 size = IXGBE_BT2KB(tmp);
1205 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1206 hw->fc.high_water[0] = rxpb - size;
1208 /* Now calculate Low Water */
1209 switch (hw->mac.type) {
1210 case ixgbe_mac_X540:
1211 case ixgbe_mac_X550:
1212 case ixgbe_mac_X550EM_x:
1213 tmp = IXGBE_LOW_DV_X540(frame);
1216 tmp = IXGBE_LOW_DV(frame);
1219 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1221 hw->fc.requested_mode = adapter->fc;
1222 hw->fc.pause_time = IXGBE_FC_PAUSE;
1223 hw->fc.send_xon = TRUE;
1228 ** MSIX Interrupt Handlers and Tasklets
1233 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1235 struct ixgbe_hw *hw = &adapter->hw;
1236 u64 queue = (u64)(1 << vector);
1239 if (hw->mac.type == ixgbe_mac_82598EB) {
1240 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1241 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1243 mask = (queue & 0xFFFFFFFF);
1245 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1246 mask = (queue >> 32);
1248 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1253 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1255 struct ixgbe_hw *hw = &adapter->hw;
1256 u64 queue = (u64)(1 << vector);
1259 if (hw->mac.type == ixgbe_mac_82598EB) {
1260 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1261 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1263 mask = (queue & 0xFFFFFFFF);
1265 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1266 mask = (queue >> 32);
1268 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
** Deferred taskqueue handler for one RX/TX queue pair: drain RX
** completions, kick the transmit path, then re-arm the interrupt.
** Runs when the ISR saw more work than it wanted to do in-line.
*/
1273 ixgbe_handle_que(void *context, int pending)
1275 struct ix_queue *que = context;
1276 struct adapter *adapter = que->adapter;
1277 struct tx_ring *txr = que->txr;
1278 struct ifnet *ifp = adapter->ifp;
/* Only touch the rings while the interface is marked running */
1281 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1282 more = ixgbe_rxeof(que);
/* Restart transmit: multiqueue (drbr) or legacy if_snd path */
1285 #ifndef IXGBE_LEGACY_TX
1286 if (!drbr_empty(ifp, txr->br))
1287 ixgbe_mq_start_locked(ifp, txr);
1289 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1290 ixgbe_start_locked(txr, ifp);
1292 IXGBE_TX_UNLOCK(txr);
1295 /* Reenable this interrupt */
/* que->res set => MSI-X per-queue vector; otherwise legacy/MSI */
1296 if (que->res != NULL)
1297 ixgbe_enable_queue(adapter, que->msix);
1299 ixgbe_enable_intr(adapter);
1304 /*********************************************************************
1306 * Legacy Interrupt Service routine
1308 **********************************************************************/
/*
** Legacy/MSI interrupt handler: a single vector services the one
** queue pair plus the "other" causes (fan failure, link change,
** external PHY), then re-enables interrupts.
*/
1311 ixgbe_legacy_irq(void *arg)
1313 struct ix_queue *que = arg;
1314 struct adapter *adapter = que->adapter;
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 struct ifnet *ifp = adapter->ifp;
1317 struct tx_ring *txr = adapter->tx_rings;
/* Reading EICR acknowledges/clears the asserted cause bits */
1322 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Shared interrupt line but not ours: just unmask and bail */
1325 if (reg_eicr == 0) {
1326 ixgbe_enable_intr(adapter);
1330 more = ixgbe_rxeof(que);
/* Restart transmit: legacy if_snd or multiqueue (drbr) path */
1334 #ifdef IXGBE_LEGACY_TX
1335 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1336 ixgbe_start_locked(txr, ifp);
1338 if (!drbr_empty(ifp, txr->br))
1339 ixgbe_mq_start_locked(ifp, txr);
1341 IXGBE_TX_UNLOCK(txr);
1343 /* Check for fan failure */
1344 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1345 (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1346 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1347 "REPLACE IMMEDIATELY!!\n");
/* Re-arm SDP1 so further fan events keep interrupting */
1348 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1351 /* Link status change */
/* Heavy work is deferred to taskqueues; ISR only enqueues */
1352 if (reg_eicr & IXGBE_EICR_LSC)
1353 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1355 /* External PHY interrupt */
1356 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1357 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1358 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: run the deferred queue task */
1361 taskqueue_enqueue(que->tq, &que->que_task);
1363 ixgbe_enable_intr(adapter);
1368 /*********************************************************************
1370 * MSIX Queue Interrupt Service routine
1372 **********************************************************************/
1374 ixgbe_msix_que(void *arg)
1376 struct ix_queue *que = arg;
1377 struct adapter *adapter = que->adapter;
1378 struct ifnet *ifp = adapter->ifp;
1379 struct tx_ring *txr = que->txr;
1380 struct rx_ring *rxr = que->rxr;
1384 /* Protect against spurious interrupts */
1385 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1388 ixgbe_disable_queue(adapter, que->msix);
1391 more = ixgbe_rxeof(que);
1395 #ifdef IXGBE_LEGACY_TX
1396 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1397 ixgbe_start_locked(txr, ifp);
1399 if (!drbr_empty(ifp, txr->br))
1400 ixgbe_mq_start_locked(ifp, txr);
1402 IXGBE_TX_UNLOCK(txr);
1406 if (ixgbe_enable_aim == FALSE)
1409 ** Do Adaptive Interrupt Moderation:
1410 ** - Write out last calculated setting
1411 ** - Calculate based on average size over
1412 ** the last interval.
1414 if (que->eitr_setting)
1415 IXGBE_WRITE_REG(&adapter->hw,
1416 IXGBE_EITR(que->msix), que->eitr_setting);
1418 que->eitr_setting = 0;
1420 /* Idle, do nothing */
1421 if ((txr->bytes == 0) && (rxr->bytes == 0))
1424 if ((txr->bytes) && (txr->packets))
1425 newitr = txr->bytes/txr->packets;
1426 if ((rxr->bytes) && (rxr->packets))
1427 newitr = max(newitr,
1428 (rxr->bytes / rxr->packets));
1429 newitr += 24; /* account for hardware frame, crc */
1431 /* set an upper boundary */
1432 newitr = min(newitr, 3000);
1434 /* Be nice to the mid range */
1435 if ((newitr > 300) && (newitr < 1200))
1436 newitr = (newitr / 3);
1438 newitr = (newitr / 2);
1440 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1441 newitr |= newitr << 16;
1443 newitr |= IXGBE_EITR_CNT_WDIS;
1445 /* save for next interrupt */
1446 que->eitr_setting = newitr;
1456 taskqueue_enqueue(que->tq, &que->que_task);
1458 ixgbe_enable_queue(adapter, que->msix);
/*
** MSI-X "other causes" vector: link changes, Flow Director reinit,
** ECC/thermal errors, SFP module/multispeed events, fan failure and
** external PHY interrupts.  All real work is deferred to taskqueues.
*/
1464 ixgbe_msix_link(void *arg)
1466 struct adapter *adapter = arg;
1467 struct ixgbe_hw *hw = &adapter->hw;
1468 u32 reg_eicr, mod_mask;
1470 ++adapter->link_irq;
1472 /* First get the cause */
/* NOTE(review): reads EICS (cause-set register) rather than EICR;
** this matches the upstream driver of this vintage — confirm intent. */
1473 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1474 /* Be sure the queue bits are not cleared */
1475 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1476 /* Clear interrupt with write */
1477 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1479 /* Link status change */
1480 if (reg_eicr & IXGBE_EICR_LSC)
1481 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Causes below only exist on 82599 and newer MACs */
1483 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1485 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1486 /* This is probably overkill :) */
/* Guard against enqueueing a second reinit while one is pending */
1487 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1489 /* Disable the interrupt */
1490 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1491 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1494 if (reg_eicr & IXGBE_EICR_ECC) {
1495 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1496 "Please Reboot!!\n");
/* Write-1-to-clear the ECC cause so it does not re-fire */
1497 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1500 /* Check for over temp condition */
1501 if (reg_eicr & IXGBE_EICR_TS) {
1502 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1503 "PHY IS SHUT DOWN!!\n");
1504 device_printf(adapter->dev, "System shutdown required!\n");
1505 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1509 /* Pluggable optics-related interrupt */
/* X550EM SFP parts signal module presence on SDP0, others on SDP2 */
1510 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1511 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1513 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1515 if (ixgbe_is_sfp(hw)) {
/* SDP1 = multispeed-fiber event, mod_mask = module insert/remove */
1516 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1517 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1518 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1519 } else if (reg_eicr & mod_mask) {
1520 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1521 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1525 /* Check for fan failure */
1526 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1527 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1528 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1529 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1530 "REPLACE IMMEDIATELY!!\n");
1533 /* External PHY interrupt */
1534 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1535 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1536 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1537 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* Re-enable only the "other" cause; queue vectors manage themselves */
1540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1544 /*********************************************************************
1546 * Media Ioctl callback
1548 * This routine is called whenever the user queries the status of
1549 * the interface using ifconfig.
1551 **********************************************************************/
/*
** ifmedia status callback (ifconfig queries land here).
** Refreshes link state under the core lock, then translates the
** supported physical layer + current link speed into IFM_* media
** words, and reports the active flow-control pause settings.
*/
1553 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1555 struct adapter *adapter = ifp->if_softc;
1556 struct ixgbe_hw *hw = &adapter->hw;
1559 INIT_DEBUGOUT("ixgbe_media_status: begin");
1560 IXGBE_CORE_LOCK(adapter);
1561 ixgbe_update_link_status(adapter);
1563 ifmr->ifm_status = IFM_AVALID;
1564 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid, ether" and return early */
1566 if (!adapter->link_active) {
1567 IXGBE_CORE_UNLOCK(adapter);
1571 ifmr->ifm_status |= IFM_ACTIVE;
1572 layer = ixgbe_get_supported_physical_layer(hw);
/* Copper/baseT layers: map speed to the -T/-TX subtypes */
1574 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1575 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1576 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1577 switch (adapter->link_speed) {
1578 case IXGBE_LINK_SPEED_10GB_FULL:
1579 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1581 case IXGBE_LINK_SPEED_1GB_FULL:
1582 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1584 case IXGBE_LINK_SPEED_100_FULL:
1585 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
/* Direct-attach copper (twinax) */
1588 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1589 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1590 switch (adapter->link_speed) {
1591 case IXGBE_LINK_SPEED_10GB_FULL:
1592 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Long-reach fiber */
1595 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1596 switch (adapter->link_speed) {
1597 case IXGBE_LINK_SPEED_10GB_FULL:
1598 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1600 case IXGBE_LINK_SPEED_1GB_FULL:
1601 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1604 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1605 switch (adapter->link_speed) {
1606 case IXGBE_LINK_SPEED_10GB_FULL:
1607 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1609 case IXGBE_LINK_SPEED_1GB_FULL:
1610 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
/* Short-reach fiber */
1613 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1614 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1615 switch (adapter->link_speed) {
1616 case IXGBE_LINK_SPEED_10GB_FULL:
1617 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1619 case IXGBE_LINK_SPEED_1GB_FULL:
1620 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1623 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1624 switch (adapter->link_speed) {
1625 case IXGBE_LINK_SPEED_10GB_FULL:
1626 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
/* Backplane KR/KX4/KX: reported with stand-in subtypes */
1630 ** XXX: These need to use the proper media types once
1633 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1634 switch (adapter->link_speed) {
1635 case IXGBE_LINK_SPEED_10GB_FULL:
1636 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1638 case IXGBE_LINK_SPEED_2_5GB_FULL:
1639 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1641 case IXGBE_LINK_SPEED_1GB_FULL:
1642 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1645 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1646 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1647 switch (adapter->link_speed) {
1648 case IXGBE_LINK_SPEED_10GB_FULL:
1649 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1651 case IXGBE_LINK_SPEED_2_5GB_FULL:
1652 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1654 case IXGBE_LINK_SPEED_1GB_FULL:
1655 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1659 /* If nothing is recognized... */
1660 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1661 ifmr->ifm_active |= IFM_UNKNOWN;
1663 #if __FreeBSD_version >= 900025
1664 /* Display current flow control setting used on link */
1665 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1666 hw->fc.current_mode == ixgbe_fc_full)
1667 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1668 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1669 hw->fc.current_mode == ixgbe_fc_full)
1670 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1673 IXGBE_CORE_UNLOCK(adapter);
1678 /*********************************************************************
1680 * Media Ioctl callback
1682 * This routine is called when the user changes speed/duplex using
1683 * media/mediaopt option with ifconfig.
1685 **********************************************************************/
/*
** ifmedia change callback (ifconfig media/mediaopt lands here).
** Accumulates the advertised link speeds implied by the selected
** media subtype, programs the MAC via setup_link(), and records
** the choice in adapter->advertise for sysctl reporting.
** The switch relies on intentional case fallthrough so that each
** subtype also advertises the slower speeds below it.
*/
1687 ixgbe_media_change(struct ifnet * ifp)
1689 struct adapter *adapter = ifp->if_softc;
1690 struct ifmedia *ifm = &adapter->media;
1691 struct ixgbe_hw *hw = &adapter->hw;
1692 ixgbe_link_speed speed = 0;
1694 INIT_DEBUGOUT("ixgbe_media_change: begin");
1696 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software */
1699 if (hw->phy.media_type == ixgbe_media_type_backplane)
1703 ** We don't actually need to check against the supported
1704 ** media types of the adapter; ifmedia will take care of
1707 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1710 speed |= IXGBE_LINK_SPEED_100_FULL;
/* FALLTHROUGH: 10G subtypes also pick up 1G below */
1712 case IFM_10G_SR: /* KR, too */
1714 case IFM_10G_CX4: /* KX4 */
1715 speed |= IXGBE_LINK_SPEED_1GB_FULL;
/* FALLTHROUGH */
1716 case IFM_10G_TWINAX:
1717 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1720 speed |= IXGBE_LINK_SPEED_100_FULL;
/* FALLTHROUGH */
1723 case IFM_1000_CX: /* KX */
1724 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1727 speed |= IXGBE_LINK_SPEED_100_FULL;
1733 hw->mac.autotry_restart = TRUE;
1734 hw->mac.ops.setup_link(hw, speed, TRUE);
/* Encode as the 3-bit advertise mask used by the sysctl handler */
1735 adapter->advertise =
1736 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1737 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1738 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1743 device_printf(adapter->dev, "Invalid media type!\n");
/*
** Program FCTRL promiscuous/allmulti bits from the interface flags.
** Counts the multicast list first so MPE can be cleared again when
** the list fits in hardware filters.
*/
1748 ixgbe_set_promisc(struct adapter *adapter)
1751 struct ifnet *ifp = adapter->ifp;
1754 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1755 reg_rctl &= (~IXGBE_FCTRL_UPE);
1756 if (ifp->if_flags & IFF_ALLMULTI)
/* Pretend the list overflows so MPE stays set below */
1757 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1759 struct ifmultiaddr *ifma;
1760 #if __FreeBSD_version < 800000
1763 if_maddr_rlock(ifp);
/* Count link-layer multicast entries, capped at the HW limit */
1765 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1766 if (ifma->ifma_addr->sa_family != AF_LINK)
1768 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1772 #if __FreeBSD_version < 800000
1773 IF_ADDR_UNLOCK(ifp);
1775 if_maddr_runlock(ifp);
/* List fits in HW filters: no need for multicast-promiscuous */
1778 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1779 reg_rctl &= (~IXGBE_FCTRL_MPE);
1780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1782 if (ifp->if_flags & IFF_PROMISC) {
1783 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1785 } else if (ifp->if_flags & IFF_ALLMULTI) {
1786 reg_rctl |= IXGBE_FCTRL_MPE;
1787 reg_rctl &= ~IXGBE_FCTRL_UPE;
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1794 /*********************************************************************
1797 * This routine is called whenever multicast address list is updated.
1799 **********************************************************************/
1800 #define IXGBE_RAR_ENTRIES 16
/*
** Rebuild the hardware multicast filter from the interface's
** multicast address list, falling back to multicast-promiscuous
** (MPE) when the list overflows or IFF_ALLMULTI is set.
*/
1803 ixgbe_set_multi(struct adapter *adapter)
1808 struct ifmultiaddr *ifma;
1810 struct ifnet *ifp = adapter->ifp;
1812 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1815 bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1816 MAX_NUM_MULTICAST_ADDRESSES);
1818 #if __FreeBSD_version < 800000
1821 if_maddr_rlock(ifp);
/* Copy each link-layer multicast address into the flat mta array */
1823 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1824 if (ifma->ifma_addr->sa_family != AF_LINK)
1826 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1828 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1829 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1830 IXGBE_ETH_LENGTH_OF_ADDRESS);
1833 #if __FreeBSD_version < 800000
1834 IF_ADDR_UNLOCK(ifp);
1836 if_maddr_runlock(ifp);
1839 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional UPE|MPE set is immediately
** overridden by every branch below (the else clears both bits);
** it appears redundant — confirm before removing. */
1840 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1841 if (ifp->if_flags & IFF_PROMISC)
1842 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1844 ifp->if_flags & IFF_ALLMULTI) {
1845 fctrl |= IXGBE_FCTRL_MPE;
1846 fctrl &= ~IXGBE_FCTRL_UPE;
1848 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1850 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program exact filters when the list fit in hardware */
1852 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1854 ixgbe_update_mc_addr_list(&adapter->hw,
1855 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1862 * This is an iterator function now needed by the multicast
1863 * shared code. It simply feeds the shared code routine the
1864 * addresses in the array of ixgbe_set_multi() one by one.
1867 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1869 u8 *addr = *update_ptr;
1873 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1874 *update_ptr = newptr;
1879 /*********************************************************************
1882 * This routine checks for link status, updates statistics,
1883 * and runs the watchdog check.
1885 **********************************************************************/
/*
** Once-per-second callout (core lock held): probes SFP optics,
** refreshes link state and statistics, scans the TX queues for
** hangs, rearms queues that still have work, and resets the
** adapter if every queue is hung (watchdog).
*/
1888 ixgbe_local_timer(void *arg)
1890 struct adapter *adapter = arg;
1891 device_t dev = adapter->dev;
1892 struct ix_queue *que = adapter->queues;
1896 mtx_assert(&adapter->core_mtx, MA_OWNED);
1898 /* Check for pluggable optics */
1899 if (adapter->sfp_probe)
1900 if (!ixgbe_sfp_probe(adapter))
1901 goto out; /* Nothing to do */
1903 ixgbe_update_link_status(adapter);
1904 ixgbe_update_stats_counters(adapter);
1907 ** Check the TX queues status
1908 ** - mark hung queues so we don't schedule on them
1909 ** - watchdog only if all queues show hung
1911 for (int i = 0; i < adapter->num_queues; i++, que++) {
1912 /* Keep track of queues with work for soft irq */
1914 queues |= ((u64)1 << que->me);
1916 ** Each time txeof runs without cleaning, but there
1917 ** are uncleaned descriptors it increments busy. If
1918 ** we get to the MAX we declare it hung.
1920 if (que->busy == IXGBE_QUEUE_HUNG) {
1922 /* Mark the queue as inactive */
1923 adapter->active_queues &= ~((u64)1 << que->me);
1926 /* Check if we've come back from hung */
1927 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1928 adapter->active_queues |= ((u64)1 << que->me);
1930 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1931 device_printf(dev,"Warning queue %d "
1932 "appears to be hung!\n", i);
1933 que->txr->busy = IXGBE_QUEUE_HUNG;
1939 /* Only truly watchdog if all queues show hung */
1940 if (hung == adapter->num_queues)
1942 else if (queues != 0) { /* Force an IRQ on queues with work */
1943 ixgbe_rearm_queues(adapter, queues);
/* Reschedule ourselves for the next second */
1947 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark down and reinitialize the adapter */
1951 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1952 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1953 adapter->watchdog_events++;
1954 ixgbe_init_locked(adapter);
1958 ** Note: this routine updates the OS on the link state
1959 ** the real check of the hardware only happens with
1960 ** a link interrupt.
/*
** Push the cached link state (adapter->link_up, set by the link
** interrupt path) out to the network stack, logging transitions
** and refreshing flow control / DMA coalescing on link-up.
*/
1963 ixgbe_update_link_status(struct adapter *adapter)
1965 struct ifnet *ifp = adapter->ifp;
1966 device_t dev = adapter->dev;
1968 if (adapter->link_up){
/* Only act on an actual down -> up transition */
1969 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G code; anything else prints 1 */
1971 device_printf(dev,"Link is up %d Gbps %s \n",
1972 ((adapter->link_speed == 128)? 10:1),
1974 adapter->link_active = TRUE;
1975 /* Update any Flow Control changes */
1976 ixgbe_fc_enable(&adapter->hw);
1977 /* Update DMA coalescing config */
1978 ixgbe_config_dmac(adapter);
1979 if_link_state_change(ifp, LINK_STATE_UP);
1981 } else { /* Link down */
1982 if (adapter->link_active == TRUE) {
1984 device_printf(dev,"Link is Down\n");
1985 if_link_state_change(ifp, LINK_STATE_DOWN);
1986 adapter->link_active = FALSE;
1994 /*********************************************************************
1996 * This routine disables all traffic on the adapter by issuing a
1997 * global reset on the MAC and deallocates TX/RX buffers.
1999 **********************************************************************/
/*
** Quiesce the adapter (core lock held): disable interrupts, stop
** the timer, halt the MAC, turn off the laser, and tell the stack
** the link is down.  TX/RX buffers are reclaimed by the caller.
*/
2002 ixgbe_stop(void *arg)
2005 struct adapter *adapter = arg;
2006 struct ixgbe_hw *hw = &adapter->hw;
2009 mtx_assert(&adapter->core_mtx, MA_OWNED);
2011 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2012 ixgbe_disable_intr(adapter);
2013 callout_stop(&adapter->timer);
2015 /* Let the stack know...*/
2016 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the shared-code "stopped" flag so stop_adapter runs fully */
2019 hw->adapter_stopped = FALSE;
2020 ixgbe_stop_adapter(hw);
2021 if (hw->mac.type == ixgbe_mac_82599EB)
2022 ixgbe_stop_mac_link_on_d3_82599(hw);
2023 /* Turn off the laser - noop with no optics */
2024 ixgbe_disable_tx_laser(hw);
2026 /* Update the stack */
2027 adapter->link_up = FALSE;
2028 ixgbe_update_link_status(adapter);
2030 /* reprogram the RAR[0] in case user changed it. */
2031 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2037 /*********************************************************************
2039 * Determine hardware revision.
2041 **********************************************************************/
2043 ixgbe_identify_hardware(struct adapter *adapter)
2045 device_t dev = adapter->dev;
2046 struct ixgbe_hw *hw = &adapter->hw;
2048 /* Save off the information about this board */
2049 hw->vendor_id = pci_get_vendor(dev);
2050 hw->device_id = pci_get_device(dev);
2051 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2052 hw->subsystem_vendor_id =
2053 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2054 hw->subsystem_device_id =
2055 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2058 ** Make sure BUSMASTER is set
2060 pci_enable_busmaster(dev);
2062 /* We need this here to set the num_segs below */
2063 ixgbe_set_mac_type(hw);
2065 /* Pick up the 82599 settings */
2066 if (hw->mac.type != ixgbe_mac_82598EB) {
2067 hw->phy.smart_speed = ixgbe_smart_speed;
2068 adapter->num_segs = IXGBE_82599_SCATTER;
2070 adapter->num_segs = IXGBE_82598_SCATTER;
2075 /*********************************************************************
2077 * Determine optic type
2079 **********************************************************************/
2081 ixgbe_setup_optics(struct adapter *adapter)
2083 struct ixgbe_hw *hw = &adapter->hw;
2086 layer = ixgbe_get_supported_physical_layer(hw);
2088 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2089 adapter->optics = IFM_10G_T;
2093 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2094 adapter->optics = IFM_1000_T;
2098 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2099 adapter->optics = IFM_1000_SX;
2103 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2104 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2105 adapter->optics = IFM_10G_LR;
2109 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2110 adapter->optics = IFM_10G_SR;
2114 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2115 adapter->optics = IFM_10G_TWINAX;
2119 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2120 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2121 adapter->optics = IFM_10G_CX4;
2125 /* If we get here just set the default */
2126 adapter->optics = IFM_ETHER | IFM_AUTO;
2130 /*********************************************************************
2132 * Setup the Legacy or MSI Interrupt handler
2134 **********************************************************************/
/*
** Allocate and wire up the single Legacy/MSI interrupt: one IRQ
** resource, the per-queue deferred task, and the shared link/SFP/
** PHY taskqueue.  Returns 0 on success or a bus error code.
*/
2136 ixgbe_allocate_legacy(struct adapter *adapter)
2138 device_t dev = adapter->dev;
2139 struct ix_queue *que = adapter->queues;
2140 #ifndef IXGBE_LEGACY_TX
2141 struct tx_ring *txr = adapter->tx_rings;
/* MSI (as opposed to INTx) uses resource ID 1 */
2146 if (adapter->msix == 1)
2149 /* We allocate a single interrupt resource */
2150 adapter->res = bus_alloc_resource_any(dev,
2151 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2152 if (adapter->res == NULL) {
2153 device_printf(dev, "Unable to allocate bus resource: "
2159 * Try allocating a fast interrupt and the associated deferred
2160 * processing contexts.
2162 #ifndef IXGBE_LEGACY_TX
2163 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2165 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2166 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2167 taskqueue_thread_enqueue, &que->tq);
2168 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2169 device_get_nameunit(adapter->dev));
2171 /* Tasklets for Link, SFP and Multispeed Fiber */
2172 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2173 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2174 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2175 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2177 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2179 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2180 taskqueue_thread_enqueue, &adapter->tq);
2181 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2182 device_get_nameunit(adapter->dev));
/* Install the ISR; tear down the taskqueues if it fails */
2184 if ((error = bus_setup_intr(dev, adapter->res,
2185 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2186 que, &adapter->tag)) != 0) {
2187 device_printf(dev, "Failed to register fast interrupt "
2188 "handler: %d\n", error);
2189 taskqueue_free(que->tq);
2190 taskqueue_free(adapter->tq);
2195 /* For simplicity in the handlers */
2196 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2202 /*********************************************************************
2204 * Setup MSIX Interrupt resources and handlers
2206 **********************************************************************/
/*
** Allocate and wire up the MSI-X vectors: one per RX/TX queue pair
** (optionally CPU-bound, RSS-aware) plus a final vector for the
** link/"other causes" handler, each with its taskqueue context.
*/
2208 ixgbe_allocate_msix(struct adapter *adapter)
2210 device_t dev = adapter->dev;
2211 struct ix_queue *que = adapter->queues;
2212 struct tx_ring *txr = adapter->tx_rings;
2213 int error, rid, vector = 0;
2221 * If we're doing RSS, the number of queues needs to
2222 * match the number of RSS buckets that are configured.
2224 * + If there's more queues than RSS buckets, we'll end
2225 * up with queues that get no traffic.
2227 * + If there's more RSS buckets than queues, we'll end
2228 * up having multiple RSS buckets map to the same queue,
2229 * so there'll be some contention.
2231 if (adapter->num_queues != rss_getnumbuckets()) {
2233 "%s: number of queues (%d) != number of RSS buckets (%d)"
2234 "; performance will be impacted.\n",
2236 adapter->num_queues,
2237 rss_getnumbuckets());
/* One vector (and one IRQ resource) per queue pair */
2241 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2243 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2244 RF_SHAREABLE | RF_ACTIVE);
2245 if (que->res == NULL) {
2246 device_printf(dev,"Unable to allocate"
2247 " bus resource: que interrupt [%d]\n", vector);
2250 /* Set the handler function */
2251 error = bus_setup_intr(dev, que->res,
2252 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2253 ixgbe_msix_que, que, &que->tag);
2256 device_printf(dev, "Failed to register QUE handler");
2259 #if __FreeBSD_version >= 800504
2260 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2263 adapter->active_queues |= (u64)(1 << que->msix);
2266 * The queue ID is used as the RSS layer bucket ID.
2267 * We look up the queue ID -> RSS CPU ID and select
2270 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2273 * Bind the msix vector, and thus the
2274 * rings to the corresponding cpu.
2276 * This just happens to match the default RSS round-robin
2277 * bucket -> queue -> CPU allocation.
2279 if (adapter->num_queues > 1)
2282 if (adapter->num_queues > 1)
2283 bus_bind_intr(dev, que->res, cpu_id)
2287 "Bound RSS bucket %d to CPU %d\n",
2292 "Bound queue %d to cpu %d\n",
2295 #ifndef IXGBE_LEGACY_TX
2296 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2298 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2299 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2300 taskqueue_thread_enqueue, &que->tq);
/* Pin the taskqueue thread to the same CPU as the vector */
2302 CPU_SETOF(cpu_id, &cpu_mask);
2303 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2306 device_get_nameunit(adapter->dev),
2309 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2310 device_get_nameunit(adapter->dev));
/* The final vector is the link/"other causes" interrupt */
2316 adapter->res = bus_alloc_resource_any(dev,
2317 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2318 if (!adapter->res) {
2319 device_printf(dev,"Unable to allocate"
2320 " bus resource: Link interrupt [%d]\n", rid);
2323 /* Set the link handler function */
2324 error = bus_setup_intr(dev, adapter->res,
2325 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2326 ixgbe_msix_link, adapter, &adapter->tag);
2328 adapter->res = NULL;
2329 device_printf(dev, "Failed to register LINK handler");
2332 #if __FreeBSD_version >= 800504
2333 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2335 adapter->vector = vector;
2336 /* Tasklets for Link, SFP and Multispeed Fiber */
2337 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2338 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2339 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2340 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2342 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2344 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2345 taskqueue_thread_enqueue, &adapter->tq);
2346 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2347 device_get_nameunit(adapter->dev));
2353 * Setup Either MSI/X or MSI
/*
** Decide the interrupt scheme: try MSI-X (one vector per queue
** plus one for link, sized by CPU count / RSS buckets / tunable),
** falling back to MSI and finally a legacy INTx interrupt.
** Returns the number of vectors obtained.
*/
2356 ixgbe_setup_msix(struct adapter *adapter)
2358 device_t dev = adapter->dev;
2359 int rid, want, queues, msgs;
2361 /* Override by tuneable */
2362 if (ixgbe_enable_msix == 0)
2365 /* First try MSI/X */
2366 msgs = pci_msix_count(dev);
/* Map the MSI-X table BAR; location differs by MAC generation */
2369 rid = PCIR_BAR(MSIX_82598_BAR);
2370 adapter->msix_mem = bus_alloc_resource_any(dev,
2371 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2372 if (adapter->msix_mem == NULL) {
2373 rid += 4; /* 82599 maps in higher BAR */
2374 adapter->msix_mem = bus_alloc_resource_any(dev,
2375 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2377 if (adapter->msix_mem == NULL) {
2378 /* May not be enabled */
2379 device_printf(adapter->dev,
2380 "Unable to map MSIX table \n");
2384 /* Figure out a reasonable auto config value */
2385 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2388 /* If we're doing RSS, clamp at the number of RSS buckets */
2389 if (queues > rss_getnumbuckets())
2390 queues = rss_getnumbuckets();
/* Explicit tunable wins over the auto-computed count */
2393 if (ixgbe_num_queues != 0)
2394 queues = ixgbe_num_queues;
2396 /* reflect correct sysctl value */
2397 ixgbe_num_queues = queues;
2400 ** Want one vector (RX/TX pair) per queue
2401 ** plus an additional for Link.
2407 device_printf(adapter->dev,
2408 "MSIX Configuration Problem, "
2409 "%d vectors but %d queues wanted!\n",
/* Only accept MSI-X if we got exactly the vectors we asked for */
2413 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2414 device_printf(adapter->dev,
2415 "Using MSIX interrupts with %d vectors\n", msgs);
2416 adapter->num_queues = queues;
2420 ** If MSIX alloc failed or provided us with
2421 ** less than needed, free and fall through to MSI
2423 pci_release_msi(dev);
2426 if (adapter->msix_mem != NULL) {
2427 bus_release_resource(dev, SYS_RES_MEMORY,
2428 rid, adapter->msix_mem);
2429 adapter->msix_mem = NULL;
2432 if (pci_alloc_msi(dev, &msgs) == 0) {
2433 device_printf(adapter->dev,"Using an MSI interrupt\n");
2436 device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
** ixgbe_allocate_pci_resources - map the device's register BAR
** and record the bus-space tag/handle the shared code uses for
** all register access, then select the interrupt mode.
**
** NOTE(review): interior lines (BAR rid setup, error return) are
** elided in this excerpt.
*/
2442 ixgbe_allocate_pci_resources(struct adapter *adapter)
2445 	device_t dev = adapter->dev;
2448 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2451 	if (!(adapter->pci_mem)) {
2452 		device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save the mapping; hw_addr is what the shared register macros use */
2456 	adapter->osdep.mem_bus_space_tag =
2457 		rman_get_bustag(adapter->pci_mem);
2458 	adapter->osdep.mem_bus_space_handle =
2459 		rman_get_bushandle(adapter->pci_mem);
2460 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2462 	/* Legacy defaults */
2463 	adapter->num_queues = 1;
2464 	adapter->hw.back = &adapter->osdep;
2467 	** Now setup MSI or MSI/X, should
2468 	** return us the number of supported
2469 	** vectors. (Will be 1 for MSI)
2471 	adapter->msix = ixgbe_setup_msix(adapter);
/*
** ixgbe_free_pci_resources - undo ixgbe_allocate_pci_resources
** and ixgbe_allocate_msix: tear down per-queue interrupts, the
** link/legacy interrupt, MSI vectors, the MSI-X table mapping
** and the register BAR.  Safe to call from a partially failed
** attach (bails out early when adapter->res is NULL).
*/
2476 ixgbe_free_pci_resources(struct adapter * adapter)
2478 	struct ix_queue *que = adapter->queues;
2479 	device_t dev = adapter->dev;
/* MSI-X table lives in a different BAR on 82598 vs later MACs */
2482 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2483 		memrid = PCIR_BAR(MSIX_82598_BAR);
2485 		memrid = PCIR_BAR(MSIX_82599_BAR);
2488 	** There is a slight possibility of a failure mode
2489 	** in attach that will result in entering this function
2490 	** before interrupt resources have been initialized, and
2491 	** in that case we do not want to execute the loops below
2492 	** We can detect this reliably by the state of the adapter
2495 	if (adapter->res == NULL)
2499 	**  Release all msix queue resources:
2501 	for (int i = 0; i < adapter->num_queues; i++, que++) {
/* IRQ rids are 1-based: vector n was allocated with rid n + 1 */
2502 		rid = que->msix + 1;
2503 		if (que->tag != NULL) {
2504 			bus_teardown_intr(dev, que->res, que->tag);
2507 		if (que->res != NULL)
2508 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2512 	/* Clean the Legacy or Link interrupt last */
2513 	if (adapter->vector) /* we are doing MSIX */
2514 		rid = adapter->vector + 1;
2516 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2518 	if (adapter->tag != NULL) {
2519 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2520 		adapter->tag = NULL;
2522 	if (adapter->res != NULL)
2523 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2527 	pci_release_msi(dev);
2529 	if (adapter->msix_mem != NULL)
2530 		bus_release_resource(dev, SYS_RES_MEMORY,
2531 		    memrid, adapter->msix_mem);
2533 	if (adapter->pci_mem != NULL)
2534 		bus_release_resource(dev, SYS_RES_MEMORY,
2535 		    PCIR_BAR(0), adapter->pci_mem);
2540 /*********************************************************************
2542 * Setup networking device structure and register an interface.
2544 **********************************************************************/
/*
** ixgbe_setup_interface - allocate and initialize the ifnet,
** wire up driver entry points (init/ioctl/transmit), advertise
** hardware capabilities (checksum, TSO, LRO, VLAN, jumbo), and
** register the supported media types, defaulting to autoselect.
*/
2546 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2550 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2552 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2554 		device_printf(dev, "can not allocate ifnet structure\n");
2557 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2558 	ifp->if_baudrate = IF_Gbps(10);
2559 	ifp->if_init = ixgbe_init;
2560 	ifp->if_softc = adapter;
2561 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2562 	ifp->if_ioctl = ixgbe_ioctl;
2563 #if __FreeBSD_version >= 1100036
2564 	if_setgetcounterfn(ifp, ixgbe_get_counter);
2566 #if __FreeBSD_version >= 1100045
2567 	/* TSO parameters */
/* 65518 = 65535 minus Ethernet header overhead -- TODO confirm derivation */
2568 	ifp->if_hw_tsomax = 65518;
2569 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2570 	ifp->if_hw_tsomaxsegsize = 2048;
/* Multiqueue transmit by default; legacy if_start path only if forced */
2572 #ifndef IXGBE_LEGACY_TX
2573 	ifp->if_transmit = ixgbe_mq_start;
2574 	ifp->if_qflush = ixgbe_qflush;
2576 	ifp->if_start = ixgbe_start;
2577 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2578 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2579 	IFQ_SET_READY(&ifp->if_snd);
2582 	ether_ifattach(ifp, adapter->hw.mac.addr);
2584 	adapter->max_frame_size =
2585 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2588 	 * Tell the upper layer(s) we support long frames.
2590 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2592 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2593 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2594 	ifp->if_capabilities |= IFCAP_LRO;
2595 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2599 	ifp->if_capenable = ifp->if_capabilities;
2602 	** Don't turn this on by default, if vlans are
2603 	** created on another pseudo device (eg. lagg)
2604 	** then vlan events are not passed thru, breaking
2605 	** operation, but with HW FILTER off it works. If
2606 	** using vlans directly on the ixgbe driver you can
2607 	** enable this and get full hardware tag filtering.
2609 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2612 	 * Specify the media types supported by this adapter and register
2613 	 * callbacks to update media and link information
2615 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2616 		     ixgbe_media_status);
2618 	ixgbe_add_media_types(adapter);
2620 	/* Autoselect media by default */
2621 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
** ixgbe_add_media_types - translate the PHY layers the hardware
** reports into ifmedia entries.  Backplane layers (KR/KX4/KX)
** have no exact FreeBSD media define, so the closest optical/
** copper type is registered instead (with a console note).
*/
2627 ixgbe_add_media_types(struct adapter *adapter)
2629 	struct ixgbe_hw *hw = &adapter->hw;
2630 	device_t dev = adapter->dev;
2633 	layer = ixgbe_get_supported_physical_layer(hw);
2635 	/* Media types with matching FreeBSD media defines */
2636 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2637 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2638 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2639 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2640 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2641 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
/* Direct-attach (passive or active) copper maps to TWINAX */
2643 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2644 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2645 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2647 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2648 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2649 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2650 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2651 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2652 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2653 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2654 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2657 	** Other (no matching FreeBSD media type):
2658 	** To workaround this, we'll assign these completely
2659 	** inappropriate media types.
2661 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2662 		device_printf(dev, "Media supported: 10GbaseKR\n");
2663 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2664 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2666 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2667 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2668 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2669 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2671 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2672 		device_printf(dev, "Media supported: 1000baseKX\n");
2673 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2674 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2676 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2677 		/* Someday, someone will care about you... */
2678 		device_printf(dev, "Media supported: 1000baseBX\n");
/* 82598AT additionally does 1G copper; advertise both FDX and plain */
2681 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2682 		ifmedia_add(&adapter->media,
2683 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2684 		ifmedia_add(&adapter->media,
2685 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2688 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
** ixgbe_config_link - bring the link up.  SFP ports defer the
** real work to the module/multispeed tasklets; other ports query
** link capabilities and call the shared-code setup_link.
**
** NOTE(review): the SFP/non-SFP branch structure is partially
** elided in this excerpt.
*/
2692 ixgbe_config_link(struct adapter *adapter)
2694 	struct ixgbe_hw *hw = &adapter->hw;
2695 	u32 autoneg, err = 0;
2696 	bool sfp, negotiate;
2698 	sfp = ixgbe_is_sfp(hw);
/* Multispeed fiber needs the laser enabled and the MSF tasklet run */
2701 		if (hw->phy.multispeed_fiber) {
2702 			hw->mac.ops.setup_sfp(hw);
2703 			ixgbe_enable_tx_laser(hw);
2704 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2706 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2708 		if (hw->mac.ops.check_link)
2709 			err = ixgbe_check_link(hw, &adapter->link_speed,
2710 			    &adapter->link_up, FALSE);
/* No advertised speeds configured: ask hardware what it can do */
2713 		autoneg = hw->phy.autoneg_advertised;
2714 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2715                 	err  = hw->mac.ops.get_link_capabilities(hw,
2716 			    &autoneg, &negotiate);
2719 		if (hw->mac.ops.setup_link)
2720                 	err = hw->mac.ops.setup_link(hw,
2721 			    autoneg, adapter->link_up);
2728 /*********************************************************************
2730 * Enable transmit units.
2732 **********************************************************************/
/*
** ixgbe_initialize_transmit_units - program the hardware TX path:
** per-ring descriptor base/length/head/tail registers, head
** writeback disable, and (on non-82598 MACs) global DMA TX enable
** plus MTQC queue layout.
*/
2734 ixgbe_initialize_transmit_units(struct adapter *adapter)
2736 	struct tx_ring	*txr = adapter->tx_rings;
2737 	struct ixgbe_hw	*hw = &adapter->hw;
2739 	/* Setup the Base and Length of the Tx Descriptor Ring */
2741 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2742 		u64	tdba = txr->txdma.dma_paddr;
/* Physical address split into low/high 32-bit halves */
2745 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2746 		       (tdba & 0x00000000ffffffffULL));
2747 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2748 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2749 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2751 		/* Setup the HW Tx Head and Tail descriptor pointers */
2752 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2753 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2755 		/* Cache the tail address */
2756 		txr->tail = IXGBE_TDT(txr->me);
2758 		/* Set the processing limit */
2759 		txr->process_limit = ixgbe_tx_process_limit;
/* TXCTRL register name differs between 82598 and 82599-class MACs */
2761 		/* Disable Head Writeback */
2762 		switch (hw->mac.type) {
2763 		case ixgbe_mac_82598EB:
2764 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2766 		case ixgbe_mac_82599EB:
2767 		case ixgbe_mac_X540:
2769 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2772 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2773 		switch (hw->mac.type) {
2774 		case ixgbe_mac_82598EB:
2775 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2777 		case ixgbe_mac_82599EB:
2778 		case ixgbe_mac_X540:
2780 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2786 	if (hw->mac.type != ixgbe_mac_82598EB) {
2787 		u32 dmatxctl, rttdcs;
2788 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2789 		dmatxctl |= IXGBE_DMATXCTL_TE;
2790 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
/* Arbiter must be off while MTQC is written, then re-enabled */
2791 		/* Disable arbiter to set MTQC */
2792 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2793 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2794 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2795 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2796 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2797 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
** ixgbe_initialise_rss_mapping - program RSS: the redirection
** table (RETA/ERETA), the 10-dword hash key (RSSRK), and the
** hash-type field selection in MRQC.  With the RSS option the
** key and bucket mapping come from the kernel RSS layer;
** otherwise a random key and round-robin queue mapping are used.
**
** NOTE(review): the #ifdef RSS / #else structure, table_size and
** index_mult selection are partially elided in this excerpt --
** the switch on mac.type presumably picks the RETA multiplier
** and 128-vs-512 entry table size; confirm against full source.
*/
2804 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2806 	struct ixgbe_hw	*hw = &adapter->hw;
2808 	int i, j, queue_id, table_size;
2810 	uint32_t rss_key[10];
2813 	uint32_t rss_hash_config;
2820 	/* Fetch the configured RSS key */
2821 	rss_getkey((uint8_t *) &rss_key);
2823 	/* set up random bits */
2824 	arc4rand(&rss_key, sizeof(rss_key), 0);
2827 	/* Set multiplier for RETA setup and table size based on MAC */
2830 	switch (adapter->hw.mac.type) {
2831 	case ixgbe_mac_82598EB:
2834 	case ixgbe_mac_X550:
2835 	case ixgbe_mac_X550EM_x:
2842 	/* Set up the redirection table */
2843 	for (i = 0, j = 0; i < table_size; i++, j++) {
2844 		if (j == adapter->num_queues) j = 0;
2847 		 * Fetch the RSS bucket id for the given indirection entry.
2848 		 * Cap it at the number of configured buckets (which is
2851 		queue_id = rss_get_indirection_to_bucket(i);
2852 		queue_id = queue_id % adapter->num_queues;
2854 		queue_id = (j * index_mult);
2857 		 * The low 8 bits are for hash value (n+0);
2858 		 * The next 8 bits are for hash value (n+1), etc.
/* Four 8-bit entries are packed per 32-bit register write */
2861 		reta = reta | ( ((uint32_t) queue_id) << 24);
2864 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
/* Entries beyond the first 128 go to the extended ERETA registers */
2866 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2871 	/* Now fill our hash function seeds */
2872 	for (int i = 0; i < 10; i++)
2873 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2875 	/* Perform hash on these packet types */
2877 	mrqc = IXGBE_MRQC_RSSEN;
2878 	rss_hash_config = rss_gethashconfig();
2879 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2880 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2881 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2882 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2883 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2884 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2885 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2886 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2887 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2888 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2889 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
2890 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2891 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2892 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
/* Hardware has no UDP/IPv4-EX hash field; warn rather than silently drop */
2893 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
2894 		device_printf(adapter->dev,
2895 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
2896 		    "but not supported\n", __func__);
2897 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2898 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2899 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
2900 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2903 	 * Disable UDP - IP fragments aren't currently being handled
2904 	 * and so we end up with a mix of 2-tuple and 4-tuple
2907 	mrqc = IXGBE_MRQC_RSSEN
2908 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2909 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2911 	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2913 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2914 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2915 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2916 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2918 	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2919 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2923 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2927 /*********************************************************************
2929 * Setup receive registers and features.
2931 **********************************************************************/
2932 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2934 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
** ixgbe_initialize_receive_units - program the hardware RX path:
** broadcast/filter control, jumbo frame enable, CRC stripping
** (netmap-conditional), per-ring descriptor registers and SRRCTL,
** packet-split type, RSS mapping, and receive checksum offload.
** Receives are disabled for the duration of ring setup.
*/
2937 ixgbe_initialize_receive_units(struct adapter *adapter)
2939 	struct	rx_ring	*rxr = adapter->rx_rings;
2940 	struct ixgbe_hw	*hw = &adapter->hw;
2941 	struct ifnet   *ifp = adapter->ifp;
2942 	u32		bufsz, fctrl, srrctl, rxcsum;
2947 	 * Make sure receives are disabled while
2948 	 * setting up the descriptor ring
2950 	ixgbe_disable_rx(hw);
2952 	/* Enable broadcasts */
2953 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2954 	fctrl |= IXGBE_FCTRL_BAM;
/* 82598 also handles pause-frame discard/pass-MAC-control here */
2955 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2956 		fctrl |= IXGBE_FCTRL_DPF;
2957 		fctrl |= IXGBE_FCTRL_PMCF;
2959 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2961 	/* Set for Jumbo Frames? */
2962 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2963 	if (ifp->if_mtu > ETHERMTU)
2964 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2966 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2968 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2969 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2970 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2972 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2973 #endif /* DEV_NETMAP */
2974 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert mbuf size to the 1KB-granular SRRCTL BSIZEPKT encoding */
2976 	bufsz = (adapter->rx_mbuf_sz +
2977 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2979 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2980 		u64 rdba = rxr->rxdma.dma_paddr;
2982 		/* Setup the Base and Length of the Rx Descriptor Ring */
2983 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2984 			       (rdba & 0x00000000ffffffffULL));
2985 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2986 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2987 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2989 		/* Set up the SRRCTL register */
2990 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2991 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2992 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2994 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2997 		 * Set DROP_EN iff we have no flow control and >1 queue.
2998 		 * Note that srrctl was cleared shortly before during reset,
2999 		 * so we do not need to clear the bit, but do it just in case
3000 		 * this code is moved elsewhere.
3002 		if (adapter->num_queues > 1 &&
3003 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3004 			srrctl |= IXGBE_SRRCTL_DROP_EN;
3006 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3009 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3011 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3012 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3013 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3015 		/* Set the processing limit */
3016 		rxr->process_limit = ixgbe_rx_process_limit;
3018 		/* Set the driver rx tail address */
3019 		rxr->tail =  IXGBE_RDT(rxr->me);
/* Packet-split header types only exist on 82599 and later MACs */
3022 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3023 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3024 			      IXGBE_PSRTYPE_UDPHDR |
3025 			      IXGBE_PSRTYPE_IPV4HDR |
3026 			      IXGBE_PSRTYPE_IPV6HDR;
3027 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3030 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3032 	ixgbe_initialise_rss_mapping(adapter);
3034 	if (adapter->num_queues > 1) {
3035 		/* RSS and RX IPP Checksum are mutually exclusive */
3036 		rxcsum |= IXGBE_RXCSUM_PCSD;
3039 	if (ifp->if_capenable & IFCAP_RXCSUM)
3040 		rxcsum |= IXGBE_RXCSUM_PCSD;
3042 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3043 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3045 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3052 ** This routine is run via an vlan config EVENT,
3053 ** it enables us to use the HW Filter table since
3054 ** we can get the vlan id. This just creates the
3055 ** entry in the soft version of the VFTA, init will
3056 ** repopulate the real table.
/*
** ixgbe_register_vlan - vlan-config event handler.  Records the
** new vlan id in the soft shadow VFTA (init repopulates the real
** table) and refreshes the hardware vlan setup, under the core
** lock.  Ignores events for other interfaces and invalid tags.
*/
3059 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3061 	struct adapter	*adapter = ifp->if_softc;
3064 	if (ifp->if_softc !=  arg)   /* Not our event */
3067 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3070 	IXGBE_CORE_LOCK(adapter);
/* VFTA is an array of 32-bit words: word = vtag >> 5, bit = vtag & 31 */
3071 	index = (vtag >> 5) & 0x7F;
3073 	adapter->shadow_vfta[index] |= (1 << bit);
3074 	++adapter->num_vlans;
3075 	ixgbe_setup_vlan_hw_support(adapter);
3076 	IXGBE_CORE_UNLOCK(adapter);
3080 ** This routine is run via an vlan
3081 ** unconfig EVENT, remove our entry
3082 ** in the soft vfta.
/*
** ixgbe_unregister_vlan - vlan-unconfig event handler.  Mirror of
** ixgbe_register_vlan: clears the tag's bit in the shadow VFTA,
** decrements the vlan count, and re-runs the hardware vlan setup.
*/
3085 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3087 	struct adapter	*adapter = ifp->if_softc;
3090 	if (ifp->if_softc !=  arg)
3093 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3096 	IXGBE_CORE_LOCK(adapter);
3097 	index = (vtag >> 5) & 0x7F;
3099 	adapter->shadow_vfta[index] &= ~(1 << bit);
3100 	--adapter->num_vlans;
3101 	/* Re-init to load the changes */
3102 	ixgbe_setup_vlan_hw_support(adapter);
3103 	IXGBE_CORE_UNLOCK(adapter);
/*
** ixgbe_setup_vlan_hw_support - (re)program hardware vlan state
** after a soft reset: per-queue tag stripping, VFTA repopulation
** from the shadow copy, and the global VLNCTRL filter/strip bits.
** A no-op when no vlans have ever been registered.
*/
3107 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3109 	struct ifnet 	*ifp = adapter->ifp;
3110 	struct ixgbe_hw *hw = &adapter->hw;
3111 	struct rx_ring	*rxr;
3116 	** We get here thru init_locked, meaning
3117 	** a soft reset, this has already cleared
3118 	** the VFTA and other state, so if there
3119 	** have been no vlan's registered do nothing.
3121 	if (adapter->num_vlans == 0)
3124 	/* Setup the queues for vlans */
3125 	for (int i = 0; i < adapter->num_queues; i++) {
3126 		rxr = &adapter->rx_rings[i];
3127 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3128 		if (hw->mac.type != ixgbe_mac_82598EB) {
3129 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3130 			ctrl |= IXGBE_RXDCTL_VME;
3131 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3133 		rxr->vtag_strip = TRUE;
/* Without HW filtering enabled there is nothing further to program */
3136 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3139 	** A soft reset zero's out the VFTA, so
3140 	** we need to repopulate it now.
3142 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3143 		if (adapter->shadow_vfta[i] != 0)
3144 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3145 			    adapter->shadow_vfta[i]);
3147 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3148 	/* Enable the Filter Table if enabled */
3149 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3150 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3151 		ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 tag stripping is global (VLNCTRL), not per-queue */
3153 	if (hw->mac.type == ixgbe_mac_82598EB)
3154 		ctrl |= IXGBE_VLNCTRL_VME;
3155 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
** ixgbe_enable_intr - unmask interrupts: build the EIMS cause
** mask per MAC type (ECC, thermal, SFP GPIO, flow director),
** configure MSI-X auto-clear (keeping link causes manual), and
** enable each queue's vector individually.
*/
3159 ixgbe_enable_intr(struct adapter *adapter)
3161 	struct ixgbe_hw	*hw = &adapter->hw;
3162 	struct ix_queue	*que = adapter->queues;
3165 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3166 	/* Enable Fan Failure detection */
3167 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3168 		    mask |= IXGBE_EIMS_GPI_SDP1;
/* Extra causes vary by MAC generation */
3170 	switch (adapter->hw.mac.type) {
3171 	case ixgbe_mac_82599EB:
3172 		mask |= IXGBE_EIMS_ECC;
3173 		/* Temperature sensor on some adapters */
3174 		mask |= IXGBE_EIMS_GPI_SDP0;
3175 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3176 		mask |= IXGBE_EIMS_GPI_SDP1;
3177 		mask |= IXGBE_EIMS_GPI_SDP2;
3179 		mask |= IXGBE_EIMS_FLOW_DIR;
3182 	case ixgbe_mac_X540:
3183 		/* Detect if Thermal Sensor is enabled */
3184 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3185 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3186 			mask |= IXGBE_EIMS_TS;
3187 		mask |= IXGBE_EIMS_ECC;
3189 		mask |= IXGBE_EIMS_FLOW_DIR;
3192 	case ixgbe_mac_X550:
3193 	case ixgbe_mac_X550EM_x:
3194 		/* MAC thermal sensor is automatically enabled */
3195 		mask |= IXGBE_EIMS_TS;
3196 		/* Some devices use SDP0 for important information */
3197 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3198 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3199 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3200 		mask |= IXGBE_EIMS_ECC;
3202 		mask |= IXGBE_EIMS_FLOW_DIR;
3209 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3211 	/* With MSI-X we use auto clear */
3212 	if (adapter->msix_mem) {
3213 		mask = IXGBE_EIMS_ENABLE_MASK;
3214 		/* Don't autoclear Link */
3215 		mask &= ~IXGBE_EIMS_OTHER;
3216 		mask &= ~IXGBE_EIMS_LSC;
3217 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3221 	** Now enable all queues, this is done separately to
3222 	** allow for handling the extended (beyond 32) MSIX
3223 	** vectors that can be used by 82599
3225 	for (int i = 0; i < adapter->num_queues; i++, que++)
3226 		ixgbe_enable_queue(adapter, que->msix);
/* Flush posted writes so the unmask takes effect immediately */
3228 	IXGBE_WRITE_FLUSH(hw);
/*
** ixgbe_disable_intr - mask all interrupt causes.  Clears the
** MSI-X auto-clear register first, then writes EIMC (82598) or
** EIMC plus both extended EIMC_EX registers (later MACs, which
** have more than 32 vectors).
*/
3234 ixgbe_disable_intr(struct adapter *adapter)
3236 	if (adapter->msix_mem)
3237 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3238 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3239 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3241 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3242 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3243 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3245 	IXGBE_WRITE_FLUSH(&adapter->hw);
3250 ** Get the width and transaction speed of
3251 ** the slot this adapter is plugged into.
/*
** ixgbe_get_slot_info - determine the PCIe width/speed of the
** slot the adapter occupies.  Most devices use the shared-code
** ixgbe_get_bus_info(); the 82599 quad-port SFP part sits behind
** an internal switch, so its slot speed is read by walking up
** the PCI tree to the grandparent bridge's Link Status register.
** Warns when the slot bandwidth is below what the card needs.
*/
3254 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3256 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3257 	struct ixgbe_mac_info	*mac = &hw->mac;
3261 	/* For most devices simply call the shared code routine */
3262 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3263 		ixgbe_get_bus_info(hw);
3264 		/* These devices don't use PCI-E */
3265 		switch (hw->mac.type) {
3266 		case ixgbe_mac_X550EM_x:
3274 	** For the Quad port adapter we need to parse back
3275 	** up the PCI tree to find the speed of the expansion
3276 	** slot into which this adapter is plugged. A bit more work.
3278 	dev = device_get_parent(device_get_parent(dev));
3280 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3281 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3283 	dev = device_get_parent(device_get_parent(dev));
3285 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3286 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3288 	/* Now get the PCI Express Capabilities offset */
3289 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3290 	/* ...and read the Link Status Register */
3291 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status register */
3292 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3293 	case IXGBE_PCI_LINK_WIDTH_1:
3294 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3296 	case IXGBE_PCI_LINK_WIDTH_2:
3297 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3299 	case IXGBE_PCI_LINK_WIDTH_4:
3300 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3302 	case IXGBE_PCI_LINK_WIDTH_8:
3303 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3306 		hw->bus.width = ixgbe_bus_width_unknown;
/* Decode link speed: 2.5GT/s Gen1, 5.0GT/s Gen2, 8.0GT/s Gen3 */
3310 	switch (link & IXGBE_PCI_LINK_SPEED) {
3311 	case IXGBE_PCI_LINK_SPEED_2500:
3312 		hw->bus.speed = ixgbe_bus_speed_2500;
3314 	case IXGBE_PCI_LINK_SPEED_5000:
3315 		hw->bus.speed = ixgbe_bus_speed_5000;
3317 	case IXGBE_PCI_LINK_SPEED_8000:
3318 		hw->bus.speed = ixgbe_bus_speed_8000;
3321 		hw->bus.speed = ixgbe_bus_speed_unknown;
3325 	mac->ops.set_lan_id(hw);
3328 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3329 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3330 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3331 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3332 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3333 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3334 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn if the slot cannot supply full line-rate bandwidth */
3337 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3338 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3339 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3340 		device_printf(dev, "PCI-Express bandwidth available"
3341 		    " for this card\n     is not sufficient for"
3342 		    " optimal performance.\n");
3343 		device_printf(dev, "For optimal performance a x8 "
3344 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3346 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3347 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3348 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3349 		device_printf(dev, "PCI-Express bandwidth available"
3350 		    " for this card\n     is not sufficient for"
3351 		    " optimal performance.\n");
3352 		device_printf(dev, "For optimal performance a x8 "
3353 		    "PCIE Gen3 slot is required.\n");
3361 ** Setup the correct IVAR register for a particular MSIX interrupt
3362 ** (yes this is all very magic and confusing :)
3363 ** - entry is the register array entry
3364 ** - vector is the MSIX vector for this queue
3365 ** - type is RX/TX/MISC
/*
** ixgbe_set_ivar - route an interrupt cause to an MSI-X vector
** via the IVAR registers.
**   entry  - cause index (queue number, or misc-cause index)
**   vector - MSI-X vector to deliver to (ALLOC_VAL bit is OR'd in)
**   type   - 0 = RX, 1 = TX, -1 = misc/other causes
** Register layout differs: 82598 packs four 8-bit entries per
** IVAR word; 82599+ pairs RX/TX per queue and has a separate
** IVAR_MISC register.
*/
3368 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3370 	struct ixgbe_hw *hw = &adapter->hw;
3373 	vector |= IXGBE_IVAR_ALLOC_VAL;
3375 	switch (hw->mac.type) {
3377 	case ixgbe_mac_82598EB:
3379 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3381 			entry += (type * 64);
/* Four 8-bit fields per 32-bit IVAR word: mask then insert ours */
3382 		index = (entry >> 2) & 0x1F;
3383 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3384 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3385 		ivar |= (vector << (8 * (entry & 0x3)));
3386 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3389 	case ixgbe_mac_82599EB:
3390 	case ixgbe_mac_X540:
3391 	case ixgbe_mac_X550:
3392 	case ixgbe_mac_X550EM_x:
3393 		if (type == -1) { /* MISC IVAR */
3394 			index = (entry & 1) * 8;
3395 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3396 			ivar &= ~(0xFF << index);
3397 			ivar |= (vector << index);
3398 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3399 		} else {	/* RX/TX IVARS */
/* Two queues share a word; RX at bits 0/16, TX at bits 8/24 */
3400 			index = (16 * (entry & 1)) + (8 * type);
3401 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3402 			ivar &= ~(0xFF << index);
3403 			ivar |= (vector << index);
3404 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
** ixgbe_configure_ivars - map every queue's RX and TX causes to
** that queue's MSI-X vector, seed each vector's EITR moderation
** value from the max-interrupt-rate tunable, and route the link
** (misc) cause to the admin vector.
*/
3413 ixgbe_configure_ivars(struct adapter *adapter)
3415 	struct  ix_queue *que = adapter->queues;
/* EITR granularity: value derived from 4us units of a 1s interval */
3418 	if (ixgbe_max_interrupt_rate > 0)
3419 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3422 	** Disable DMA coalescing if interrupt moderation is
3429 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3430 		/* First the RX queue entry */
3431 		ixgbe_set_ivar(adapter, i, que->msix, 0);
3432 		/* ... and the TX */
3433 		ixgbe_set_ivar(adapter, i, que->msix, 1);
3434 		/* Set an Initial EITR value */
3435 		IXGBE_WRITE_REG(&adapter->hw,
3436 		    IXGBE_EITR(que->msix), newitr);
3439 	/* For the Link interrupt */
3440 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3444 ** ixgbe_sfp_probe - called in the local timer to
3445 ** determine if a port had optics inserted.
/*
** ixgbe_sfp_probe - polled from the local timer to detect late
** SFP module insertion on NL-PHY ports.  Identifies the module,
** resets the PHY, and on success records the optics type.
** Returns TRUE when usable optics were found (FALSE otherwise,
** including the unsupported-module case, which stops probing).
*/
3447 static bool ixgbe_sfp_probe(struct adapter *adapter)
3449 	struct ixgbe_hw	*hw = &adapter->hw;
3450 	device_t	dev = adapter->dev;
3451 	bool		result = FALSE;
3453 	if ((hw->phy.type == ixgbe_phy_nl) &&
3454 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3455 		s32 ret = hw->phy.ops.identify_sfp(hw);
3458 		ret = hw->phy.ops.reset(hw);
3459 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3460 			device_printf(dev,"Unsupported SFP+ module detected!");
3461 			printf(" Reload driver with supported module.\n");
/* Give up: further probes would keep hitting the same module */
3462 			adapter->sfp_probe = FALSE;
3465 			device_printf(dev,"SFP+ module detected!\n");
3466 			/* We now have supported optics */
3467 			adapter->sfp_probe = FALSE;
3468 			/* Set the optics type so system reports correctly */
3469 			ixgbe_setup_optics(adapter);
3477 ** Tasklet handler for MSIX Link interrupts
3478 ** - do outside interrupt since it might sleep
/*
** ixgbe_handle_link - taskqueue handler for MSI-X link interrupts.
** Runs outside interrupt context because the link check may sleep;
** refreshes link speed/state and propagates it to the stack.
*/
3481 ixgbe_handle_link(void *context, int pending)
3483 	struct adapter *adapter = context;
3485 	ixgbe_check_link(&adapter->hw,
3486 	    &adapter->link_speed, &adapter->link_up, 0);
3487 	ixgbe_update_link_status(adapter);
3491 ** Tasklet for handling SFP module interrupts
/*
** ixgbe_handle_mod - taskqueue handler for SFP module-insertion
** interrupts: identify the new module, run the MAC's SFP setup,
** then chain to the multispeed-fiber task to (re)negotiate link.
** Unsupported modules are reported and abandoned.
*/
3494 ixgbe_handle_mod(void *context, int pending)
3496 	struct adapter *adapter = context;
3497 	struct ixgbe_hw *hw = &adapter->hw;
3498 	device_t	dev = adapter->dev;
3501 	err = hw->phy.ops.identify_sfp(hw);
3502 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3504 		    "Unsupported SFP+ module type was detected.\n");
3507 	err = hw->mac.ops.setup_sfp(hw);
3508 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3510 		    "Setup failure - unsupported SFP+ module type.\n");
3513 	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3519 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
** ixgbe_handle_msf - taskqueue handler for multispeed-fiber
** interrupts: re-identify optics, determine advertised speeds,
** set up the link, and rebuild the ifmedia list since the
** supported media may have changed with the module.
*/
3522 ixgbe_handle_msf(void *context, int pending)
3524 	struct adapter *adapter = context;
3525 	struct ixgbe_hw *hw = &adapter->hw;
3530 	err = hw->phy.ops.identify_sfp(hw);
3532 		ixgbe_setup_optics(adapter);
3533 		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
/* No advertised speeds configured: query hardware capabilities */
3536 	autoneg = hw->phy.autoneg_advertised;
3537 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3538 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3539 	if (hw->mac.ops.setup_link)
3540 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Module swap may change supported media: rebuild the ifmedia list */
3542 	ifmedia_removeall(&adapter->media);
3543 	ixgbe_add_media_types(adapter);
3548 ** Tasklet for handling interrupts from an external PHY
/*
** ixgbe_handle_phy - taskqueue handler for external-PHY (LASI)
** interrupts.  Delegates to the PHY's handle_lasi op and logs an
** over-temperature condition (the PHY downshifts on its own) or
** any other error.
*/
3551 ixgbe_handle_phy(void *context, int pending)
3553 	struct adapter *adapter = context;
3554 	struct ixgbe_hw *hw = &adapter->hw;
3557 	error = hw->phy.ops.handle_lasi(hw);
3558 	if (error == IXGBE_ERR_OVERTEMP)
3559 		device_printf(adapter->dev,
3560 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3561 		    " PHY will downshift to lower power state!\n");
3563 		device_printf(adapter->dev,
3564 		    "Error handling LASI interrupt: %d\n",
3571 ** Tasklet for reinitializing the Flow Director filter table
/*
** ixgbe_reinit_fdir - taskqueue handler that rebuilds the Flow
** Director filter table after the hardware signalled it is full,
** re-enables the FDIR interrupt cause, and marks the interface
** running again.  Guarded by the fdir_reinit flag set by the ISR.
*/
3574 ixgbe_reinit_fdir(void *context, int pending)
3576 	struct adapter  *adapter = context;
3577 	struct ifnet   *ifp = adapter->ifp;
3579 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3581 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3582 	adapter->fdir_reinit = 0;
3583 	/* re-enable flow director interrupts */
3584 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3585 	/* Restart the interface */
3586 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3591 /*********************************************************************
3593 * Configure DMA Coalescing
3595 **********************************************************************/
3597 ixgbe_config_dmac(struct adapter *adapter)
3599 struct ixgbe_hw *hw = &adapter->hw;
3600 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMA coalescing only exists on X550-class MACs with a dmac_config op. */
3602 if (hw->mac.type < ixgbe_mac_X550 ||
3603 !hw->mac.ops.dmac_config)
/* Reprogram only when the watchdog timer or link speed changed
 * (XOR is used as a cheap inequality test on the raw values). */
3606 if (dcfg->watchdog_timer ^ adapter->dmac ||
3607 dcfg->link_speed ^ adapter->link_speed) {
3608 dcfg->watchdog_timer = adapter->dmac;
3609 dcfg->fcoe_en = false;
3610 dcfg->link_speed = adapter->link_speed;
3613 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3614 dcfg->watchdog_timer, dcfg->link_speed);
/* Push the new configuration down to the hardware. */
3616 hw->mac.ops.dmac_config(hw);
/*
 * Record whether this adapter supports Energy Efficient Ethernet,
 * decided purely by device ID (X550T copper and X550EM KR backplane).
 * Note: eee_enabled is initialized to the same value as eee_support.
 */
3621 * Checks whether the adapter supports Energy Efficient Ethernet
3622 * or not, based on device ID.
3625 ixgbe_check_eee_support(struct adapter *adapter)
3627 struct ixgbe_hw *hw = &adapter->hw;
3629 adapter->eee_support = adapter->eee_enabled =
3630 (hw->device_id == IXGBE_DEV_ID_X550T ||
3631 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3635 * Checks whether the adapter's ports are capable of
3636 * Wake On LAN by reading the adapter's NVM.
3638 * Sets each port's hw->wol_enabled value depending
3639 * on the value read here.
3642 ixgbe_check_wol_support(struct adapter *adapter)
3644 struct ixgbe_hw *hw = &adapter->hw;
3647 /* Find out WoL support for port */
3648 adapter->wol_support = hw->wol_enabled = 0;
3649 ixgbe_get_device_caps(hw, &dev_caps);
/* WoL is on when both ports support it, or port 0 alone does
 * (NOTE(review): the port-0 case presumably also checks which bus
 * function this port is — confirm against the full condition). */
3650 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3651 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3653 adapter->wol_support = hw->wol_enabled = 1;
3655 /* Save initial wake up filter configuration */
3656 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
/*
 * Prepare the port for suspend: program wake-up filters and, on X550EM
 * baseT, enter Low Power Link Up (LPLU) mode.  Other adapters are
 * simply stopped.  Caller must hold the core lock.
 */
3662 * Prepare the adapter/port for LPLU and/or WoL
3665 ixgbe_setup_low_power_mode(struct adapter *adapter)
3667 struct ixgbe_hw *hw = &adapter->hw;
3668 device_t dev = adapter->dev;
3671 mtx_assert(&adapter->core_mtx, MA_OWNED);
3673 /* Limit power management flow to X550EM baseT */
3674 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3675 && hw->phy.ops.enter_lplu) {
3676 /* Turn off support for APM wakeup. (Using ACPI instead) */
3677 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3678 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3681 * Clear Wake Up Status register to prevent any previous wakeup
3682 * events from waking us up immediately after we suspend.
3684 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3687 * Program the Wakeup Filter Control register with user filter
3690 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3692 /* Enable wakeups and power management in Wakeup Control */
3693 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3694 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3696 /* X550EM baseT adapters need a special LPLU flow */
/* Keep the PHY state across the stop so LPLU entry isn't undone. */
3697 hw->phy.reset_disable = true;
3698 ixgbe_stop(adapter);
3699 error = hw->phy.ops.enter_lplu(hw);
3702 "Error entering LPLU: %d\n", error);
3703 hw->phy.reset_disable = false;
3705 /* Just stop for other adapters */
3706 ixgbe_stop(adapter);
3712 /**********************************************************************
3714 * Update the board statistics counters.
/* Reads the hardware's clear-on-read statistics registers and folds
 * them into the accumulated PF counters, then mirrors the totals into
 * the OS ifnet statistics.  Must be called periodically (the registers
 * are too narrow to hold long-term totals). */
3716 **********************************************************************/
3718 ixgbe_update_stats_counters(struct adapter *adapter)
3720 struct ixgbe_hw *hw = &adapter->hw;
3721 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3722 u64 total_missed_rx = 0;
3724 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3725 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3726 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3727 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue packet/drop counters, first 16 queues. */
3729 for (int i = 0; i < 16; i++) {
3730 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3731 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3732 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3734 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3735 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3736 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3738 /* Hardware workaround, gprc counts missed packets */
/* NOTE(review): missed_rx appears to be accumulated from the per-TC
 * MPC registers in code not visible here — confirm; as shown it is 0. */
3739 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3740 adapter->stats.pf.gprc -= missed_rx;
/* 82599/X540/X550 expose 36-bit octet counters split low/high;
 * 82598 (else branch) only has the legacy single registers. */
3742 if (hw->mac.type != ixgbe_mac_82598EB) {
3743 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3744 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3745 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3746 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3747 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3748 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3749 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3750 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3752 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3753 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3754 /* 82598 only has a counter in the high register */
3755 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3756 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3757 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3761 * Workaround: mprc hardware is incorrectly counting
3762 * broadcasts, so for now we subtract those.
3764 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3765 adapter->stats.pf.bprc += bprc;
3766 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3767 if (hw->mac.type == ixgbe_mac_82598EB)
3768 adapter->stats.pf.mprc -= bprc;
/* RX size-bucket histograms. */
3770 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3771 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3772 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3773 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3774 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3775 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3777 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3778 adapter->stats.pf.lxontxc += lxon;
3779 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3780 adapter->stats.pf.lxofftxc += lxoff;
3781 total = lxon + lxoff;
/* Flow-control pause frames are counted as TX packets by hardware;
 * subtract them so the TX counters reflect real traffic only. */
3783 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3784 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3785 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3786 adapter->stats.pf.gptc -= total;
3787 adapter->stats.pf.mptc -= total;
3788 adapter->stats.pf.ptc64 -= total;
3789 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3791 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3792 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3793 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3794 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3795 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3796 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3797 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3798 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3799 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3800 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3801 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3802 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3803 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3804 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3805 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3806 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3807 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3808 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3809 /* Only read FCOE on 82599 */
3810 if (hw->mac.type != ixgbe_mac_82598EB) {
3811 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3812 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3813 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3814 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3815 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3818 /* Fill out the OS statistics structure */
3819 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3820 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3821 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3822 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3823 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3824 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3825 IXGBE_SET_COLLISIONS(adapter, 0);
3826 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3827 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3828 + adapter->stats.pf.rlec);
3831 #if __FreeBSD_version >= 1100036
/*
 * if_get_counter callback: serve the counters the driver tracks itself
 * (accumulated by ixgbe_update_stats_counters); OQDROPS is summed live
 * from each TX ring's buf_ring drop counter; everything else falls
 * through to the stack's defaults.
 */
3833 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3835 struct adapter *adapter;
3836 struct tx_ring *txr;
3839 adapter = if_getsoftc(ifp);
3842 case IFCOUNTER_IPACKETS:
3843 return (adapter->ipackets);
3844 case IFCOUNTER_OPACKETS:
3845 return (adapter->opackets);
3846 case IFCOUNTER_IBYTES:
3847 return (adapter->ibytes);
3848 case IFCOUNTER_OBYTES:
3849 return (adapter->obytes);
3850 case IFCOUNTER_IMCASTS:
3851 return (adapter->imcasts);
3852 case IFCOUNTER_OMCASTS:
3853 return (adapter->omcasts);
3854 case IFCOUNTER_COLLISIONS:
3856 case IFCOUNTER_IQDROPS:
3857 return (adapter->iqdrops);
3858 case IFCOUNTER_OQDROPS:
/* Sum the software drop counts across all TX buf_rings. */
3860 txr = adapter->tx_rings;
3861 for (int i = 0; i < adapter->num_queues; i++, txr++)
3862 rv += txr->br->br_drops;
3864 case IFCOUNTER_IERRORS:
3865 return (adapter->ierrors);
3867 return (if_get_counter_default(ifp, cnt));
3872 /** ixgbe_sysctl_tdh_handler - Handler function
3873 * Retrieves the TDH value from the hardware
/* Read-only sysctl: reports the live Transmit Descriptor Head register
 * for the TX ring passed via oid_arg1. */
3876 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3880 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3883 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3884 error = sysctl_handle_int(oidp, &val, 0, req);
3885 if (error || !req->newptr)
3890 /** ixgbe_sysctl_tdt_handler - Handler function
3891 * Retrieves the TDT value from the hardware
/* Read-only sysctl: reports the live Transmit Descriptor Tail register
 * for the TX ring passed via oid_arg1. */
3894 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3898 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3901 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3902 error = sysctl_handle_int(oidp, &val, 0, req);
3903 if (error || !req->newptr)
3908 /** ixgbe_sysctl_rdh_handler - Handler function
3909 * Retrieves the RDH value from the hardware
/* Read-only sysctl: reports the live Receive Descriptor Head register
 * for the RX ring passed via oid_arg1. */
3912 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3916 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3919 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3920 error = sysctl_handle_int(oidp, &val, 0, req);
3921 if (error || !req->newptr)
3926 /** ixgbe_sysctl_rdt_handler - Handler function
3927 * Retrieves the RDT value from the hardware
/* Read-only sysctl: reports the live Receive Descriptor Tail register
 * for the RX ring passed via oid_arg1. */
3930 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3934 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3937 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3938 error = sysctl_handle_int(oidp, &val, 0, req);
3939 if (error || !req->newptr)
/*
 * Sysctl handler for the per-queue interrupt rate.  Converts the EITR
 * interval field (2 usec units in bits 3..11) to interrupts/sec for
 * display, and converts a written rate back into an EITR interval.
 */
3945 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3948 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3949 unsigned int reg, usec, rate;
3951 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* Extract the interval field; 500000 = 1e6 / 2usec-per-unit. */
3952 usec = ((reg & 0x0FF8) >> 3);
3954 rate = 500000 / usec;
3957 error = sysctl_handle_int(oidp, &rate, 0, req);
3958 if (error || !req->newptr)
3960 reg &= ~0xfff; /* default, no limitation */
3961 ixgbe_max_interrupt_rate = 0;
3962 if (rate > 0 && rate < 500000) {
3965 ixgbe_max_interrupt_rate = rate;
/* 4000000/rate pre-shifted into the field position, masked to it. */
3966 reg |= ((4000000/rate) & 0xff8 );
3968 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * Register the device-level sysctl knobs: flow control, AIM, advertised
 * speed and thermal test for all parts, plus DMAC (X550+), EEE (X550T /
 * X550EM KR), WoL (10GBaseT parts) and external-PHY nodes (X550EM baseT),
 * gated by device ID.
 */
3973 ixgbe_add_device_sysctls(struct adapter *adapter)
3975 device_t dev = adapter->dev;
3976 struct ixgbe_hw *hw = &adapter->hw;
3977 struct sysctl_oid_list *child;
3978 struct sysctl_ctx_list *ctx;
3980 ctx = device_get_sysctl_ctx(dev);
3981 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3983 /* Sysctls for all devices */
3984 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3985 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3986 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3988 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3990 &ixgbe_enable_aim, 1, "Interrupt Moderation");
3992 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3993 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3994 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3996 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3997 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3998 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4000 /* for X550 devices */
4001 if (hw->mac.type >= ixgbe_mac_X550)
4002 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4003 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4004 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4006 /* for X550T and X550EM backplane devices */
/* Same device-ID gate as ixgbe_check_eee_support(). */
4007 if (hw->device_id == IXGBE_DEV_ID_X550T ||
4008 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4009 struct sysctl_oid *eee_node;
4010 struct sysctl_oid_list *eee_list;
4012 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4014 "Energy Efficient Ethernet sysctls");
4015 eee_list = SYSCTL_CHILDREN(eee_node);
4017 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4018 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4019 ixgbe_sysctl_eee_enable, "I",
4020 "Enable or Disable EEE");
4022 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4023 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4024 ixgbe_sysctl_eee_negotiated, "I",
4025 "EEE negotiated on link");
4027 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4028 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4029 ixgbe_sysctl_eee_tx_lpi_status, "I",
4030 "Whether or not TX link is in LPI state");
4032 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4033 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4034 ixgbe_sysctl_eee_rx_lpi_status, "I",
4035 "Whether or not RX link is in LPI state");
4038 /* for certain 10GBaseT devices */
4039 if (hw->device_id == IXGBE_DEV_ID_X550T ||
4040 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4041 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4042 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4043 ixgbe_sysctl_wol_enable, "I",
4044 "Enable/Disable Wake on LAN");
4046 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4047 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4048 ixgbe_sysctl_wufc, "I",
4049 "Enable/Disable Wake Up Filters");
4052 /* for X550EM 10GBaseT devices */
4053 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4054 struct sysctl_oid *phy_node;
4055 struct sysctl_oid_list *phy_list;
4057 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4059 "External PHY sysctls");
4060 phy_list = SYSCTL_CHILDREN(phy_node);
4062 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4063 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4064 ixgbe_sysctl_phy_temp, "I",
4065 "Current External PHY Temperature (Celsius)");
4067 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4068 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4069 ixgbe_sysctl_phy_overtemp_occurred, "I",
4070 "External PHY High Temperature Event Occurred");
4075 * Add sysctl variables, one per statistic, to the system.
/*
 * Registers one sysctl per driver/queue/MAC statistic.  Fixes applied:
 * (1) the RX queue loop previously created each per-queue sysctl node
 *     twice (duplicate snprintf/SYSCTL_ADD_NODE/SYSCTL_CHILDREN),
 *     registering a duplicate OID — the second copy is removed;
 * (2) "management_pkts_drpd" previously exported &stats->mngptc (the
 *     transmitted counter, already exported below) instead of the
 *     dropped counter &stats->mngpdc that update_stats accumulates.
 */
4078 ixgbe_add_hw_stats(struct adapter *adapter)
4080 device_t dev = adapter->dev;
4082 struct tx_ring *txr = adapter->tx_rings;
4083 struct rx_ring *rxr = adapter->rx_rings;
4085 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4086 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4087 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4088 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4090 struct sysctl_oid *stat_node, *queue_node;
4091 struct sysctl_oid_list *stat_list, *queue_list;
4093 #define QUEUE_NAME_LEN 32
4094 char namebuf[QUEUE_NAME_LEN];
4096 /* Driver Statistics */
4097 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4098 CTLFLAG_RD, &adapter->dropped_pkts,
4099 "Driver dropped packets");
4100 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4101 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4102 "m_defrag() failed");
4103 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4104 CTLFLAG_RD, &adapter->watchdog_events,
4105 "Watchdog timeouts");
4106 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4107 CTLFLAG_RD, &adapter->link_irq,
4108 "Link MSIX IRQ Handled");
/* Per-TX-queue statistics, one "queue%d" node each. */
4110 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4111 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4112 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4113 CTLFLAG_RD, NULL, "Queue Name");
4114 queue_list = SYSCTL_CHILDREN(queue_node);
4116 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4117 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4118 sizeof(&adapter->queues[i]),
4119 ixgbe_sysctl_interrupt_rate_handler, "IU",
4121 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4122 CTLFLAG_RD, &(adapter->queues[i].irqs),
4123 "irqs on this queue");
4124 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4125 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4126 ixgbe_sysctl_tdh_handler, "IU",
4127 "Transmit Descriptor Head");
4128 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4129 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4130 ixgbe_sysctl_tdt_handler, "IU",
4131 "Transmit Descriptor Tail");
4132 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4133 CTLFLAG_RD, &txr->tso_tx,
4135 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4136 CTLFLAG_RD, &txr->no_tx_dma_setup,
4137 "Driver tx dma failure in xmit");
4138 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4139 CTLFLAG_RD, &txr->no_desc_avail,
4140 "Queue No Descriptor Available");
4141 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4142 CTLFLAG_RD, &txr->total_packets,
4143 "Queue Packets Transmitted");
4144 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4145 CTLFLAG_RD, &txr->br->br_drops,
4146 "Packets dropped in buf_ring");
/* Per-RX-queue statistics.  The node is created exactly once here;
 * a second, duplicate node creation was removed. */
4149 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4150 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4151 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4152 CTLFLAG_RD, NULL, "Queue Name");
4153 queue_list = SYSCTL_CHILDREN(queue_node);
4155 struct lro_ctrl *lro = &rxr->lro;
4162 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4163 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4164 ixgbe_sysctl_rdh_handler, "IU",
4165 "Receive Descriptor Head");
4166 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4167 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4168 ixgbe_sysctl_rdt_handler, "IU",
4169 "Receive Descriptor Tail");
4170 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4171 CTLFLAG_RD, &rxr->rx_packets,
4172 "Queue Packets Received");
4173 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4174 CTLFLAG_RD, &rxr->rx_bytes,
4175 "Queue Bytes Received");
4176 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4177 CTLFLAG_RD, &rxr->rx_copies,
4178 "Copied RX Frames");
4179 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4180 CTLFLAG_RD, &lro->lro_queued, 0,
4182 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4183 CTLFLAG_RD, &lro->lro_flushed, 0,
4187 /* MAC stats get the own sub node */
4189 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4190 CTLFLAG_RD, NULL, "MAC Statistics");
4191 stat_list = SYSCTL_CHILDREN(stat_node);
4193 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4194 CTLFLAG_RD, &stats->crcerrs,
4196 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4197 CTLFLAG_RD, &stats->illerrc,
4198 "Illegal Byte Errors");
4199 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4200 CTLFLAG_RD, &stats->errbc,
4202 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4203 CTLFLAG_RD, &stats->mspdc,
4204 "MAC Short Packets Discarded");
4205 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4206 CTLFLAG_RD, &stats->mlfc,
4207 "MAC Local Faults");
4208 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4209 CTLFLAG_RD, &stats->mrfc,
4210 "MAC Remote Faults");
4211 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4212 CTLFLAG_RD, &stats->rlec,
4213 "Receive Length Errors");
4215 /* Flow Control stats */
4216 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4217 CTLFLAG_RD, &stats->lxontxc,
4218 "Link XON Transmitted");
4219 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4220 CTLFLAG_RD, &stats->lxonrxc,
4221 "Link XON Received");
4222 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4223 CTLFLAG_RD, &stats->lxofftxc,
4224 "Link XOFF Transmitted");
4225 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4226 CTLFLAG_RD, &stats->lxoffrxc,
4227 "Link XOFF Received");
4229 /* Packet Reception Stats */
4230 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4231 CTLFLAG_RD, &stats->tor,
4232 "Total Octets Received");
4233 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4234 CTLFLAG_RD, &stats->gorc,
4235 "Good Octets Received");
4236 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4237 CTLFLAG_RD, &stats->tpr,
4238 "Total Packets Received");
4239 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4240 CTLFLAG_RD, &stats->gprc,
4241 "Good Packets Received");
4242 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4243 CTLFLAG_RD, &stats->mprc,
4244 "Multicast Packets Received");
4245 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4246 CTLFLAG_RD, &stats->bprc,
4247 "Broadcast Packets Received");
4248 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4249 CTLFLAG_RD, &stats->prc64,
4250 "64 byte frames received ");
4251 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4252 CTLFLAG_RD, &stats->prc127,
4253 "65-127 byte frames received");
4254 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4255 CTLFLAG_RD, &stats->prc255,
4256 "128-255 byte frames received");
4257 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4258 CTLFLAG_RD, &stats->prc511,
4259 "256-511 byte frames received");
4260 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4261 CTLFLAG_RD, &stats->prc1023,
4262 "512-1023 byte frames received");
4263 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4264 CTLFLAG_RD, &stats->prc1522,
4265 "1023-1522 byte frames received");
4266 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4267 CTLFLAG_RD, &stats->ruc,
4268 "Receive Undersized");
4269 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4270 CTLFLAG_RD, &stats->rfc,
4271 "Fragmented Packets Received ");
4272 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4273 CTLFLAG_RD, &stats->roc,
4274 "Oversized Packets Received");
4275 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4276 CTLFLAG_RD, &stats->rjc,
4278 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4279 CTLFLAG_RD, &stats->mngprc,
4280 "Management Packets Received");
4281 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4282 CTLFLAG_RD, &stats->mngpdc,
4283 "Management Packets Dropped");
4284 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4285 CTLFLAG_RD, &stats->xec,
4288 /* Packet Transmission Stats */
4289 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4290 CTLFLAG_RD, &stats->gotc,
4291 "Good Octets Transmitted");
4292 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4293 CTLFLAG_RD, &stats->tpt,
4294 "Total Packets Transmitted");
4295 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4296 CTLFLAG_RD, &stats->gptc,
4297 "Good Packets Transmitted");
4298 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4299 CTLFLAG_RD, &stats->bptc,
4300 "Broadcast Packets Transmitted");
4301 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4302 CTLFLAG_RD, &stats->mptc,
4303 "Multicast Packets Transmitted");
4304 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4305 CTLFLAG_RD, &stats->mngptc,
4306 "Management Packets Transmitted");
4307 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4308 CTLFLAG_RD, &stats->ptc64,
4309 "64 byte frames transmitted ");
4310 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4311 CTLFLAG_RD, &stats->ptc127,
4312 "65-127 byte frames transmitted");
4313 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4314 CTLFLAG_RD, &stats->ptc255,
4315 "128-255 byte frames transmitted");
4316 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4317 CTLFLAG_RD, &stats->ptc511,
4318 "256-511 byte frames transmitted");
4319 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4320 CTLFLAG_RD, &stats->ptc1023,
4321 "512-1023 byte frames transmitted");
4322 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4323 CTLFLAG_RD, &stats->ptc1522,
4324 "1024-1522 byte frames transmitted");
4328 ** Set flow control using sysctl:
4329 ** Flow control values:
/* Sysctl handler: writes the requested flow-control mode into
 * adapter->fc and pushes it to hardware via ixgbe_fc_enable().
 * RX drop on full ring is only enabled when flow control is off,
 * and only matters with multiple queues. */
4336 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4339 struct adapter *adapter = (struct adapter *) arg1;
4342 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4343 if ((error) || (req->newptr == NULL))
4346 /* Don't bother if it's not changed */
4347 if (adapter->fc == last)
4350 switch (adapter->fc) {
4351 case ixgbe_fc_rx_pause:
4352 case ixgbe_fc_tx_pause:
/* Pause-based modes: hardware must not drop on full ring. */
4354 adapter->hw.fc.requested_mode = adapter->fc;
4355 if (adapter->num_queues > 1)
4356 ixgbe_disable_rx_drop(adapter);
4359 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4360 if (adapter->num_queues > 1)
4361 ixgbe_enable_rx_drop(adapter);
4367 /* Don't autoneg if forcing a value */
4368 adapter->hw.fc.disable_fc_autoneg = TRUE;
4369 ixgbe_fc_enable(&adapter->hw);
4374 ** Control advertised link speed:
4376 ** 0x1 - advertise 100 Mb
4377 ** 0x2 - advertise 1G
4378 ** 0x4 - advertise 10G
/* Sysctl handler: validates the requested speed bitmask (0x1-0x7),
 * rejects it on media that can't renegotiate (non-copper, non-MSF
 * fiber) and on MACs without 100Mb support, then restarts the link. */
4381 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4383 int error = 0, requested;
4384 struct adapter *adapter;
4386 struct ixgbe_hw *hw;
4387 ixgbe_link_speed speed = 0;
4389 adapter = (struct adapter *) arg1;
4393 requested = adapter->advertise;
4394 error = sysctl_handle_int(oidp, &requested, 0, req);
4395 if ((error) || (req->newptr == NULL))
4398 /* Checks to validate new value */
4399 if (adapter->advertise == requested) /* no change */
4402 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4403 (hw->phy.multispeed_fiber))) {
4405 "Advertised speed can only be set on copper or "
4406 "multispeed fiber media types.\n");
4410 if (requested < 0x1 || requested > 0x7) {
4412 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
/* 100Mb advertisement is only available on X540/X550 MACs. */
4416 if ((requested & 0x1)
4417 && (hw->mac.type != ixgbe_mac_X540)
4418 && (hw->mac.type != ixgbe_mac_X550)) {
4419 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4423 /* Set new value and report new advertised mode */
4424 if (requested & 0x1)
4425 speed |= IXGBE_LINK_SPEED_100_FULL;
4426 if (requested & 0x2)
4427 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4428 if (requested & 0x4)
4429 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4431 hw->mac.autotry_restart = TRUE;
4432 hw->mac.ops.setup_link(hw, speed, TRUE);
4433 adapter->advertise = requested;
4439 * The following two sysctls are for X550 BaseT devices;
4440 * they deal with the external PHY used in them.
/* Read-only sysctl: report the external PHY's current temperature
 * (X550EM baseT only; others have no supported external sensor). */
4443 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4445 struct adapter *adapter = (struct adapter *) arg1;
4446 struct ixgbe_hw *hw = &adapter->hw;
4449 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4450 device_printf(adapter->dev,
4451 "Device has no supported external thermal sensor.\n");
4455 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4456 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4458 device_printf(adapter->dev,
4459 "Error reading from PHY's current temperature register\n");
4463 /* Shift temp for output */
4466 return (sysctl_handle_int(oidp, NULL, reg, req));
4470 * Reports whether the current PHY temperature is over
4471 * the overtemp threshold.
4472 * - This is reported directly from the PHY
4475 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4477 struct adapter *adapter = (struct adapter *) arg1;
4478 struct ixgbe_hw *hw = &adapter->hw;
/* Only the X550EM baseT external PHY exposes this status. */
4481 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4482 device_printf(adapter->dev,
4483 "Device has no supported external thermal sensor.\n");
4487 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4488 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4490 device_printf(adapter->dev,
4491 "Error reading from PHY's temperature status register\n");
4495 /* Get occurrence bit */
/* Bit 14 of the status register flags an overtemp event; normalize
 * to 0/1 for the sysctl. */
4496 reg = !!(reg & 0x4000);
4497 return (sysctl_handle_int(oidp, 0, reg, req));
4501 ** Thermal Shutdown Trigger (internal MAC)
4502 ** - Set this to 1 to cause an overtemp event to occur
/* Debug sysctl: writing a non-zero value injects a thermal-sensor
 * interrupt by setting the TS bit in EICS (interrupt cause set). */
4505 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4507 struct adapter *adapter = (struct adapter *) arg1;
4508 struct ixgbe_hw *hw = &adapter->hw;
4509 int error, fire = 0;
4511 error = sysctl_handle_int(oidp, &fire, 0, req);
4512 if ((error) || (req->newptr == NULL))
4516 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4517 reg |= IXGBE_EICR_TS;
4518 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4525 ** Manage DMA Coalescing.
4527 ** 0/1 - off / on (use default value of 1000)
4529 ** Legal timer values are:
4530 ** 50,100,250,500,1000,2000,5000,10000
4532 ** Turning off interrupt moderation will also turn this off.
4535 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4537 struct adapter *adapter = (struct adapter *) arg1;
4538 struct ixgbe_hw *hw = &adapter->hw;
4539 struct ifnet *ifp = adapter->ifp;
/* Remember the previous value so invalid input can be rolled back. */
4543 oldval = adapter->dmac;
4544 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4545 if ((error) || (req->newptr == NULL))
/* DMAC is only implemented on X550-class MACs. */
4548 switch (hw->mac.type) {
4549 case ixgbe_mac_X550:
4550 case ixgbe_mac_X550EM_x:
4553 device_printf(adapter->dev,
4554 "DMA Coalescing is only supported on X550 devices\n");
4558 switch (adapter->dmac) {
4562 case 1: /* Enable and use default */
4563 adapter->dmac = 1000;
4573 /* Legal values - allow */
4576 /* Do nothing, illegal value */
4577 adapter->dmac = oldval;
4581 /* Re-initialize hardware if it's already running */
4582 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4583 ixgbe_init(adapter);
4589 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4595 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4597 struct adapter *adapter = (struct adapter *) arg1;
4598 struct ixgbe_hw *hw = &adapter->hw;
4599 int new_wol_enabled;
4602 new_wol_enabled = hw->wol_enabled;
4603 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4604 if ((error) || (req->newptr == NULL))
4606 if (new_wol_enabled == hw->wol_enabled)
/* Refuse to enable WoL on ports whose NVM says it is unsupported
 * (wol_support set by ixgbe_check_wol_support). */
4609 if (new_wol_enabled > 0 && !adapter->wol_support)
/* Normalize to a strict 0/1 flag. */
4612 hw->wol_enabled = !!(new_wol_enabled);
4618 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4619 * if supported by the adapter.
4625 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4627 struct adapter *adapter = (struct adapter *) arg1;
4628 struct ifnet *ifp = adapter->ifp;
4629 int new_eee_enabled, error = 0;
4631 new_eee_enabled = adapter->eee_enabled;
4632 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4633 if ((error) || (req->newptr == NULL))
4635 if (new_eee_enabled == adapter->eee_enabled)
/* Refuse to enable EEE on devices that don't support it
 * (eee_support set by ixgbe_check_eee_support). */
4638 if (new_eee_enabled > 0 && !adapter->eee_support)
/* Normalize to a strict 0/1 flag. */
4641 adapter->eee_enabled = !!(new_eee_enabled);
4643 /* Re-initialize hardware if it's already running */
4644 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4645 ixgbe_init(adapter);
4651 * Read-only sysctl indicating whether EEE support was negotiated
4655 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4657 struct adapter *adapter = (struct adapter *) arg1;
4658 struct ixgbe_hw *hw = &adapter->hw;
/* Report the EEE negotiation bit from EEE_STAT as 0/1. */
4661 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4663 return (sysctl_handle_int(oidp, 0, status, req));
4667 * Read-only sysctl indicating whether RX Link is in LPI state.
ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter = (struct adapter *) arg1;
	struct ixgbe_hw *hw = &adapter->hw;

	/* 1 when the RX link is currently in the EEE low-power-idle state. */
	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
	    IXGBE_EEE_RX_LPI_STATUS);

	/* Read-only sysctl: publish the status; writes are not accepted. */
	return (sysctl_handle_int(oidp, 0, status, req));
4683 * Read-only sysctl indicating whether TX Link is in LPI state.
ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter = (struct adapter *) arg1;
	struct ixgbe_hw *hw = &adapter->hw;

	/* 1 when the TX link is currently in the EEE low-power-idle state. */
	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
	    IXGBE_EEE_TX_LPI_STATUS);

	/* Read-only sysctl: publish the status; writes are not accepted. */
	return (sysctl_handle_int(oidp, 0, status, req));
4699 * Sysctl to enable/disable the types of packets that the
4700 * adapter will wake up on upon receipt.
4701 * WUFC - Wake Up Filter Control
4703 * 0x1 - Link Status Change
4704 * 0x2 - Magic Packet
4705 * 0x4 - Direct Exact
4706 * 0x8 - Directed Multicast
4708 * 0x20 - ARP/IPv4 Request Packet
4709 * 0x40 - Direct IPv4 Packet
4710 * 0x80 - Direct IPv6 Packet
4712 * Setting another flag will cause the sysctl to return an
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter = (struct adapter *) arg1;

	/* Seed with the current wake-up filter bits so a plain read reports them. */
	new_wufc = adapter->wufc;

	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
	/* Copy error, or read-only access (no new value supplied): stop here. */
	if ((error) || (req->newptr == NULL))
	/* Requested value matches the current one; nothing to update. */
	if (new_wufc == adapter->wufc)

	/* Only the low byte holds user-settable filter flags (see list above). */
	if (new_wufc & 0xffffff00)

	/*
	 * NOTE(review): merges the low 24 bits of the previously stored
	 * value into the new one — presumably to preserve driver-managed
	 * WUFC bits outside the user-settable byte; confirm against the
	 * WUFC register layout.
	 */
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;
4742 ** Enable the hardware to drop packets when the buffer is
** full. This is useful when multiqueue, so that no single
4744 ** queue being full stalls the entire RX engine. We only
4745 ** enable this when Multiqueue AND when Flow Control is
ixgbe_enable_rx_drop(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	 * Set the drop-enable bit in SRRCTL for every RX queue so a full
	 * ring drops packets instead of stalling the whole RX engine.
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
ixgbe_disable_rx_drop(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	/* Clear the drop-enable bit in SRRCTL for every RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4773 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4777 switch (adapter->hw.mac.type) {
4778 case ixgbe_mac_82598EB:
4779 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4782 case ixgbe_mac_82599EB:
4783 case ixgbe_mac_X540:
4784 case ixgbe_mac_X550:
4785 case ixgbe_mac_X550EM_x:
4786 mask = (queues & 0xFFFFFFFF);
4787 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4788 mask = (queues >> 32);
4789 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);