1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
44 * Set this to one to display debug statistics
45 *********************************************************************/
/* Global debug switch: non-zero enables extra statistics output. */
46 int ixgbe_display_debug_stats = 0;
48 /*********************************************************************
50 *********************************************************************/
/* Driver version string; embedded into the probe description message. */
51 char ixgbe_driver_version[] = "2.8.3";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
/*
 * PCI ID match table walked by ixgbe_probe(). A subvendor/subdevice of 0
 * acts as a wildcard; the final index selects the branding string below.
 */
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95 /* required last entry */
99 /*********************************************************************
100 * Table of branding strings
101 *********************************************************************/
/* Indexed by the last field of ixgbe_vendor_info_array entries. */
103 static char *ixgbe_strings[] = {
104 "Intel(R) PRO/10GbE PCI-Express Network Driver"
107 /*********************************************************************
108 * Function prototypes
109 *********************************************************************/
/* Device interface entry points (probe/attach/detach/power management). */
110 static int ixgbe_probe(device_t);
111 static int ixgbe_attach(device_t);
112 static int ixgbe_detach(device_t);
113 static int ixgbe_shutdown(device_t);
114 static int ixgbe_suspend(device_t);
115 static int ixgbe_resume(device_t);
116 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void ixgbe_init(void *);
118 static void ixgbe_init_locked(struct adapter *);
119 static void ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* Media / hardware identification and resource allocation helpers. */
123 static void ixgbe_add_media_types(struct adapter *);
124 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int ixgbe_media_change(struct ifnet *);
126 static void ixgbe_identify_hardware(struct adapter *);
127 static int ixgbe_allocate_pci_resources(struct adapter *);
128 static void ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int ixgbe_allocate_msix(struct adapter *);
130 static int ixgbe_allocate_legacy(struct adapter *);
131 static int ixgbe_setup_msix(struct adapter *);
132 static void ixgbe_free_pci_resources(struct adapter *);
133 static void ixgbe_local_timer(void *);
134 static int ixgbe_setup_interface(device_t, struct adapter *);
135 static void ixgbe_config_dmac(struct adapter *);
136 static void ixgbe_config_delay_values(struct adapter *);
137 static void ixgbe_config_link(struct adapter *);
138 static void ixgbe_check_eee_support(struct adapter *);
139 static void ixgbe_check_wol_support(struct adapter *);
140 static int ixgbe_setup_low_power_mode(struct adapter *);
141 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring bring-up and drop control. */
143 static void ixgbe_initialize_transmit_units(struct adapter *);
144 static void ixgbe_initialize_receive_units(struct adapter *);
145 static void ixgbe_enable_rx_drop(struct adapter *);
146 static void ixgbe_disable_rx_drop(struct adapter *);
/* Interrupt, statistics, filtering and link-state management. */
148 static void ixgbe_enable_intr(struct adapter *);
149 static void ixgbe_disable_intr(struct adapter *);
150 static void ixgbe_update_stats_counters(struct adapter *);
151 static void ixgbe_set_promisc(struct adapter *);
152 static void ixgbe_set_multi(struct adapter *);
153 static void ixgbe_update_link_status(struct adapter *);
154 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void ixgbe_configure_ivars(struct adapter *);
156 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware support and event callbacks. */
158 static void ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_add_device_sysctls(struct adapter *);
163 static void ixgbe_add_hw_stats(struct adapter *);
165 /* Sysctl handlers */
166 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
179 /* Support for pluggable optic modules */
180 static bool ixgbe_sfp_probe(struct adapter *);
181 static void ixgbe_setup_optics(struct adapter *);
183 /* Legacy (single vector) interrupt handler */
184 static void ixgbe_legacy_irq(void *);
186 /* The MSI/X Interrupt handlers */
187 static void ixgbe_msix_que(void *);
188 static void ixgbe_msix_link(void *);
190 /* Deferred interrupt tasklets */
191 static void ixgbe_handle_que(void *, int);
192 static void ixgbe_handle_link(void *, int);
193 static void ixgbe_handle_msf(void *, int);
194 static void ixgbe_handle_mod(void *, int);
195 static void ixgbe_handle_phy(void *, int);
198 static void ixgbe_reinit_fdir(void *, int);
201 /*********************************************************************
202 * FreeBSD Device Interface Entry Points
203 *********************************************************************/
/* newbus method table mapping device_if.m methods to driver functions. */
205 static device_method_t ix_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, ixgbe_probe),
208 DEVMETHOD(device_attach, ixgbe_attach),
209 DEVMETHOD(device_detach, ixgbe_detach),
210 DEVMETHOD(device_shutdown, ixgbe_shutdown),
211 DEVMETHOD(device_suspend, ixgbe_suspend),
212 DEVMETHOD(device_resume, ixgbe_resume),
/* Driver descriptor: name "ix", methods, and softc size for newbus. */
216 static driver_t ix_driver = {
217 "ix", ix_methods, sizeof(struct adapter),
220 devclass_t ix_devclass;
/* Register on the pci bus; module depends on pci and ether. */
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
227 ** TUNEABLE PARAMETERS:
/* Root sysctl node: hw.ix.* */
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231 "IXGBE driver parameters");
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241 "Enable adaptive interrupt moderation");
/* Upper bound on per-vector interrupt rate (interrupts/second). */
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251 &ixgbe_rx_process_limit, 0,
252 "Maximum number of received packets to process at a time,"
253 "-1 means unlimited");
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259 &ixgbe_tx_process_limit, 0,
260 "Maximum number of sent packets to process at a time,"
261 "-1 means unlimited");
264 ** Smart speed setting, default to on
265 ** this only works as a compile option
266 ** right now as its during attach, set
267 ** this to 'ixgbe_smart_speed_off' to
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
273 * MSIX should be the default for best performance,
274 * but this allows it to be forced off for testing.
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278 "Enable MSI-X interrupts");
281 * Number of Queues, can be set to 0,
282 * it then autoconfigures based on the
283 * number of cpus with a max of 8. This
284 * can be overridden manually here.
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288 "Number of queues to configure, 0 indicates autoconfigure");
291 ** Number of TX descriptors per ring,
292 ** setting higher than RX as this seems
293 ** the better performing choice.
295 static int ixgbe_txd = PERFORM_TXD;
296 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
297 "Number of transmit descriptors per queue");
299 /* Number of RX descriptors per ring */
300 static int ixgbe_rxd = PERFORM_RXD;
301 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
302 "Number of receive descriptors per queue");
305 ** Defining this on will allow the use
306 ** of unsupported SFP+ modules, note that
307 ** doing so you are on your own :)
309 static int allow_unsupported_sfp = FALSE;
310 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
312 /* Keep running tab on them for sanity check */
313 static int ixgbe_total_ports;
317 ** Flow Director actually 'steals'
318 ** part of the packet buffer as its
319 ** filter pool, this variable controls
321 ** 0 = 64K, 1 = 128K, 2 = 256K
323 static int fdir_pballoc = 1;
328 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
329 * be a reference on how to implement netmap support in a driver.
330 * Additional comments are in ixgbe_netmap.h .
332 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
333 * that extend the standard driver.
335 #include <dev/netmap/ixgbe_netmap.h>
336 #endif /* DEV_NETMAP */
338 /*********************************************************************
339 * Device identification routine
341 * ixgbe_probe determines if the driver should be loaded on
342 * adapter based on PCI vendor/device id of the adapter.
344 * return BUS_PROBE_DEFAULT on success, positive on failure
345 *********************************************************************/
348 ixgbe_probe(device_t dev)
350 ixgbe_vendor_info_t *ent;
352 u16 pci_vendor_id = 0;
353 u16 pci_device_id = 0;
354 u16 pci_subvendor_id = 0;
355 u16 pci_subdevice_id = 0;
356 char adapter_name[256];
358 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Reject any non-Intel device immediately. */
360 pci_vendor_id = pci_get_vendor(dev);
361 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
364 pci_device_id = pci_get_device(dev);
365 pci_subvendor_id = pci_get_subvendor(dev);
366 pci_subdevice_id = pci_get_subdevice(dev);
/*
 * Walk the supported-device table; a subvendor/subdevice of 0 in the
 * table entry matches any value read from the device.
 */
368 ent = ixgbe_vendor_info_array;
369 while (ent->vendor_id != 0) {
370 if ((pci_vendor_id == ent->vendor_id) &&
371 (pci_device_id == ent->device_id) &&
373 ((pci_subvendor_id == ent->subvendor_id) ||
374 (ent->subvendor_id == 0)) &&
376 ((pci_subdevice_id == ent->subdevice_id) ||
377 (ent->subdevice_id == 0))) {
/* Match: publish "<branding>, Version - <driver version>" as the description. */
378 sprintf(adapter_name, "%s, Version - %s",
379 ixgbe_strings[ent->index],
380 ixgbe_driver_version);
381 device_set_desc_copy(dev, adapter_name);
383 return (BUS_PROBE_DEFAULT);
390 /*********************************************************************
391 * Device initialization routine
393 * The attach entry point is called when the driver is being loaded.
394 * This routine identifies the type of hardware, allocates all resources
395 * and initializes the hardware.
397 * return 0 on success, positive on failure
398 *********************************************************************/
401 ixgbe_attach(device_t dev)
403 struct adapter *adapter;
409 INIT_DEBUGOUT("ixgbe_attach: begin");
411 /* Allocate, clear, and link in our adapter structure */
412 adapter = device_get_softc(dev);
413 adapter->dev = adapter->osdep.dev = dev;
/* Core mutex must exist before the callout below is tied to it. */
417 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
419 /* Set up the timer callout */
420 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
422 /* Determine hardware revision */
423 ixgbe_identify_hardware(adapter);
425 /* Do base PCI setup - map BAR0 */
426 if (ixgbe_allocate_pci_resources(adapter)) {
427 device_printf(dev, "Allocation of PCI resources failed\n");
/*
 * Validate the TX descriptor tunable: the ring size must keep the
 * descriptor area DBA_ALIGN-aligned and stay within MIN_TXD..MAX_TXD,
 * otherwise fall back to DEFAULT_TXD.
 */
432 /* Do descriptor calc and sanity checks */
433 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
434 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
435 device_printf(dev, "TXD config issue, using default!\n");
436 adapter->num_tx_desc = DEFAULT_TXD;
438 adapter->num_tx_desc = ixgbe_txd;
441 ** With many RX rings it is easy to exceed the
442 ** system mbuf allocation. Tuning nmbclusters
443 ** can alleviate this.
445 if (nmbclusters > 0) {
447 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
448 if (s > nmbclusters) {
449 device_printf(dev, "RX Descriptors exceed "
450 "system mbuf max, using default instead!\n");
451 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range validation for the RX descriptor tunable. */
455 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
456 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
457 device_printf(dev, "RXD config issue, using default!\n");
458 adapter->num_rx_desc = DEFAULT_RXD;
460 adapter->num_rx_desc = ixgbe_rxd;
462 /* Allocate our TX/RX Queues */
463 if (ixgbe_allocate_queues(adapter)) {
468 /* Allocate multicast array memory. */
469 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
470 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
471 if (adapter->mta == NULL) {
472 device_printf(dev, "Can not allocate multicast setup array\n");
477 /* Initialize the shared code */
478 hw->allow_unsupported_sfp = allow_unsupported_sfp;
479 error = ixgbe_init_shared_code(hw);
480 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
482 ** No optics in this port, set up
483 ** so the timer routine will probe
484 ** for later insertion.
486 adapter->sfp_probe = TRUE;
488 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
489 device_printf(dev,"Unsupported SFP+ module detected!\n");
493 device_printf(dev,"Unable to initialize the shared code\n");
498 /* Make sure we have a good EEPROM before we read from it */
499 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
500 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
/* Bring up the hardware; some results are warnings, not fatal errors. */
505 error = ixgbe_init_hw(hw);
507 case IXGBE_ERR_EEPROM_VERSION:
508 device_printf(dev, "This device is a pre-production adapter/"
509 "LOM. Please be aware there may be issues associated "
510 "with your hardware.\n If you are experiencing problems "
511 "please contact your Intel or hardware representative "
512 "who provided you with this hardware.\n");
514 case IXGBE_ERR_SFP_NOT_SUPPORTED:
515 device_printf(dev,"Unsupported SFP+ Module\n");
518 case IXGBE_ERR_SFP_NOT_PRESENT:
519 device_printf(dev,"No SFP+ Module found\n");
525 /* Detect and set physical type */
526 ixgbe_setup_optics(adapter);
/* Prefer MSI-X when multiple vectors were granted and not disabled by tunable. */
528 if ((adapter->msix > 1) && (ixgbe_enable_msix))
529 error = ixgbe_allocate_msix(adapter);
531 error = ixgbe_allocate_legacy(adapter);
535 /* Setup OS specific network interface */
536 if (ixgbe_setup_interface(dev, adapter) != 0)
539 /* Initialize statistics */
540 ixgbe_update_stats_counters(adapter);
542 /* Register for VLAN events */
543 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
544 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
545 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
546 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
548 /* Check PCIE slot type/speed/width */
549 ixgbe_get_slot_info(hw);
552 /* Set an initial default flow control value */
553 adapter->fc = ixgbe_fc_full;
555 /* Check for certain supported features */
556 ixgbe_check_wol_support(adapter);
557 ixgbe_check_eee_support(adapter);
560 ixgbe_add_device_sysctls(adapter);
561 ixgbe_add_hw_stats(adapter);
563 /* let hardware know driver is loaded */
564 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
569 ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error unwind: release rings, interface, PCI resources and the MC array. */
575 ixgbe_free_transmit_structures(adapter);
576 ixgbe_free_receive_structures(adapter);
578 if (adapter->ifp != NULL)
579 if_free(adapter->ifp);
580 ixgbe_free_pci_resources(adapter);
581 free(adapter->mta, M_DEVBUF);
585 /*********************************************************************
586 * Device removal routine
588 * The detach entry point is called when the driver is being removed.
589 * This routine stops the adapter and deallocates all the resources
590 * that were allocated for driver operation.
592 * return 0 on success, positive on failure
593 *********************************************************************/
596 ixgbe_detach(device_t dev)
598 struct adapter *adapter = device_get_softc(dev);
599 struct ix_queue *que = adapter->queues;
600 struct tx_ring *txr = adapter->tx_rings;
603 INIT_DEBUGOUT("ixgbe_detach: begin");
605 /* Make sure VLANS are not using driver */
606 if (adapter->ifp->if_vlantrunk != NULL) {
607 device_printf(dev,"Vlan in use, detach first\n");
611 /* Stop the adapter */
612 IXGBE_CORE_LOCK(adapter);
613 ixgbe_setup_low_power_mode(adapter);
614 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free every per-queue taskqueue before tearing down rings. */
616 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
618 #ifndef IXGBE_LEGACY_TX
619 taskqueue_drain(que->tq, &txr->txq_task);
621 taskqueue_drain(que->tq, &que->que_task);
622 taskqueue_free(que->tq);
626 /* Drain the Link queue */
628 taskqueue_drain(adapter->tq, &adapter->link_task);
629 taskqueue_drain(adapter->tq, &adapter->mod_task);
630 taskqueue_drain(adapter->tq, &adapter->msf_task);
631 taskqueue_drain(adapter->tq, &adapter->phy_task);
633 taskqueue_drain(adapter->tq, &adapter->fdir_task);
635 taskqueue_free(adapter->tq);
638 /* let hardware know driver is unloading */
639 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
643 /* Unregister VLAN events */
644 if (adapter->vlan_attach != NULL)
645 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646 if (adapter->vlan_detach != NULL)
647 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, then stop the watchdog timer. */
649 ether_ifdetach(adapter->ifp);
650 callout_drain(&adapter->timer);
652 netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654 ixgbe_free_pci_resources(adapter);
655 bus_generic_detach(dev);
656 if_free(adapter->ifp);
658 ixgbe_free_transmit_structures(adapter);
659 ixgbe_free_receive_structures(adapter);
660 free(adapter->mta, M_DEVBUF);
/* Core mutex is destroyed last, after everything that used it. */
662 IXGBE_CORE_LOCK_DESTROY(adapter);
666 /*********************************************************************
668 * Shutdown entry point
670 **********************************************************************/
/* System shutdown hook: put the adapter into its low-power state. */
673 ixgbe_shutdown(device_t dev)
675 struct adapter *adapter = device_get_softc(dev);
678 INIT_DEBUGOUT("ixgbe_shutdown: begin");
680 IXGBE_CORE_LOCK(adapter);
681 error = ixgbe_setup_low_power_mode(adapter);
682 IXGBE_CORE_UNLOCK(adapter);
688 * Methods for going from:
689 * D0 -> D3: ixgbe_suspend
690 * D3 -> D0: ixgbe_resume
/* Suspend hook: enter low-power mode, then drop the device to D3. */
693 ixgbe_suspend(device_t dev)
695 struct adapter *adapter = device_get_softc(dev);
698 INIT_DEBUGOUT("ixgbe_suspend: begin");
700 IXGBE_CORE_LOCK(adapter);
702 error = ixgbe_setup_low_power_mode(adapter);
704 /* Save state and power down */
706 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
708 IXGBE_CORE_UNLOCK(adapter);
/* Resume hook: restore D0 power, report/clear wake status, re-init if up. */
714 ixgbe_resume(device_t dev)
716 struct adapter *adapter = device_get_softc(dev);
717 struct ifnet *ifp = adapter->ifp;
718 struct ixgbe_hw *hw = &adapter->hw;
721 INIT_DEBUGOUT("ixgbe_resume: begin");
723 IXGBE_CORE_LOCK(adapter);
725 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726 pci_restore_state(dev);
728 /* Read & clear WUS register */
729 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
731 device_printf(dev, "Woken up by (WUS): %#010x\n",
732 IXGBE_READ_REG(hw, IXGBE_WUS));
/* W1C register: writing all ones clears every wake-up status bit. */
733 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734 /* And clear WUFC until next low-power transition */
735 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
738 * Required after D3->D0 transition;
739 * will re-advertise all previous advertised speeds
741 if (ifp->if_flags & IFF_UP)
742 ixgbe_init_locked(adapter);
744 IXGBE_CORE_UNLOCK(adapter);
746 INIT_DEBUGOUT("ixgbe_resume: end");
751 /*********************************************************************
754 * ixgbe_ioctl is called when the user wants to configure the
757 * return 0 on success, positive on failure
758 **********************************************************************/
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
763 struct adapter *adapter = ifp->if_softc;
764 struct ifreq *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766 struct ifaddr *ifa = (struct ifaddr *)data;
767 bool avoid_reset = FALSE;
/* Address-family checks used to decide whether a full re-init is needed. */
775 if (ifa->ifa_addr->sa_family == AF_INET)
779 if (ifa->ifa_addr->sa_family == AF_INET6)
782 #if defined(INET) || defined(INET6)
784 ** Calling init results in link renegotiation,
785 ** so we avoid doing it when possible.
788 ifp->if_flags |= IFF_UP;
789 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
791 if (!(ifp->if_flags & IFF_NOARP))
792 arp_ifinit(ifp, ifa);
794 error = ether_ioctl(ifp, command, data);
/* MTU change: bounded by IXGBE_MAX_MTU, then re-init under the core lock. */
798 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
799 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
802 IXGBE_CORE_LOCK(adapter);
803 ifp->if_mtu = ifr->ifr_mtu;
804 adapter->max_frame_size =
805 ifp->if_mtu + IXGBE_MTU_HDR;
806 ixgbe_init_locked(adapter);
807 IXGBE_CORE_UNLOCK(adapter);
/* Interface flags: only reprogram promisc/allmulti when those bits changed. */
811 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812 IXGBE_CORE_LOCK(adapter);
813 if (ifp->if_flags & IFF_UP) {
814 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815 if ((ifp->if_flags ^ adapter->if_flags) &
816 (IFF_PROMISC | IFF_ALLMULTI)) {
817 ixgbe_set_promisc(adapter);
820 ixgbe_init_locked(adapter);
822 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
824 adapter->if_flags = ifp->if_flags;
825 IXGBE_CORE_UNLOCK(adapter);
/* Multicast list change: reload filters with interrupts masked off. */
829 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831 IXGBE_CORE_LOCK(adapter);
832 ixgbe_disable_intr(adapter);
833 ixgbe_set_multi(adapter);
834 ixgbe_enable_intr(adapter);
835 IXGBE_CORE_UNLOCK(adapter);
840 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* Capability changes: toggle each requested bit, re-init if running. */
845 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847 if (mask & IFCAP_HWCSUM)
848 ifp->if_capenable ^= IFCAP_HWCSUM;
849 if (mask & IFCAP_TSO4)
850 ifp->if_capenable ^= IFCAP_TSO4;
851 if (mask & IFCAP_TSO6)
852 ifp->if_capenable ^= IFCAP_TSO6;
853 if (mask & IFCAP_LRO)
854 ifp->if_capenable ^= IFCAP_LRO;
855 if (mask & IFCAP_VLAN_HWTAGGING)
856 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857 if (mask & IFCAP_VLAN_HWFILTER)
858 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859 if (mask & IFCAP_VLAN_HWTSO)
860 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862 IXGBE_CORE_LOCK(adapter);
863 ixgbe_init_locked(adapter);
864 IXGBE_CORE_UNLOCK(adapter);
866 VLAN_CAPABILITIES(ifp);
869 #if __FreeBSD_version >= 1100036
/* SIOCGI2C: read SFP module EEPROM bytes over I2C for userland (e.g. ifconfig). */
872 struct ixgbe_hw *hw = &adapter->hw;
875 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
876 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
/* Only the standard SFP EEPROM (0xA0) and diagnostics (0xA2) addresses. */
879 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
883 if (i2c.len > sizeof(i2c.data)) {
888 for (i = 0; i < i2c.len; i++)
889 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890 i2c.dev_addr, &i2c.data[i]);
891 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* Anything else is delegated to the generic Ethernet ioctl handler. */
896 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897 error = ether_ioctl(ifp, command, data);
904 /*********************************************************************
907 * This routine is used in two ways. It is used by the stack as
908 * init entry point in network interface structure. It is also used
909 * by the driver as a hw/sw initialization routine to get to a
912 * return 0 on success, positive on failure
913 **********************************************************************/
914 #define IXGBE_MHADD_MFS_SHIFT 16
917 ixgbe_init_locked(struct adapter *adapter)
919 struct ifnet *ifp = adapter->ifp;
920 device_t dev = adapter->dev;
921 struct ixgbe_hw *hw = &adapter->hw;
922 u32 k, txdctl, mhadd, gpie;
925 mtx_assert(&adapter->core_mtx, MA_OWNED);
926 INIT_DEBUGOUT("ixgbe_init_locked: begin");
927 hw->adapter_stopped = FALSE;
928 ixgbe_stop_adapter(hw);
929 callout_stop(&adapter->timer);
931 /* reprogram the RAR[0] in case user changed it. */
932 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
934 /* Get the latest mac address, User can use a LAA */
935 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936 IXGBE_ETH_LENGTH_OF_ADDRESS);
937 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938 hw->addr_ctrl.rar_used_count = 1;
940 /* Set the various hardware offload abilities */
941 ifp->if_hwassist = 0;
942 if (ifp->if_capenable & IFCAP_TSO)
943 ifp->if_hwassist |= CSUM_TSO;
944 if (ifp->if_capenable & IFCAP_TXCSUM) {
945 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947 if (hw->mac.type != ixgbe_mac_82598EB)
948 ifp->if_hwassist |= CSUM_SCTP;
952 /* Prepare transmit descriptors and buffers */
953 if (ixgbe_setup_transmit_structures(adapter)) {
954 device_printf(dev, "Could not setup transmit structures\n");
960 ixgbe_initialize_transmit_units(adapter);
962 /* Setup Multicast table */
963 ixgbe_set_multi(adapter);
966 ** Determine the correct mbuf pool
967 ** for doing jumbo frames
969 if (adapter->max_frame_size <= 2048)
970 adapter->rx_mbuf_sz = MCLBYTES;
971 else if (adapter->max_frame_size <= 4096)
972 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973 else if (adapter->max_frame_size <= 9216)
974 adapter->rx_mbuf_sz = MJUM9BYTES;
976 adapter->rx_mbuf_sz = MJUM16BYTES;
978 /* Prepare receive descriptors and buffers */
979 if (ixgbe_setup_receive_structures(adapter)) {
980 device_printf(dev, "Could not setup receive structures\n");
985 /* Configure RX settings */
986 ixgbe_initialize_receive_units(adapter);
988 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
990 /* Enable Fan Failure Interrupt */
991 gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
993 /* Add for Module detection */
994 if (hw->mac.type == ixgbe_mac_82599EB)
995 gpie |= IXGBE_SDP2_GPIEN;
998 * Thermal Failure Detection (X540)
999 * Link Detection (X552)
1001 if (hw->mac.type == ixgbe_mac_X540 ||
1002 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004 gpie |= IXGBE_SDP0_GPIEN_X540;
1006 if (adapter->msix > 1) {
1007 /* Enable Enhanced MSIX mode */
1008 gpie |= IXGBE_GPIE_MSIX_MODE;
1009 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1012 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1015 if (ifp->if_mtu > ETHERMTU) {
1016 /* aka IXGBE_MAXFRS on 82599 and newer */
1017 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1023 /* Now enable all the queues */
1024 for (int i = 0; i < adapter->num_queues; i++) {
1025 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026 txdctl |= IXGBE_TXDCTL_ENABLE;
1027 /* Set WTHRESH to 8, burst writeback */
1028 txdctl |= (8 << 16);
1030 * When the internal queue falls below PTHRESH (32),
1031 * start prefetching as long as there are at least
1032 * HTHRESH (1) buffers ready. The values are taken
1033 * from the Intel linux driver 3.8.21.
1034 * Prefetching enables tx line rate even with 1 queue.
1036 txdctl |= (32 << 0) | (1 << 8);
1037 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1040 for (int i = 0; i < adapter->num_queues; i++) {
1041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042 if (hw->mac.type == ixgbe_mac_82598EB) {
1048 rxdctl &= ~0x3FFFFF;
1051 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053 for (k = 0; k < 10; k++) {
1054 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055 IXGBE_RXDCTL_ENABLE)
1063 * In netmap mode, we must preserve the buffers made
1064 * available to userspace before the if_init()
1065 * (this is true by default on the TX side, because
1066 * init makes all buffers available to userspace).
1068 * netmap_reset() and the device specific routines
1069 * (e.g. ixgbe_setup_receive_rings()) map these
1070 * buffers at the end of the NIC ring, so here we
1071 * must set the RDT (tail) register to make sure
1072 * they are not overwritten.
1074 * In this driver the NIC ring starts at RDH = 0,
1075 * RDT points to the last slot available for reception (?),
1076 * so RDT = num_rx_desc - 1 means the whole ring is available.
1078 if (ifp->if_capenable & IFCAP_NETMAP) {
1079 struct netmap_adapter *na = NA(adapter->ifp);
1080 struct netmap_kring *kring = &na->rx_rings[i];
1081 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1083 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1085 #endif /* DEV_NETMAP */
1086 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1089 /* Enable Receive engine */
1090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091 if (hw->mac.type == ixgbe_mac_82598EB)
1092 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093 rxctrl |= IXGBE_RXCTRL_RXEN;
1094 ixgbe_enable_rx_dma(hw, rxctrl);
1096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1098 /* Set up MSI/X routing */
1099 if (ixgbe_enable_msix) {
1100 ixgbe_configure_ivars(adapter);
1101 /* Set up auto-mask */
1102 if (hw->mac.type == ixgbe_mac_82598EB)
1103 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1105 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1108 } else { /* Simple settings for Legacy/MSI */
1109 ixgbe_set_ivar(adapter, 0, 0, 0);
1110 ixgbe_set_ivar(adapter, 0, 0, 1);
1111 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1115 /* Init Flow director */
1116 if (hw->mac.type != ixgbe_mac_82598EB) {
1117 u32 hdrm = 32 << fdir_pballoc;
1119 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1125 ** Check on any SFP devices that
1126 ** need to be kick-started
1128 if (hw->phy.type == ixgbe_phy_none) {
1129 int err = hw->phy.ops.identify(hw);
1130 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1132 "Unsupported SFP+ module type was detected.\n");
1137 /* Set moderation on the Link interrupt */
1138 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1140 /* Configure Energy Efficient Ethernet for supported devices */
1141 if (adapter->eee_support)
1142 ixgbe_setup_eee(hw, adapter->eee_enabled);
1144 /* Config/Enable Link */
1145 ixgbe_config_link(adapter);
1147 /* Hardware Packet Buffer & Flow Control setup */
1148 ixgbe_config_delay_values(adapter);
1150 /* Initialize the FC settings */
1153 /* Set up VLAN support and filter */
1154 ixgbe_setup_vlan_hw_support(adapter);
1156 /* Setup DMA Coalescing */
1157 ixgbe_config_dmac(adapter);
1159 /* And now turn on interrupts */
1160 ixgbe_enable_intr(adapter);
1162 /* Now inform the stack we're ready */
1163 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * If_init entry point registered with the stack: serialize on the
 * core mutex and delegate all real work to ixgbe_init_locked().
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *sc = (struct adapter *)arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
	return;
}
/*
 * ixgbe_config_delay_values
 *
 * Derive the flow-control high/low watermarks (in KB) for traffic
 * class 0 from the current max frame size, then latch the requested
 * FC mode, pause time and XON setting into hw->fc so the shared code
 * can program the MAC.  Called with the core lock held during init.
 */
ixgbe_config_delay_values(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxpb, frame, size, tmp;
frame = adapter->max_frame_size;
/* Calculate High Water */
/* X540-class MACs use a different delay-value formula than older parts */
switch (hw->mac.type) {
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
tmp = IXGBE_DV_X540(frame, frame);
tmp = IXGBE_DV(frame, frame);
/* Convert delay value from bit-times to KB; RXPBSIZE >> 10 is the
 * packet-buffer size in KB.  High water = buffer size - delay. */
size = IXGBE_BT2KB(tmp);
rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
hw->fc.high_water[0] = rxpb - size;
/* Now calculate Low Water */
switch (hw->mac.type) {
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
tmp = IXGBE_LOW_DV_X540(frame);
tmp = IXGBE_LOW_DV(frame);
hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
/* Hand the user-requested settings to the shared FC code */
hw->fc.requested_mode = adapter->fc;
hw->fc.pause_time = IXGBE_FC_PAUSE;
hw->fc.send_xon = TRUE;
1222 ** MSIX Interrupt Handlers and Tasklets
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1229 struct ixgbe_hw *hw = &adapter->hw;
1230 u64 queue = (u64)(1 << vector);
1233 if (hw->mac.type == ixgbe_mac_82598EB) {
1234 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1237 mask = (queue & 0xFFFFFFFF);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240 mask = (queue >> 32);
1242 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1249 struct ixgbe_hw *hw = &adapter->hw;
1250 u64 queue = (u64)(1 << vector);
1253 if (hw->mac.type == ixgbe_mac_82598EB) {
1254 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1257 mask = (queue & 0xFFFFFFFF);
1259 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260 mask = (queue >> 32);
1262 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * ixgbe_handle_que
 *
 * Deferred taskqueue handler for one RX/TX queue pair.  Runs outside
 * interrupt context: drains RX, restarts TX, then re-enables the
 * queue's interrupt (MSIX) or the global interrupt (legacy/MSI).
 */
ixgbe_handle_que(void *context, int pending)
struct ix_queue *que = context;
struct adapter *adapter = que->adapter;
struct tx_ring *txr = que->txr;
struct ifnet *ifp = adapter->ifp;
/* Only process while the interface is marked running */
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
more = ixgbe_rxeof(que);
#ifndef IXGBE_LEGACY_TX
/* Multiqueue path: restart the per-ring buf_ring if non-empty */
if (!drbr_empty(ifp, txr->br))
ixgbe_mq_start_locked(ifp, txr);
/* Legacy path: drain the interface send queue */
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ixgbe_start_locked(txr, ifp);
IXGBE_TX_UNLOCK(txr);
/* Reenable this interrupt */
/* que->res set means a dedicated MSIX vector backs this queue */
if (que->res != NULL)
ixgbe_enable_queue(adapter, que->msix);
/* Legacy/MSI: re-enable the shared device interrupt instead */
ixgbe_enable_intr(adapter);
1298 /*********************************************************************
1300 * Legacy Interrupt Service routine
1302 **********************************************************************/
/*
 * ixgbe_legacy_irq
 *
 * INTx/MSI interrupt service routine: reads EICR once, services the
 * single RX/TX queue pair inline, and dispatches slow-path events
 * (fan failure, link change, external PHY) to the admin taskqueue.
 */
ixgbe_legacy_irq(void *arg)
struct ix_queue *que = arg;
struct adapter *adapter = que->adapter;
struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
struct tx_ring *txr = adapter->tx_rings;
/* Reading EICR acknowledges (clears) the cause bits */
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Interrupt was not ours (shared line): just re-enable and leave */
if (reg_eicr == 0) {
ixgbe_enable_intr(adapter);
more = ixgbe_rxeof(que);
#ifdef IXGBE_LEGACY_TX
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ixgbe_start_locked(txr, ifp);
if (!drbr_empty(ifp, txr->br))
ixgbe_mq_start_locked(ifp, txr);
IXGBE_TX_UNLOCK(txr);
/* Check for fan failure */
/* Only meaningful on copper parts that route fan status to SDP1 */
if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n");
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
/* Link status change */
if (reg_eicr & IXGBE_EICR_LSC)
taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* External PHY interrupt */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
(reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: reschedule the queue task */
taskqueue_enqueue(que->tq, &que->que_task);
ixgbe_enable_intr(adapter);
1362 /*********************************************************************
1364 * MSIX Queue Interrupt Service routine
1366 **********************************************************************/
1368 ixgbe_msix_que(void *arg)
1370 struct ix_queue *que = arg;
1371 struct adapter *adapter = que->adapter;
1372 struct ifnet *ifp = adapter->ifp;
1373 struct tx_ring *txr = que->txr;
1374 struct rx_ring *rxr = que->rxr;
1378 /* Protect against spurious interrupts */
1379 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1382 ixgbe_disable_queue(adapter, que->msix);
1385 more = ixgbe_rxeof(que);
1389 #ifdef IXGBE_LEGACY_TX
1390 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1391 ixgbe_start_locked(txr, ifp);
1393 if (!drbr_empty(ifp, txr->br))
1394 ixgbe_mq_start_locked(ifp, txr);
1396 IXGBE_TX_UNLOCK(txr);
1400 if (ixgbe_enable_aim == FALSE)
1403 ** Do Adaptive Interrupt Moderation:
1404 ** - Write out last calculated setting
1405 ** - Calculate based on average size over
1406 ** the last interval.
1408 if (que->eitr_setting)
1409 IXGBE_WRITE_REG(&adapter->hw,
1410 IXGBE_EITR(que->msix), que->eitr_setting);
1412 que->eitr_setting = 0;
1414 /* Idle, do nothing */
1415 if ((txr->bytes == 0) && (rxr->bytes == 0))
1418 if ((txr->bytes) && (txr->packets))
1419 newitr = txr->bytes/txr->packets;
1420 if ((rxr->bytes) && (rxr->packets))
1421 newitr = max(newitr,
1422 (rxr->bytes / rxr->packets));
1423 newitr += 24; /* account for hardware frame, crc */
1425 /* set an upper boundary */
1426 newitr = min(newitr, 3000);
1428 /* Be nice to the mid range */
1429 if ((newitr > 300) && (newitr < 1200))
1430 newitr = (newitr / 3);
1432 newitr = (newitr / 2);
1434 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435 newitr |= newitr << 16;
1437 newitr |= IXGBE_EITR_CNT_WDIS;
1439 /* save for next interrupt */
1440 que->eitr_setting = newitr;
1450 taskqueue_enqueue(que->tq, &que->que_task);
1452 ixgbe_enable_queue(adapter, que->msix);
/*
 * ixgbe_msix_link
 *
 * MSIX handler for the "other" (non-queue) vector: link changes,
 * Flow Director reinit, ECC/thermal errors, SFP module events, fan
 * failure and external PHY interrupts.  Real work is deferred to the
 * admin taskqueue; this handler only acks causes and enqueues tasks.
 */
ixgbe_msix_link(void *arg)
struct adapter *adapter = arg;
struct ixgbe_hw *hw = &adapter->hw;
u32 reg_eicr, mod_mask;
++adapter->link_irq;
/* First get the cause */
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
/* Be sure the queue bits are not cleared */
reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
/* Clear interrupt with write */
IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
/* Link status change */
if (reg_eicr & IXGBE_EICR_LSC)
taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* 82599 and later only: FDIR, ECC and thermal sensor causes */
if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
/* This is probably overkill :) */
/* cmpset guards against queueing a second reinit while one runs */
if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
/* Disable the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
if (reg_eicr & IXGBE_EICR_ECC) {
device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
"Please Reboot!!\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
/* Check for over temp condition */
if (reg_eicr & IXGBE_EICR_TS) {
device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
"PHY IS SHUT DOWN!!\n");
device_printf(adapter->dev, "System shutdown required!\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
/* Pluggable optics-related interrupt */
/* X550EM SFP parts signal module presence on SDP0 instead of SDP2 */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
mod_mask = IXGBE_EICR_GPI_SDP0_X540;
mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
if (ixgbe_is_sfp(hw)) {
/* SDP1 = multispeed-fiber event, mod_mask = module insert/remove */
if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
taskqueue_enqueue(adapter->tq, &adapter->msf_task);
} else if (reg_eicr & mod_mask) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
taskqueue_enqueue(adapter->tq, &adapter->mod_task);
/* Check for fan failure */
if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
(reg_eicr & IXGBE_EICR_GPI_SDP1)) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n");
/* External PHY interrupt */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
(reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* Re-arm only the "other" cause; queue vectors re-arm themselves */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1538 /*********************************************************************
1540 * Media Ioctl callback
1542 * This routine is called whenever the user queries the status of
1543 * the interface using ifconfig.
1545 **********************************************************************/
/*
 * ixgbe_media_status
 *
 * ifmedia status callback (SIOCGIFMEDIA): refreshes link state under
 * the core lock, then translates the supported physical layer plus
 * the negotiated link speed into an ifmedia active word, and reports
 * the flow-control pause settings currently in effect.
 */
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
struct adapter *adapter = ifp->if_softc;
struct ixgbe_hw *hw = &adapter->hw;
INIT_DEBUGOUT("ixgbe_media_status: begin");
IXGBE_CORE_LOCK(adapter);
ixgbe_update_link_status(adapter);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid, ether" and bail */
if (!adapter->link_active) {
IXGBE_CORE_UNLOCK(adapter);
ifmr->ifm_status |= IFM_ACTIVE;
layer = ixgbe_get_supported_physical_layer(hw);
/* Copper / BASE-T layers */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
case IXGBE_LINK_SPEED_100_FULL:
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
/* Direct-attach copper (twinax) */
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
** XXX: These need to use the proper media types once
/* Backplane KR/KX4/KX: reported with stand-in SR/CX4/CX subtypes */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
case IXGBE_LINK_SPEED_2_5GB_FULL:
ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
|| layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
case IXGBE_LINK_SPEED_2_5GB_FULL:
ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* If nothing is recognized... */
if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
ifmr->ifm_active |= IFM_UNKNOWN;
#if __FreeBSD_version >= 900025
/* Display current flow control setting used on link */
if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
hw->fc.current_mode == ixgbe_fc_full)
ifmr->ifm_active |= IFM_ETH_RXPAUSE;
if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
hw->fc.current_mode == ixgbe_fc_full)
ifmr->ifm_active |= IFM_ETH_TXPAUSE;
IXGBE_CORE_UNLOCK(adapter);
1672 /*********************************************************************
1674 * Media Ioctl callback
1676 * This routine is called when the user changes speed/duplex using
1677 * media/mediopt option with ifconfig.
1679 **********************************************************************/
/*
 * ixgbe_media_change
 *
 * ifmedia change callback (SIOCSIFMEDIA): maps the requested ifmedia
 * subtype to an ixgbe_link_speed mask, asks the shared code to set up
 * the link, and records the advertised speeds in adapter->advertise.
 * The case labels accumulate speeds by intentional fallthrough.
 */
ixgbe_media_change(struct ifnet * ifp)
struct adapter *adapter = ifp->if_softc;
struct ifmedia *ifm = &adapter->media;
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed speed = 0;
INIT_DEBUGOUT("ixgbe_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software */
if (hw->phy.media_type == ixgbe_media_type_backplane)
** We don't actually need to check against the supported
** media types of the adapter; ifmedia will take care of
switch (IFM_SUBTYPE(ifm->ifm_media)) {
speed |= IXGBE_LINK_SPEED_100_FULL;
case IFM_10G_SR: /* KR, too */
case IFM_10G_CX4: /* KX4 */
speed |= IXGBE_LINK_SPEED_1GB_FULL;
case IFM_10G_TWINAX:
speed |= IXGBE_LINK_SPEED_10GB_FULL;
speed |= IXGBE_LINK_SPEED_100_FULL;
case IFM_1000_CX: /* KX */
speed |= IXGBE_LINK_SPEED_1GB_FULL;
speed |= IXGBE_LINK_SPEED_100_FULL;
hw->mac.autotry_restart = TRUE;
hw->mac.ops.setup_link(hw, speed, TRUE);
/* Pack the speed mask into the sysctl-visible advertise bitmap */
adapter->advertise =
((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
device_printf(adapter->dev, "Invalid media type!\n");
/*
 * ixgbe_set_promisc
 *
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE) from
 * the interface flags: IFF_PROMISC sets both, IFF_ALLMULTI sets MPE
 * only.  The multicast list is counted first so MPE can be cleared
 * again when the list fits in the hardware filter.
 */
ixgbe_set_promisc(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
reg_rctl &= (~IXGBE_FCTRL_UPE);
if (ifp->if_flags & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
if_maddr_rlock(ifp);
/* Count link-level multicast addresses, capped at the filter size */
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
#if __FreeBSD_version < 800000
IF_ADDR_UNLOCK(ifp);
if_maddr_runlock(ifp);
/* List fits in hardware: no need for multicast promiscuous mode */
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
reg_rctl &= (~IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
if (ifp->if_flags & IFF_PROMISC) {
reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
} else if (ifp->if_flags & IFF_ALLMULTI) {
reg_rctl |= IXGBE_FCTRL_MPE;
reg_rctl &= ~IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1788 /*********************************************************************
1791 * This routine is called whenever multicast address list is updated.
1793 **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
/*
 * ixgbe_set_multi
 *
 * Rebuild the hardware multicast filter from the interface's
 * link-level multicast list, then set FCTRL promiscuous bits to match
 * IFF_PROMISC/IFF_ALLMULTI or a list that overflows the filter.
 */
ixgbe_set_multi(struct adapter *adapter)
struct ifmultiaddr *ifma;
struct ifnet *ifp = adapter->ifp;
IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
/* Clear the staging array before gathering addresses */
bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
MAX_NUM_MULTICAST_ADDRESSES);
#if __FreeBSD_version < 800000
if_maddr_rlock(ifp);
/* Copy each AF_LINK multicast address into the flat mta array */
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
&mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
#if __FreeBSD_version < 800000
IF_ADDR_UNLOCK(ifp);
if_maddr_runlock(ifp);
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional UPE|MPE set looks redundant —
 * every branch below either re-sets or clears these bits; confirm
 * against the upstream driver before relying on it. */
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (ifp->if_flags & IFF_PROMISC)
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
ifp->if_flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program the exact filter when the list fits */
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
ixgbe_update_mc_addr_list(&adapter->hw,
update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1856 * This is an iterator function now needed by the multicast
1857 * shared code. It simply feeds the shared code routine the
1858 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): advances the
 * cursor one MAC address (IXGBE_ETH_LENGTH_OF_ADDRESS bytes) through
 * the flat array built by ixgbe_set_multi().
 */
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
u8 *addr = *update_ptr;
newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
*update_ptr = newptr;
1873 /*********************************************************************
1876 * This routine checks for link status,updates statistics,
1877 * and runs the watchdog check.
1879 **********************************************************************/
/*
 * ixgbe_local_timer
 *
 * Once-per-second callout (core lock held): probes pending SFP
 * modules, refreshes link status and statistics, scans the TX queues
 * for hang conditions, and either reschedules itself or falls through
 * to the watchdog reset when every queue reports hung.
 */
ixgbe_local_timer(void *arg)
struct adapter *adapter = arg;
device_t dev = adapter->dev;
struct ix_queue *que = adapter->queues;
mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Check for pluggable optics */
if (adapter->sfp_probe)
if (!ixgbe_sfp_probe(adapter))
goto out; /* Nothing to do */
ixgbe_update_link_status(adapter);
ixgbe_update_stats_counters(adapter);
** Check the TX queues status
** - mark hung queues so we don't schedule on them
** - watchdog only if all queues show hung
for (int i = 0; i < adapter->num_queues; i++, que++) {
/* Keep track of queues with work for soft irq */
queues |= ((u64)1 << que->me);
** Each time txeof runs without cleaning, but there
** are uncleaned descriptors it increments busy. If
** we get to the MAX we declare it hung.
if (que->busy == IXGBE_QUEUE_HUNG) {
/* Mark the queue as inactive */
adapter->active_queues &= ~((u64)1 << que->me);
/* Check if we've come back from hung */
if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
adapter->active_queues |= ((u64)1 << que->me);
if (que->busy >= IXGBE_MAX_TX_BUSY) {
device_printf(dev,"Warning queue %d "
"appears to be hung!\n", i);
que->txr->busy = IXGBE_QUEUE_HUNG;
/* Only truly watchdog if all queues show hung */
if (hung == adapter->num_queues)
else if (queues != 0) { /* Force an IRQ on queues with work */
ixgbe_rearm_queues(adapter, queues);
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark the interface down and reinitialize */
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
ixgbe_init_locked(adapter);
1952 ** Note: this routine updates the OS on the link state
1953 ** the real check of the hardware only happens with
1954 ** a link interrupt.
/*
 * ixgbe_update_link_status
 *
 * Propagate adapter->link_up (set elsewhere; the hardware is only
 * interrogated on a link interrupt) to the network stack.  Edge
 * triggered: acts only when link_active disagrees with link_up, and
 * on link-up also refreshes flow control and DMA coalescing.
 */
ixgbe_update_link_status(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
if (adapter->link_up){
if (adapter->link_active == FALSE) {
/* link_speed 128 is the 10G code; anything else reported as 1G */
device_printf(dev,"Link is up %d Gbps %s \n",
((adapter->link_speed == 128)? 10:1),
adapter->link_active = TRUE;
/* Update any Flow Control changes */
ixgbe_fc_enable(&adapter->hw);
/* Update DMA coalescing config */
ixgbe_config_dmac(adapter);
if_link_state_change(ifp, LINK_STATE_UP);
} else { /* Link down */
if (adapter->link_active == TRUE) {
device_printf(dev,"Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
adapter->link_active = FALSE;
1988 /*********************************************************************
1990 * This routine disables all traffic on the adapter by issuing a
1991 * global reset on the MAC and deallocates TX/RX buffers.
1993 **********************************************************************/
/*
 * ixgbe_stop
 *
 * Quiesce the adapter (core lock held): disable interrupts, stop the
 * timer, mark the interface not-running, reset the MAC via the shared
 * code, disable the TX laser, report link down, and restore RAR[0].
 */
ixgbe_stop(void *arg)
struct adapter *adapter = arg;
struct ixgbe_hw *hw = &adapter->hw;
mtx_assert(&adapter->core_mtx, MA_OWNED);
INIT_DEBUGOUT("ixgbe_stop: begin\n");
ixgbe_disable_intr(adapter);
callout_stop(&adapter->timer);
/* Let the stack know...*/
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the flag first so ixgbe_stop_adapter() actually runs */
hw->adapter_stopped = FALSE;
ixgbe_stop_adapter(hw);
if (hw->mac.type == ixgbe_mac_82599EB)
ixgbe_stop_mac_link_on_d3_82599(hw);
/* Turn off the laser - noop with no optics */
ixgbe_disable_tx_laser(hw);
/* Update the stack */
adapter->link_up = FALSE;
ixgbe_update_link_status(adapter);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2031 /*********************************************************************
2033 * Determine hardware revision.
2035 **********************************************************************/
/*
 * ixgbe_identify_hardware
 *
 * Read PCI identification into hw, enable bus mastering, resolve the
 * MAC type via the shared code, and choose the scatter-gather segment
 * limit (82598 vs 82599-and-later).
 */
ixgbe_identify_hardware(struct adapter *adapter)
device_t dev = adapter->dev;
struct ixgbe_hw *hw = &adapter->hw;
/* Save off the information about this board */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
hw->subsystem_vendor_id =
pci_read_config(dev, PCIR_SUBVEND_0, 2);
hw->subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
** Make sure BUSMASTER is set
pci_enable_busmaster(dev);
/* We need this here to set the num_segs below */
ixgbe_set_mac_type(hw);
/* Pick up the 82599 settings */
if (hw->mac.type != ixgbe_mac_82598EB) {
hw->phy.smart_speed = ixgbe_smart_speed;
adapter->num_segs = IXGBE_82599_SCATTER;
adapter->num_segs = IXGBE_82598_SCATTER;
2069 /*********************************************************************
2071 * Determine optic type
2073 **********************************************************************/
/*
 * ixgbe_setup_optics
 *
 * Pick the default ifmedia subtype (adapter->optics) from the
 * supported physical layer; each matching branch selects one type,
 * falling back to IFM_ETHER|IFM_AUTO when nothing matches.
 */
ixgbe_setup_optics(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
layer = ixgbe_get_supported_physical_layer(hw);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
adapter->optics = IFM_10G_T;
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
adapter->optics = IFM_1000_T;
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
adapter->optics = IFM_1000_SX;
if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
adapter->optics = IFM_10G_LR;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
adapter->optics = IFM_10G_SR;
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
adapter->optics = IFM_10G_TWINAX;
if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
adapter->optics = IFM_10G_CX4;
/* If we get here just set the default */
adapter->optics = IFM_ETHER | IFM_AUTO;
2124 /*********************************************************************
2126 * Setup the Legacy or MSI Interrupt handler
2128 **********************************************************************/
/*
 * ixgbe_allocate_legacy
 *
 * Allocate and wire up a single shared INTx/MSI interrupt: one IRQ
 * resource, per-queue and admin taskqueues, and the fast interrupt
 * handler ixgbe_legacy_irq().  Returns 0 on success; on handler
 * registration failure the taskqueues are torn down again.
 */
ixgbe_allocate_legacy(struct adapter *adapter)
device_t dev = adapter->dev;
struct ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
struct tx_ring *txr = adapter->tx_rings;
/* msix == 1 means MSI was granted; selects the MSI rid below */
if (adapter->msix == 1)
/* We allocate a single interrupt resource */
adapter->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (adapter->res == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
* Try allocating a fast interrupt and the associated deferred
* processing contexts.
#ifndef IXGBE_LEGACY_TX
TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
device_get_nameunit(adapter->dev));
/* Tasklets for Link, SFP and Multispeed Fiber */
TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
device_get_nameunit(adapter->dev));
if ((error = bus_setup_intr(dev, adapter->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
que, &adapter->tag)) != 0) {
device_printf(dev, "Failed to register fast interrupt "
"handler: %d\n", error);
/* Undo the taskqueues created above before failing */
taskqueue_free(que->tq);
taskqueue_free(adapter->tq);
/* For simplicity in the handlers */
adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2196 /*********************************************************************
2198 * Setup MSIX Interrupt resources and handlers
2200 **********************************************************************/
/*
 * ixgbe_allocate_msix
 *
 * Allocate one MSIX vector per RX/TX queue pair (handler
 * ixgbe_msix_que, optionally CPU-bound) plus a final "link" vector
 * for ixgbe_msix_link(), and create the per-queue and admin
 * taskqueues used for deferred work.
 */
ixgbe_allocate_msix(struct adapter *adapter)
device_t dev = adapter->dev;
struct ix_queue *que = adapter->queues;
struct tx_ring *txr = adapter->tx_rings;
int error, rid, vector = 0;
for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (que->res == NULL) {
device_printf(dev,"Unable to allocate"
" bus resource: que interrupt [%d]\n", vector);
/* Set the handler function */
error = bus_setup_intr(dev, que->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixgbe_msix_que, que, &que->tag);
device_printf(dev, "Failed to register QUE handler");
#if __FreeBSD_version >= 800504
bus_describe_intr(dev, que->res, que->tag, "que %d", i);
adapter->active_queues |= (u64)(1 << que->msix);
* Bind the msix vector, and thus the
* rings to the corresponding cpu.
* This just happens to match the default RSS round-robin
* bucket -> queue -> CPU allocation.
if (adapter->num_queues > 1)
if (adapter->num_queues > 1)
bus_bind_intr(dev, que->res, cpu_id);
#ifndef IXGBE_LEGACY_TX
TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
device_get_nameunit(adapter->dev));
/* Now the separate "other"/link interrupt vector */
adapter->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (!adapter->res) {
device_printf(dev,"Unable to allocate"
" bus resource: Link interrupt [%d]\n", rid);
/* Set the link handler function */
error = bus_setup_intr(dev, adapter->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixgbe_msix_link, adapter, &adapter->tag);
adapter->res = NULL;
device_printf(dev, "Failed to register LINK handler");
#if __FreeBSD_version >= 800504
bus_describe_intr(dev, adapter->res, adapter->tag, "link");
/* Remember which vector carries link/admin interrupts */
adapter->vector = vector;
/* Tasklets for Link, SFP and Multispeed Fiber */
TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
device_get_nameunit(adapter->dev));
2295 * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix
 *
 * Decide the interrupt scheme: map the MSIX table BAR, size the queue
 * count from available vectors/CPUs (overridable by the
 * ixgbe_num_queues tunable), and try MSIX (queues + 1 link vector);
 * fall back to MSI, then legacy INTx.  Returns the vector count.
 */
ixgbe_setup_msix(struct adapter *adapter)
device_t dev = adapter->dev;
int rid, want, queues, msgs;
/* Override by tuneable */
if (ixgbe_enable_msix == 0)
/* First try MSI/X */
msgs = pci_msix_count(dev);
rid = PCIR_BAR(MSIX_82598_BAR);
adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (adapter->msix_mem == NULL) {
rid += 4; /* 82599 maps in higher BAR */
adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (adapter->msix_mem == NULL) {
/* May not be enabled */
device_printf(adapter->dev,
"Unable to map MSIX table \n");
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
if (ixgbe_num_queues != 0)
queues = ixgbe_num_queues;
/* reflect correct sysctl value */
ixgbe_num_queues = queues;
** Want one vector (RX/TX pair) per queue
** plus an additional for Link.
device_printf(adapter->dev,
"MSIX Configuration Problem, "
"%d vectors but %d queues wanted!\n",
if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", msgs);
adapter->num_queues = queues;
** If MSIX alloc failed or provided us with
** less than needed, free and fall through to MSI
pci_release_msi(dev);
if (adapter->msix_mem != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
rid, adapter->msix_mem);
adapter->msix_mem = NULL;
if (pci_alloc_msi(dev, &msgs) == 0) {
device_printf(adapter->dev,"Using an MSI interrupt\n");
device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources - map BAR(0) registers and set up
 * the osdep bus-space tag/handle used by the shared-code register
 * accessors, then defer interrupt-vector selection to
 * ixgbe_setup_msix().
 *
 * NOTE(review): extract elides some lines (return paths, braces).
 */
2378 ixgbe_allocate_pci_resources(struct adapter *adapter)
2381 device_t dev = adapter->dev;
2384 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2387 if (!(adapter->pci_mem)) {
2388 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Register access goes through the bus-space handle stored in osdep */
2392 adapter->osdep.mem_bus_space_tag =
2393 rman_get_bustag(adapter->pci_mem);
2394 adapter->osdep.mem_bus_space_handle =
2395 rman_get_bushandle(adapter->pci_mem);
2396 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2398 /* Legacy defaults */
2399 adapter->num_queues = 1;
2400 adapter->hw.back = &adapter->osdep;
2403 ** Now setup MSI or MSI/X, should
2404 ** return us the number of supported
2405 ** vectors. (Will be 1 for MSI)
2407 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - release everything the attach path
 * allocated: per-queue MSIX interrupts, the link/legacy interrupt,
 * MSI(-X) message state, the MSIX table BAR mapping, and BAR(0).
 * Safe to call from a partially failed attach (checks adapter->res).
 */
2412 ixgbe_free_pci_resources(struct adapter * adapter)
2414 struct ix_queue *que = adapter->queues;
2415 device_t dev = adapter->dev;
/* MSIX table BAR differs between 82598 and 82599-class devices */
2418 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2419 memrid = PCIR_BAR(MSIX_82598_BAR);
2421 memrid = PCIR_BAR(MSIX_82599_BAR);
2424 ** There is a slight possibility of a failure mode
2425 ** in attach that will result in entering this function
2426 ** before interrupt resources have been initialized, and
2427 ** in that case we do not want to execute the loops below
2428 ** We can detect this reliably by the state of the adapter
2431 if (adapter->res == NULL)
2435 ** Release all msix queue resources:
2437 for (int i = 0; i < adapter->num_queues; i++, que++) {
2438 rid = que->msix + 1;
2439 if (que->tag != NULL) {
2440 bus_teardown_intr(dev, que->res, que->tag);
2443 if (que->res != NULL)
2444 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2448 /* Clean the Legacy or Link interrupt last */
2449 if (adapter->vector) /* we are doing MSIX */
2450 rid = adapter->vector + 1;
2452 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2454 if (adapter->tag != NULL) {
2455 bus_teardown_intr(dev, adapter->res, adapter->tag);
2456 adapter->tag = NULL;
2458 if (adapter->res != NULL)
2459 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2463 pci_release_msi(dev);
2465 if (adapter->msix_mem != NULL)
2466 bus_release_resource(dev, SYS_RES_MEMORY,
2467 memrid, adapter->msix_mem);
2469 if (adapter->pci_mem != NULL)
2470 bus_release_resource(dev, SYS_RES_MEMORY,
2471 PCIR_BAR(0), adapter->pci_mem);
2476 /*********************************************************************
2478 * Setup networking device structure and register an interface.
2480 **********************************************************************/
/*
 * Allocates the ifnet, wires up the driver entry points (init/ioctl/
 * transmit), advertises hardware capabilities (checksum, TSO, LRO,
 * jumbo, VLAN offloads), attaches Ethernet, and registers the ifmedia
 * callbacks.  NOTE(review): extract elides some lines (braces, returns).
 */
2482 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2486 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2488 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2490 device_printf(dev, "can not allocate ifnet structure\n");
2493 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2494 ifp->if_baudrate = IF_Gbps(10);
2495 ifp->if_init = ixgbe_init;
2496 ifp->if_softc = adapter;
2497 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2498 ifp->if_ioctl = ixgbe_ioctl;
2499 #if __FreeBSD_version >= 1100036
2500 if_setgetcounterfn(ifp, ixgbe_get_counter);
2502 #if __FreeBSD_version >= 1100045
2503 /* TSO parameters */
2504 ifp->if_hw_tsomax = 65518;
2505 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2506 ifp->if_hw_tsomaxsegsize = 2048;
/* Multiqueue transmit when available, otherwise the legacy if_start path */
2508 #ifndef IXGBE_LEGACY_TX
2509 ifp->if_transmit = ixgbe_mq_start;
2510 ifp->if_qflush = ixgbe_qflush;
2512 ifp->if_start = ixgbe_start;
2513 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2514 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2515 IFQ_SET_READY(&ifp->if_snd);
2518 ether_ifattach(ifp, adapter->hw.mac.addr);
2520 adapter->max_frame_size =
2521 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2524 * Tell the upper layer(s) we support long frames.
2526 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2528 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2529 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2530 ifp->if_capabilities |= IFCAP_LRO;
2531 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2535 ifp->if_capenable = ifp->if_capabilities;
2538 ** Don't turn this on by default, if vlans are
2539 ** created on another pseudo device (eg. lagg)
2540 ** then vlan events are not passed thru, breaking
2541 ** operation, but with HW FILTER off it works. If
2542 ** using vlans directly on the ixgbe driver you can
2543 ** enable this and get full hardware tag filtering.
2545 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2548 * Specify the media types supported by this adapter and register
2549 * callbacks to update media and link information
2551 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2552 ixgbe_media_status);
2554 ixgbe_add_media_types(adapter);
2556 /* Autoselect media by default */
2557 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types - translate the hardware's supported physical
 * layers into ifmedia entries.  Layers with no FreeBSD media define
 * (KR/KX4/KX) are mapped onto the closest existing media type and the
 * substitution is logged.
 */
2563 ixgbe_add_media_types(struct adapter *adapter)
2565 struct ixgbe_hw *hw = &adapter->hw;
2566 device_t dev = adapter->dev;
2569 layer = ixgbe_get_supported_physical_layer(hw);
2571 /* Media types with matching FreeBSD media defines */
2572 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2573 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2574 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2575 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2576 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2577 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2579 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2580 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2581 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2583 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2584 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2585 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2586 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2587 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2588 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2589 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2590 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2593 ** Other (no matching FreeBSD media type):
2594 ** To workaround this, we'll assign these completely
2595 ** inappropriate media types.
2597 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2598 device_printf(dev, "Media supported: 10GbaseKR\n");
2599 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2600 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2602 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2603 device_printf(dev, "Media supported: 10GbaseKX4\n");
2604 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2605 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2607 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2608 device_printf(dev, "Media supported: 1000baseKX\n");
2609 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2610 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2612 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2613 /* Someday, someone will care about you... */
2614 device_printf(dev, "Media supported: 1000baseBX\n");
/* 82598AT additionally advertises explicit 1000baseT entries */
2617 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2618 ifmedia_add(&adapter->media,
2619 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2620 ifmedia_add(&adapter->media,
2621 IFM_ETHER | IFM_1000_T, 0, NULL);
2624 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link - bring up the link.  For SFP ports this defers to
 * the multispeed-fiber or module-insertion taskqueues; for everything
 * else it checks current link state and programs autonegotiation via
 * the shared-code setup_link hook.
 * NOTE(review): extract elides some lines (braces/else structure).
 */
2628 ixgbe_config_link(struct adapter *adapter)
2630 struct ixgbe_hw *hw = &adapter->hw;
2631 u32 autoneg, err = 0;
2632 bool sfp, negotiate;
2634 sfp = ixgbe_is_sfp(hw);
2637 if (hw->phy.multispeed_fiber) {
2638 hw->mac.ops.setup_sfp(hw);
2639 ixgbe_enable_tx_laser(hw);
/* Multispeed fiber: let the MSF tasklet negotiate the speed */
2640 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2642 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2644 if (hw->mac.ops.check_link)
2645 err = ixgbe_check_link(hw, &adapter->link_speed,
2646 &adapter->link_up, FALSE);
/* If nothing was advertised, fall back to the device's capabilities */
2649 autoneg = hw->phy.autoneg_advertised;
2650 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2651 err = hw->mac.ops.get_link_capabilities(hw,
2652 &autoneg, &negotiate);
2655 if (hw->mac.ops.setup_link)
2656 err = hw->mac.ops.setup_link(hw,
2657 autoneg, adapter->link_up);
2664 /*********************************************************************
2666 * Enable transmit units.
2668 **********************************************************************/
/*
 * Programs each TX ring's descriptor base/length and head/tail
 * registers, disables descriptor head writeback relaxed ordering,
 * and (on 82599/X540 class parts) enables the TX DMA engine with the
 * arbiter temporarily disabled so MTQC can be written.
 */
2670 ixgbe_initialize_transmit_units(struct adapter *adapter)
2672 struct tx_ring *txr = adapter->tx_rings;
2673 struct ixgbe_hw *hw = &adapter->hw;
2675 /* Setup the Base and Length of the Tx Descriptor Ring */
2677 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2678 u64 tdba = txr->txdma.dma_paddr;
2681 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2682 (tdba & 0x00000000ffffffffULL));
2683 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2684 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2685 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2687 /* Setup the HW Tx Head and Tail descriptor pointers */
2688 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2689 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2691 /* Cache the tail address */
2692 txr->tail = IXGBE_TDT(txr->me);
2694 /* Set the processing limit */
2695 txr->process_limit = ixgbe_tx_process_limit;
/* DCA_TXCTRL register location differs by MAC generation */
2697 /* Disable Head Writeback */
2698 switch (hw->mac.type) {
2699 case ixgbe_mac_82598EB:
2700 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2702 case ixgbe_mac_82599EB:
2703 case ixgbe_mac_X540:
2705 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2708 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2709 switch (hw->mac.type) {
2710 case ixgbe_mac_82598EB:
2711 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2713 case ixgbe_mac_82599EB:
2714 case ixgbe_mac_X540:
2716 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2722 if (hw->mac.type != ixgbe_mac_82598EB) {
2723 u32 dmatxctl, rttdcs;
2724 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2725 dmatxctl |= IXGBE_DMATXCTL_TE;
2726 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2727 /* Disable arbiter to set MTQC */
2728 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2729 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2730 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2731 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2732 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2733 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialise_rss_mapping - program the RSS redirection table
 * (RETA, plus ERETA on MACs with the larger table), the random hash
 * key (RSSRK), and the MRQC hash-field selection.
 * NOTE(review): name uses British spelling, unlike the rest of the
 * file's "initialize" routines; kept because it is called elsewhere.
 * NOTE(review): extract elides lines (index_mult setup, reta shifting
 * for byte positions other than the top one, table_size defaults).
 */
2740 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2742 struct ixgbe_hw *hw = &adapter->hw;
2744 int i, j, queue_id, table_size;
2746 uint32_t rss_key[10];
2752 /* set up random bits */
2753 arc4rand(&rss_key, sizeof(rss_key), 0);
2755 /* Set multiplier for RETA setup and table size based on MAC */
2758 switch (adapter->hw.mac.type) {
2759 case ixgbe_mac_82598EB:
2762 case ixgbe_mac_X550:
2763 case ixgbe_mac_X550EM_x:
2770 /* Set up the redirection table */
2771 for (i = 0, j = 0; i < table_size; i++, j++) {
2772 if (j == adapter->num_queues) j = 0;
2773 queue_id = (j * index_mult);
2775 * The low 8 bits are for hash value (n+0);
2776 * The next 8 bits are for hash value (n+1), etc.
2779 reta = reta | ( ((uint32_t) queue_id) << 24);
/* Every 4th entry flushes the accumulated 4 bytes to RETA/ERETA */
2782 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2784 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2789 /* Now fill our hash function seeds */
2790 for (int i = 0; i < 10; i++)
2791 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2793 /* Perform hash on these packet types */
2795 * Disable UDP - IP fragments aren't currently being handled
2796 * and so we end up with a mix of 2-tuple and 4-tuple
2799 mrqc = IXGBE_MRQC_RSSEN
2800 | IXGBE_MRQC_RSS_FIELD_IPV4
2801 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2803 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2805 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2806 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2807 | IXGBE_MRQC_RSS_FIELD_IPV6
2808 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2810 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2811 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2814 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2818 /*********************************************************************
2820 * Setup receive registers and features.
2822 **********************************************************************/
/* Shift/rounding helpers for programming SRRCTL buffer sizes */
2823 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2825 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Disables RX while programming per-ring descriptor bases, SRRCTL
 * buffer sizing (with DROP_EN when multiqueue and no flow control),
 * broadcast/jumbo/CRC-strip global controls, PSRTYPE, RSS, and RXCSUM.
 */
2828 ixgbe_initialize_receive_units(struct adapter *adapter)
2830 struct rx_ring *rxr = adapter->rx_rings;
2831 struct ixgbe_hw *hw = &adapter->hw;
2832 struct ifnet *ifp = adapter->ifp;
2833 u32 bufsz, fctrl, srrctl, rxcsum;
2838 * Make sure receives are disabled while
2839 * setting up the descriptor ring
2841 ixgbe_disable_rx(hw);
2843 /* Enable broadcasts */
2844 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2845 fctrl |= IXGBE_FCTRL_BAM;
2846 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2847 fctrl |= IXGBE_FCTRL_DPF;
2848 fctrl |= IXGBE_FCTRL_PMCF;
2850 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2852 /* Set for Jumbo Frames? */
2853 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2854 if (ifp->if_mtu > ETHERMTU)
2855 hlreg |= IXGBE_HLREG0_JUMBOEN;
2857 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2859 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2860 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2861 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2863 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2864 #endif /* DEV_NETMAP */
2865 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert mbuf buffer size into SRRCTL's 1KB-granular encoding */
2867 bufsz = (adapter->rx_mbuf_sz +
2868 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2870 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2871 u64 rdba = rxr->rxdma.dma_paddr;
2873 /* Setup the Base and Length of the Rx Descriptor Ring */
2874 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2875 (rdba & 0x00000000ffffffffULL));
2876 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2877 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2878 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2880 /* Set up the SRRCTL register */
2881 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2882 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2883 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2885 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2888 * Set DROP_EN iff we have no flow control and >1 queue.
2889 * Note that srrctl was cleared shortly before during reset,
2890 * so we do not need to clear the bit, but do it just in case
2891 * this code is moved elsewhere.
2893 if (adapter->num_queues > 1 &&
2894 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2895 srrctl |= IXGBE_SRRCTL_DROP_EN;
2897 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2900 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2902 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2903 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2904 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2906 /* Set the processing limit */
2907 rxr->process_limit = ixgbe_rx_process_limit;
2909 /* Set the driver rx tail address */
2910 rxr->tail = IXGBE_RDT(rxr->me);
2913 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2914 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2915 IXGBE_PSRTYPE_UDPHDR |
2916 IXGBE_PSRTYPE_IPV4HDR |
2917 IXGBE_PSRTYPE_IPV6HDR;
2918 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2921 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2923 ixgbe_initialise_rss_mapping(adapter);
2925 if (adapter->num_queues > 1) {
2926 /* RSS and RX IPP Checksum are mutually exclusive */
2927 rxcsum |= IXGBE_RXCSUM_PCSD;
2930 if (ifp->if_capenable & IFCAP_RXCSUM)
2931 rxcsum |= IXGBE_RXCSUM_PCSD;
2933 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2934 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2936 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2943 ** This routine is run via an vlan config EVENT,
2944 ** it enables us to use the HW Filter table since
2945 ** we can get the vlan id. This just creates the
2946 ** entry in the soft version of the VFTA, init will
2947 ** repopulate the real table.
/*
 * Event handler: sets the bit for 'vtag' in the shadow VFTA (32 tags
 * per word: word = vtag >> 5, bit = vtag & 0x1F) under the core lock,
 * then reprograms VLAN hardware support.
 */
2950 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2952 struct adapter *adapter = ifp->if_softc;
2955 if (ifp->if_softc != arg) /* Not our event */
2958 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2961 IXGBE_CORE_LOCK(adapter);
2962 index = (vtag >> 5) & 0x7F;
2964 adapter->shadow_vfta[index] |= (1 << bit);
2965 ++adapter->num_vlans;
2966 ixgbe_setup_vlan_hw_support(adapter);
2967 IXGBE_CORE_UNLOCK(adapter);
2971 ** This routine is run via an vlan
2972 ** unconfig EVENT, remove our entry
2973 ** in the soft vfta.
/*
 * Event handler: mirror of ixgbe_register_vlan() — clears the shadow
 * VFTA bit for 'vtag' and re-initializes VLAN hardware support to
 * push the change to the real table.
 */
2976 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2978 struct adapter *adapter = ifp->if_softc;
2981 if (ifp->if_softc != arg)
2984 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2987 IXGBE_CORE_LOCK(adapter);
2988 index = (vtag >> 5) & 0x7F;
2990 adapter->shadow_vfta[index] &= ~(1 << bit);
2991 --adapter->num_vlans;
2992 /* Re-init to load the changes */
2993 ixgbe_setup_vlan_hw_support(adapter);
2994 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support - after a soft reset, re-enable per-queue
 * VLAN tag stripping, repopulate the hardware VFTA from the shadow
 * copy, and program VLNCTRL filtering based on IFCAP_VLAN_HWFILTER.
 * Does nothing if no VLANs are registered.
 */
2998 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3000 struct ifnet *ifp = adapter->ifp;
3001 struct ixgbe_hw *hw = &adapter->hw;
3002 struct rx_ring *rxr;
3007 ** We get here thru init_locked, meaning
3008 ** a soft reset, this has already cleared
3009 ** the VFTA and other state, so if there
3010 ** have been no vlan's registered do nothing.
3012 if (adapter->num_vlans == 0)
3015 /* Setup the queues for vlans */
3016 for (int i = 0; i < adapter->num_queues; i++) {
3017 rxr = &adapter->rx_rings[i];
3018 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3019 if (hw->mac.type != ixgbe_mac_82598EB) {
3020 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3021 ctrl |= IXGBE_RXDCTL_VME;
3022 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3024 rxr->vtag_strip = TRUE;
3027 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3030 ** A soft reset zero's out the VFTA, so
3031 ** we need to repopulate it now.
3033 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3034 if (adapter->shadow_vfta[i] != 0)
3035 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3036 adapter->shadow_vfta[i]);
3038 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3039 /* Enable the Filter Table if enabled */
3040 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3041 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3042 ctrl |= IXGBE_VLNCTRL_VFE;
/* 82598 has a global VLAN-strip enable (newer MACs are per-queue above) */
3044 if (hw->mac.type == ixgbe_mac_82598EB)
3045 ctrl |= IXGBE_VLNCTRL_VME;
3046 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr - build and program the EIMS interrupt mask
 * (adding MAC-specific causes: ECC, thermal sensor, SFP GPIs, flow
 * director), set EIAC auto-clear for MSI-X (except link causes), and
 * enable each queue's interrupt individually.
 */
3050 ixgbe_enable_intr(struct adapter *adapter)
3052 struct ixgbe_hw *hw = &adapter->hw;
3053 struct ix_queue *que = adapter->queues;
3056 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3057 /* Enable Fan Failure detection */
3058 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3059 mask |= IXGBE_EIMS_GPI_SDP1;
3061 switch (adapter->hw.mac.type) {
3062 case ixgbe_mac_82599EB:
3063 mask |= IXGBE_EIMS_ECC;
3064 /* Temperature sensor on some adapters */
3065 mask |= IXGBE_EIMS_GPI_SDP0;
3066 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3067 mask |= IXGBE_EIMS_GPI_SDP1;
3068 mask |= IXGBE_EIMS_GPI_SDP2;
3070 mask |= IXGBE_EIMS_FLOW_DIR;
3073 case ixgbe_mac_X540:
3074 /* Detect if Thermal Sensor is enabled */
3075 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3076 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3077 mask |= IXGBE_EIMS_TS;
3078 mask |= IXGBE_EIMS_ECC;
3080 mask |= IXGBE_EIMS_FLOW_DIR;
3083 case ixgbe_mac_X550:
3084 case ixgbe_mac_X550EM_x:
3085 /* MAC thermal sensor is automatically enabled */
3086 mask |= IXGBE_EIMS_TS;
3087 /* Some devices use SDP0 for important information */
3088 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3089 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3090 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3091 mask |= IXGBE_EIMS_ECC;
3093 mask |= IXGBE_EIMS_FLOW_DIR;
3100 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3102 /* With MSI-X we use auto clear */
3103 if (adapter->msix_mem) {
3104 mask = IXGBE_EIMS_ENABLE_MASK;
3105 /* Don't autoclear Link */
3106 mask &= ~IXGBE_EIMS_OTHER;
3107 mask &= ~IXGBE_EIMS_LSC;
3108 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3112 ** Now enable all queues, this is done separately to
3113 ** allow for handling the extended (beyond 32) MSIX
3114 ** vectors that can be used by 82599
3116 for (int i = 0; i < adapter->num_queues; i++, que++)
3117 ixgbe_enable_queue(adapter, que->msix);
3119 IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr - mask all interrupt causes.  Clears EIAC
 * auto-clear when MSI-X is in use; 82598 masks everything via EIMC,
 * newer MACs also need the extended EIMC_EX registers cleared.
 */
3125 ixgbe_disable_intr(struct adapter *adapter)
3127 if (adapter->msix_mem)
3128 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3129 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3133 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3134 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3136 IXGBE_WRITE_FLUSH(&adapter->hw);
3141 ** Get the width and transaction speed of
3142 ** the slot this adapter is plugged into.
/*
 * For most devices this defers to the shared-code ixgbe_get_bus_info().
 * The 82599 quad-port SFP part sits behind an internal switch, so its
 * slot speed/width must be read by walking two levels up the PCI tree
 * and decoding the parent bridge's Link Status register.  Warns when
 * the slot provides less bandwidth than the adapter needs.
 */
3145 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3147 device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
3148 struct ixgbe_mac_info *mac = &hw->mac;
3152 /* For most devices simply call the shared code routine */
3153 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3154 ixgbe_get_bus_info(hw);
3155 /* These devices don't use PCI-E */
3156 switch (hw->mac.type) {
3157 case ixgbe_mac_X550EM_x:
3165 ** For the Quad port adapter we need to parse back
3166 ** up the PCI tree to find the speed of the expansion
3167 ** slot into which this adapter is plugged. A bit more work.
3169 dev = device_get_parent(device_get_parent(dev));
3171 device_printf(dev, "parent pcib = %x,%x,%x\n",
3172 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3174 dev = device_get_parent(device_get_parent(dev));
3176 device_printf(dev, "slot pcib = %x,%x,%x\n",
3177 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3179 /* Now get the PCI Express Capabilities offset */
3180 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3181 /* ...and read the Link Status Register */
3182 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3183 switch (link & IXGBE_PCI_LINK_WIDTH) {
3184 case IXGBE_PCI_LINK_WIDTH_1:
3185 hw->bus.width = ixgbe_bus_width_pcie_x1;
3187 case IXGBE_PCI_LINK_WIDTH_2:
3188 hw->bus.width = ixgbe_bus_width_pcie_x2;
3190 case IXGBE_PCI_LINK_WIDTH_4:
3191 hw->bus.width = ixgbe_bus_width_pcie_x4;
3193 case IXGBE_PCI_LINK_WIDTH_8:
3194 hw->bus.width = ixgbe_bus_width_pcie_x8;
3197 hw->bus.width = ixgbe_bus_width_unknown;
3201 switch (link & IXGBE_PCI_LINK_SPEED) {
3202 case IXGBE_PCI_LINK_SPEED_2500:
3203 hw->bus.speed = ixgbe_bus_speed_2500;
3205 case IXGBE_PCI_LINK_SPEED_5000:
3206 hw->bus.speed = ixgbe_bus_speed_5000;
3208 case IXGBE_PCI_LINK_SPEED_8000:
3209 hw->bus.speed = ixgbe_bus_speed_8000;
3212 hw->bus.speed = ixgbe_bus_speed_unknown;
3216 mac->ops.set_lan_id(hw);
3219 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3220 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3221 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3222 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3223 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3224 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3225 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn if the slot can't feed a dual-port 10G adapter at line rate */
3228 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3229 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3230 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3231 device_printf(dev, "PCI-Express bandwidth available"
3232 " for this card\n is not sufficient for"
3233 " optimal performance.\n");
3234 device_printf(dev, "For optimal performance a x8 "
3235 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3237 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3238 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3239 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3240 device_printf(dev, "PCI-Express bandwidth available"
3241 " for this card\n is not sufficient for"
3242 " optimal performance.\n");
3243 device_printf(dev, "For optimal performance a x8 "
3244 "PCIE Gen3 slot is required.\n");
3252 ** Setup the correct IVAR register for a particular MSIX interrupt
3253 ** (yes this is all very magic and confusing :)
3254 ** - entry is the register array entry
3255 ** - vector is the MSIX vector for this queue
3256 ** - type is RX/TX/MISC
/*
 * type -1 selects the "other causes" / misc IVAR; type 0 = RX,
 * type 1 = TX.  IVAR entries are packed 4 per 32-bit register on
 * 82598, 2 per register (RX/TX interleaved) on 82599 and later.
 */
3259 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3261 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the valid/alloc bit set alongside the vector */
3264 vector |= IXGBE_IVAR_ALLOC_VAL;
3266 switch (hw->mac.type) {
3268 case ixgbe_mac_82598EB:
3270 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3272 entry += (type * 64);
3273 index = (entry >> 2) & 0x1F;
3274 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3275 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3276 ivar |= (vector << (8 * (entry & 0x3)));
3277 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3280 case ixgbe_mac_82599EB:
3281 case ixgbe_mac_X540:
3282 case ixgbe_mac_X550:
3283 case ixgbe_mac_X550EM_x:
3284 if (type == -1) { /* MISC IVAR */
3285 index = (entry & 1) * 8;
3286 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3287 ivar &= ~(0xFF << index);
3288 ivar |= (vector << index);
3289 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3290 } else { /* RX/TX IVARS */
3291 index = (16 * (entry & 1)) + (8 * type);
3292 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3293 ivar &= ~(0xFF << index);
3294 ivar |= (vector << index);
3295 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - map every queue's RX and TX causes to its
 * MSIX vector via ixgbe_set_ivar(), seed each vector's EITR with the
 * interrupt-rate tunable, and map the link cause to adapter->vector.
 */
3304 ixgbe_configure_ivars(struct adapter *adapter)
3306 struct ix_queue *que = adapter->queues;
/* EITR granularity: 4000000/rate, low 3 bits must stay clear */
3309 if (ixgbe_max_interrupt_rate > 0)
3310 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3313 ** Disable DMA coalescing if interrupt moderation is
3320 for (int i = 0; i < adapter->num_queues; i++, que++) {
3321 /* First the RX queue entry */
3322 ixgbe_set_ivar(adapter, i, que->msix, 0);
3323 /* ... and the TX */
3324 ixgbe_set_ivar(adapter, i, que->msix, 1);
3325 /* Set an Initial EITR value */
3326 IXGBE_WRITE_REG(&adapter->hw,
3327 IXGBE_EITR(que->msix), newitr);
3330 /* For the Link interrupt */
3331 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3335 ** ixgbe_sfp_probe - called in the local timer to
3336 ** determine if a port had optics inserted.
/*
 * Returns TRUE presumably when supported optics were just detected
 * (the return statements are elided in this extract — TODO confirm).
 * Only probes NL-type PHYs whose SFP was previously absent; on an
 * unsupported module the probe is disabled until driver reload.
 */
3338 static bool ixgbe_sfp_probe(struct adapter *adapter)
3340 struct ixgbe_hw *hw = &adapter->hw;
3341 device_t dev = adapter->dev;
3342 bool result = FALSE;
3344 if ((hw->phy.type == ixgbe_phy_nl) &&
3345 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3346 s32 ret = hw->phy.ops.identify_sfp(hw);
3349 ret = hw->phy.ops.reset(hw);
3350 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3351 device_printf(dev,"Unsupported SFP+ module detected!");
3352 printf(" Reload driver with supported module.\n");
3353 adapter->sfp_probe = FALSE;
3356 device_printf(dev,"SFP+ module detected!\n");
3357 /* We now have supported optics */
3358 adapter->sfp_probe = FALSE;
3359 /* Set the optics type so system reports correctly */
3360 ixgbe_setup_optics(adapter);
3368 ** Tasklet handler for MSIX Link interrupts
3369 ** - do outside interrupt since it might sleep
/* Re-reads link state from hardware and propagates it to the stack. */
3372 ixgbe_handle_link(void *context, int pending)
3374 struct adapter *adapter = context;
3376 ixgbe_check_link(&adapter->hw,
3377 &adapter->link_speed, &adapter->link_up, 0);
3378 ixgbe_update_link_status(adapter);
3382 ** Tasklet for handling SFP module interrupts
/*
 * Identifies a newly inserted SFP+ module, runs the MAC's setup_sfp
 * hook, and (on success paths elided here) chains into the MSF task
 * to negotiate speed.  Unsupported modules are reported and abandoned.
 */
3385 ixgbe_handle_mod(void *context, int pending)
3387 struct adapter *adapter = context;
3388 struct ixgbe_hw *hw = &adapter->hw;
3389 device_t dev = adapter->dev;
3392 err = hw->phy.ops.identify_sfp(hw);
3393 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3395 "Unsupported SFP+ module type was detected.\n");
3398 err = hw->mac.ops.setup_sfp(hw);
3399 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3401 "Setup failure - unsupported SFP+ module type.\n");
3404 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3410 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
 * Re-identifies the installed optics, re-runs link setup with the
 * advertised (or capability-derived) speeds, and rebuilds the ifmedia
 * list so reported media matches the new module.
 */
3413 ixgbe_handle_msf(void *context, int pending)
3415 struct adapter *adapter = context;
3416 struct ixgbe_hw *hw = &adapter->hw;
3421 err = hw->phy.ops.identify_sfp(hw);
3423 ixgbe_setup_optics(adapter);
3424 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3427 autoneg = hw->phy.autoneg_advertised;
3428 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3429 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3430 if (hw->mac.ops.setup_link)
3431 hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Media set may have changed with the new module: rebuild it */
3433 ifmedia_removeall(&adapter->media);
3434 ixgbe_add_media_types(adapter);
3439 ** Tasklet for handling interrupts from an external PHY
/*
 * Services a LASI (link alarm status interrupt) from the external
 * PHY; overtemperature gets a loud warning, other errors are logged.
 */
3442 ixgbe_handle_phy(void *context, int pending)
3444 struct adapter *adapter = context;
3445 struct ixgbe_hw *hw = &adapter->hw;
3448 error = hw->phy.ops.handle_lasi(hw);
3449 if (error == IXGBE_ERR_OVERTEMP)
3450 device_printf(adapter->dev,
3451 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3452 " PHY will downshift to lower power state!\n");
3454 device_printf(adapter->dev,
3455 "Error handling LASI interrupt: %d\n",
3462 ** Tasklet for reinitializing the Flow Director filter table
/*
 * Rebuilds the 82599 Flow Director tables after the hardware signaled
 * they need reinitialization, then re-enables the FDIR interrupt
 * cause and marks the interface running again.
 */
3465 ixgbe_reinit_fdir(void *context, int pending)
3467 struct adapter *adapter = context;
3468 struct ifnet *ifp = adapter->ifp;
3470 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3472 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3473 adapter->fdir_reinit = 0;
3474 /* re-enable flow director interrupts */
3475 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3476 /* Restart the interface */
3477 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3482 /*********************************************************************
3484 * Configure DMA Coalescing
3486 **********************************************************************/
/*
 * Only X550-class MACs support DMA coalescing.  Reprograms the
 * shared-code dmac_config when either the watchdog timer tunable
 * (adapter->dmac) or the link speed has changed since last time.
 */
3488 ixgbe_config_dmac(struct adapter *adapter)
3490 struct ixgbe_hw *hw = &adapter->hw;
3491 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3493 if (hw->mac.type < ixgbe_mac_X550 ||
3494 !hw->mac.ops.dmac_config)
/* XOR acts as "changed?" test against the cached configuration */
3497 if (dcfg->watchdog_timer ^ adapter->dmac ||
3498 dcfg->link_speed ^ adapter->link_speed) {
3499 dcfg->watchdog_timer = adapter->dmac;
3500 dcfg->fcoe_en = false;
3501 dcfg->link_speed = adapter->link_speed;
3504 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3505 dcfg->watchdog_timer, dcfg->link_speed);
3507 hw->mac.ops.dmac_config(hw);
3512 * Checks whether the adapter supports Energy Efficient Ethernet
3513 * or not, based on device ID.
/* EEE is supported (and enabled by default) only on X550T / X550EM KR. */
3516 ixgbe_check_eee_support(struct adapter *adapter)
3518 struct ixgbe_hw *hw = &adapter->hw;
3520 adapter->eee_support = adapter->eee_enabled =
3521 (hw->device_id == IXGBE_DEV_ID_X550T ||
3522 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3526 * Checks whether the adapter's ports are capable of
3527 * Wake On LAN by reading the adapter's NVM.
3529 * Sets each port's hw->wol_enabled value depending
3530 * on the value read here.
/*
 * Also snapshots the current Wake Up Filter Control register so the
 * user's filter configuration can be restored on suspend.
 * NOTE(review): part of the port-number condition is elided here.
 */
3533 ixgbe_check_wol_support(struct adapter *adapter)
3535 struct ixgbe_hw *hw = &adapter->hw;
3538 /* Find out WoL support for port */
3539 adapter->wol_support = hw->wol_enabled = 0;
3540 ixgbe_get_device_caps(hw, &dev_caps);
3541 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3542 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3544 adapter->wol_support = hw->wol_enabled = 1;
3546 /* Save initial wake up filter configuration */
3547 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3553 * Prepare the adapter/port for LPLU and/or WoL
/*
 * Called with the core lock held (asserted below).  X550EM baseT
 * parts get the full low-power flow: clear wake status, program the
 * user's wake filters, enable PME, stop the interface, and enter
 * low-power link-up via the PHY hook.  All other adapters are simply
 * stopped.
 */
3556 ixgbe_setup_low_power_mode(struct adapter *adapter)
3558 struct ixgbe_hw *hw = &adapter->hw;
3559 device_t dev = adapter->dev;
3562 mtx_assert(&adapter->core_mtx, MA_OWNED);
3564 /* Limit power management flow to X550EM baseT */
3565 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3566 && hw->phy.ops.enter_lplu) {
3567 /* Turn off support for APM wakeup. (Using ACPI instead) */
3568 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3569 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3572 * Clear Wake Up Status register to prevent any previous wakeup
3573 * events from waking us up immediately after we suspend.
3575 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3578 * Program the Wakeup Filter Control register with user filter
3581 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3583 /* Enable wakeups and power management in Wakeup Control */
3584 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3585 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3587 /* X550EM baseT adapters need a special LPLU flow */
3588 hw->phy.reset_disable = true;
3589 ixgbe_stop(adapter);
3590 error = hw->phy.ops.enter_lplu(hw);
3593 "Error entering LPLU: %d\n", error);
3594 hw->phy.reset_disable = false;
3596 /* Just stop for other adapters */
3597 ixgbe_stop(adapter);
3603 /**********************************************************************
3605  *  Update the board statistics counters.
3607  **********************************************************************/
/*
 * Reads the hardware statistics registers (which are clear-on-read) and
 * accumulates them into adapter->stats.pf, then publishes the totals to
 * the OS via the IXGBE_SET_* macros at the bottom.
 *
 * NOTE(review): missed_rx/total_missed_rx are initialized to 0 and then
 * consumed (gprc -= missed_rx, IQDROPS = total_missed_rx) but no code
 * visible in this extract ever accumulates into them — the usual loop
 * reading the per-TC IXGBE_MPC registers appears to be missing; verify
 * against the full source.
 */
3609 ixgbe_update_stats_counters(struct adapter *adapter)
3611 	struct ixgbe_hw *hw = &adapter->hw;
3612 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
3613 	u64 total_missed_rx = 0;
3615 	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3616 	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3617 	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3618 	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* Per-queue packet/drop counters; only the first 16 queues are read. */
3620 	for (int i = 0; i < 16; i++) {
3621 		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3622 		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3623 		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3625 	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3626 	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3627 	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3629 	/* Hardware workaround, gprc counts missed packets */
3630 	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3631 	adapter->stats.pf.gprc -= missed_rx;
	/* 82599 and later have 64-bit octet counters split across two regs. */
3633 	if (hw->mac.type != ixgbe_mac_82598EB) {
3634 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3635 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3636 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3637 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3638 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3639 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3640 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3641 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3643 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3644 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3645 		/* 82598 only has a counter in the high register */
3646 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3647 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3648 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3652 	 * Workaround: mprc hardware is incorrectly counting
3653 	 * broadcasts, so for now we subtract those.
3655 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3656 	adapter->stats.pf.bprc += bprc;
3657 	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3658 	if (hw->mac.type == ixgbe_mac_82598EB)
3659 		adapter->stats.pf.mprc -= bprc;
3661 	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3662 	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3663 	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3664 	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3665 	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3666 	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3668 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3669 	adapter->stats.pf.lxontxc += lxon;
3670 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3671 	adapter->stats.pf.lxofftxc += lxoff;
3672 	total = lxon + lxoff;
	/*
	 * Flow-control frames are counted by the TX packet counters; back
	 * them out so gptc/mptc/ptc64 reflect data traffic only.  Each
	 * pause frame is ETHER_MIN_LEN octets, hence the gotc adjustment.
	 */
3674 	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3675 	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3676 	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3677 	adapter->stats.pf.gptc -= total;
3678 	adapter->stats.pf.mptc -= total;
3679 	adapter->stats.pf.ptc64 -= total;
3680 	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3682 	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3683 	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3684 	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3685 	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3686 	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3687 	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3688 	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3689 	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3690 	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3691 	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3692 	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3693 	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3694 	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3695 	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3696 	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3697 	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3698 	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3699 	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3700 	/* Only read FCOE on 82599 */
3701 	if (hw->mac.type != ixgbe_mac_82598EB) {
3702 		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3703 		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3704 		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3705 		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3706 		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3709 	/* Fill out the OS statistics structure */
3710 	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3711 	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3712 	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3713 	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3714 	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3715 	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3716 	IXGBE_SET_COLLISIONS(adapter, 0);
3717 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3718 	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3719 	    + adapter->stats.pf.rlec);
3722 #if __FreeBSD_version >= 1100036
/*
 * if_get_counter callback: report interface statistics accumulated by
 * ixgbe_update_stats_counters().  OQDROPS is computed on demand by
 * summing the buf_ring drop counts across all TX rings.  Unknown
 * counters fall through to if_get_counter_default().
 * NOTE(review): the declaration of `rv` and the `return (rv)` after the
 * OQDROPS loop are not visible in this extract.
 */
3724 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3726 	struct adapter *adapter;
3727 	struct tx_ring *txr;
3730 	adapter = if_getsoftc(ifp);
3733 	case IFCOUNTER_IPACKETS:
3734 		return (adapter->ipackets);
3735 	case IFCOUNTER_OPACKETS:
3736 		return (adapter->opackets);
3737 	case IFCOUNTER_IBYTES:
3738 		return (adapter->ibytes);
3739 	case IFCOUNTER_OBYTES:
3740 		return (adapter->obytes);
3741 	case IFCOUNTER_IMCASTS:
3742 		return (adapter->imcasts);
3743 	case IFCOUNTER_OMCASTS:
3744 		return (adapter->omcasts);
3745 	case IFCOUNTER_COLLISIONS:
3747 	case IFCOUNTER_IQDROPS:
3748 		return (adapter->iqdrops);
3749 	case IFCOUNTER_OQDROPS:
3751 		txr = adapter->tx_rings;
3752 		for (int i = 0; i < adapter->num_queues; i++, txr++)
3753 			rv += txr->br->br_drops;
3755 	case IFCOUNTER_IERRORS:
3756 		return (adapter->ierrors);
3758 		return (if_get_counter_default(ifp, cnt));
3763 /** ixgbe_sysctl_tdh_handler - Handler function
3764  *  Retrieves the TDH value from the hardware
/* Read-only sysctl: report the TX Descriptor Head register for one ring. */
3767 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3771 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3774 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3775 	error = sysctl_handle_int(oidp, &val, 0, req);
3776 	if (error || !req->newptr)
3781 /** ixgbe_sysctl_tdt_handler - Handler function
3782  *  Retrieves the TDT value from the hardware
/* Read-only sysctl: report the TX Descriptor Tail register for one ring. */
3785 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3789 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3792 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3793 	error = sysctl_handle_int(oidp, &val, 0, req);
3794 	if (error || !req->newptr)
3799 /** ixgbe_sysctl_rdh_handler - Handler function
3800  *  Retrieves the RDH value from the hardware
/* Read-only sysctl: report the RX Descriptor Head register for one ring. */
3803 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3807 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3810 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3811 	error = sysctl_handle_int(oidp, &val, 0, req);
3812 	if (error || !req->newptr)
3817 /** ixgbe_sysctl_rdt_handler - Handler function
3818  *  Retrieves the RDT value from the hardware
/* Read-only sysctl: report the RX Descriptor Tail register for one ring. */
3821 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3825 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3828 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3829 	error = sysctl_handle_int(oidp, &val, 0, req);
3830 	if (error || !req->newptr)
/*
 * Read/write sysctl for the per-queue interrupt rate.  EITR bits 3..11
 * hold the interval in 2-usec units (hence the >> 3); rate is derived as
 * 500000/usec and, on write, converted back with 4000000/rate.  Writing
 * also updates the global ixgbe_max_interrupt_rate default.
 * NOTE(review): a guard against usec == 0 before the division is not
 * visible in this extract — presumably an `if (usec > 0)` was elided;
 * confirm against the full source.
 */
3836 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3839 	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3840 	unsigned int reg, usec, rate;
3842 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3843 	usec = ((reg & 0x0FF8) >> 3);
3845 		rate = 500000 / usec;
3848 	error = sysctl_handle_int(oidp, &rate, 0, req);
3849 	if (error || !req->newptr)
3851 	reg &= ~0xfff; /* default, no limitation */
3852 	ixgbe_max_interrupt_rate = 0;
3853 	if (rate > 0 && rate < 500000) {
3856 		ixgbe_max_interrupt_rate = rate;
3857 		reg |= ((4000000/rate) & 0xff8 );
3859 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * Register the device-level sysctl nodes: flow control, AIM, advertised
 * speed and thermal test for all parts, plus conditional nodes — DMA
 * coalescing (X550+), EEE (X550T / X550EM_X_KR), WoL (10GBaseT parts)
 * and external-PHY readouts (X550EM_X_10G_T).  The device-ID lists here
 * intentionally mirror ixgbe_check_eee_support() and the WoL/PHY checks
 * elsewhere in this file.
 */
3864 ixgbe_add_device_sysctls(struct adapter *adapter)
3866 	device_t dev = adapter->dev;
3867 	struct ixgbe_hw *hw = &adapter->hw;
3868 	struct sysctl_oid_list *child;
3869 	struct sysctl_ctx_list *ctx;
3871 	ctx = device_get_sysctl_ctx(dev);
3872 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3874 	/* Sysctls for all devices */
3875 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3876 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3877 	    ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3879 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3881 	    &ixgbe_enable_aim, 1, "Interrupt Moderation");
3883 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3884 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3885 	    ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3887 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3888 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889 	    ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3891 	/* for X550 devices */
3892 	if (hw->mac.type >= ixgbe_mac_X550)
3893 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3894 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3895 		    ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3897 	/* for X550T and X550EM backplane devices */
3898 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
3899 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3900 		struct sysctl_oid *eee_node;
3901 		struct sysctl_oid_list *eee_list;
3903 		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3905 		    "Energy Efficient Ethernet sysctls");
3906 		eee_list = SYSCTL_CHILDREN(eee_node);
3908 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3909 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3910 		    ixgbe_sysctl_eee_enable, "I",
3911 		    "Enable or Disable EEE");
3913 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3914 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3915 		    ixgbe_sysctl_eee_negotiated, "I",
3916 		    "EEE negotiated on link");
3918 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3919 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3920 		    ixgbe_sysctl_eee_tx_lpi_status, "I",
3921 		    "Whether or not TX link is in LPI state");
3923 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3924 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3925 		    ixgbe_sysctl_eee_rx_lpi_status, "I",
3926 		    "Whether or not RX link is in LPI state");
3929 	/* for certain 10GBaseT devices */
3930 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
3931 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3932 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3933 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3934 		    ixgbe_sysctl_wol_enable, "I",
3935 		    "Enable/Disable Wake on LAN");
3937 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3938 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3939 		    ixgbe_sysctl_wufc, "I",
3940 		    "Enable/Disable Wake Up Filters");
3943 	/* for X550EM 10GBaseT devices */
3944 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3945 		struct sysctl_oid *phy_node;
3946 		struct sysctl_oid_list *phy_list;
3948 		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3950 		    "External PHY sysctls");
3951 		phy_list = SYSCTL_CHILDREN(phy_node);
3953 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3954 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3955 		    ixgbe_sysctl_phy_temp, "I",
3956 		    "Current External PHY Temperature (Celsius)");
3958 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3959 		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3960 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
3961 		    "External PHY High Temperature Event Occurred");
3966  * Add sysctl variables, one per statistic, to the system.
/*
 * Builds the sysctl tree for driver counters, per-queue TX/RX counters,
 * and the MAC statistics (all backed by adapter->stats.pf, which is
 * refreshed by ixgbe_update_stats_counters()).
 */
3969 ixgbe_add_hw_stats(struct adapter *adapter)
3971 	device_t dev = adapter->dev;
3973 	struct tx_ring *txr = adapter->tx_rings;
3974 	struct rx_ring *rxr = adapter->rx_rings;
3976 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3977 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3978 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3979 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3981 	struct sysctl_oid *stat_node, *queue_node;
3982 	struct sysctl_oid_list *stat_list, *queue_list;
3984 #define QUEUE_NAME_LEN 32
3985 	char namebuf[QUEUE_NAME_LEN];
3987 	/* Driver Statistics */
3988 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3989 	    CTLFLAG_RD, &adapter->dropped_pkts,
3990 	    "Driver dropped packets");
3991 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3992 	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3993 	    "m_defrag() failed");
3994 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3995 	    CTLFLAG_RD, &adapter->watchdog_events,
3996 	    "Watchdog timeouts");
3997 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3998 	    CTLFLAG_RD, &adapter->link_irq,
3999 	    "Link MSIX IRQ Handled");
	/* Per-queue TX statistics under "queueN" nodes. */
4001 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4002 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4003 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4004 		    CTLFLAG_RD, NULL, "Queue Name");
4005 		queue_list = SYSCTL_CHILDREN(queue_node);
4007 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4008 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4009 		    sizeof(&adapter->queues[i]),
4010 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
4012 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4013 		    CTLFLAG_RD, &(adapter->queues[i].irqs),
4014 		    "irqs on this queue");
4015 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4016 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4017 		    ixgbe_sysctl_tdh_handler, "IU",
4018 		    "Transmit Descriptor Head");
4019 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4020 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4021 		    ixgbe_sysctl_tdt_handler, "IU",
4022 		    "Transmit Descriptor Tail");
4023 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4024 		    CTLFLAG_RD, &txr->tso_tx,
4026 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4027 		    CTLFLAG_RD, &txr->no_tx_dma_setup,
4028 		    "Driver tx dma failure in xmit");
4029 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4030 		    CTLFLAG_RD, &txr->no_desc_avail,
4031 		    "Queue No Descriptor Available");
4032 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4033 		    CTLFLAG_RD, &txr->total_packets,
4034 		    "Queue Packets Transmitted");
4035 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4036 		    CTLFLAG_RD, &txr->br->br_drops,
4037 		    "Packets dropped in buf_ring");
	/*
	 * Per-queue RX statistics.
	 * NOTE(review): the "queue%d" node is created twice in this loop —
	 * once immediately below and again after the lro declaration — so
	 * the RX OIDs land under a second, duplicate node.  Looks like a
	 * merge artifact; the first snprintf/ADD_NODE/CHILDREN triple is
	 * redundant and should likely be removed.
	 */
4040 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4041 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4042 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4043 		    CTLFLAG_RD, NULL, "Queue Name");
4044 		queue_list = SYSCTL_CHILDREN(queue_node);
4046 		struct lro_ctrl *lro = &rxr->lro;
4048 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4049 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4050 		    CTLFLAG_RD, NULL, "Queue Name");
4051 		queue_list = SYSCTL_CHILDREN(queue_node);
4053 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4054 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4055 		    ixgbe_sysctl_rdh_handler, "IU",
4056 		    "Receive Descriptor Head");
4057 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4058 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4059 		    ixgbe_sysctl_rdt_handler, "IU",
4060 		    "Receive Descriptor Tail");
4061 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4062 		    CTLFLAG_RD, &rxr->rx_packets,
4063 		    "Queue Packets Received");
4064 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4065 		    CTLFLAG_RD, &rxr->rx_bytes,
4066 		    "Queue Bytes Received");
4067 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4068 		    CTLFLAG_RD, &rxr->rx_copies,
4069 		    "Copied RX Frames");
4070 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4071 		    CTLFLAG_RD, &lro->lro_queued, 0,
4073 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4074 		    CTLFLAG_RD, &lro->lro_flushed, 0,
4078 	/* MAC stats get the own sub node */
4080 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4081 	    CTLFLAG_RD, NULL, "MAC Statistics");
4082 	stat_list = SYSCTL_CHILDREN(stat_node);
4084 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4085 	    CTLFLAG_RD, &stats->crcerrs,
4087 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4088 	    CTLFLAG_RD, &stats->illerrc,
4089 	    "Illegal Byte Errors");
4090 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4091 	    CTLFLAG_RD, &stats->errbc,
4093 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4094 	    CTLFLAG_RD, &stats->mspdc,
4095 	    "MAC Short Packets Discarded");
4096 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4097 	    CTLFLAG_RD, &stats->mlfc,
4098 	    "MAC Local Faults");
4099 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4100 	    CTLFLAG_RD, &stats->mrfc,
4101 	    "MAC Remote Faults");
4102 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4103 	    CTLFLAG_RD, &stats->rlec,
4104 	    "Receive Length Errors");
4106 	/* Flow Control stats */
4107 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4108 	    CTLFLAG_RD, &stats->lxontxc,
4109 	    "Link XON Transmitted");
4110 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4111 	    CTLFLAG_RD, &stats->lxonrxc,
4112 	    "Link XON Received");
4113 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4114 	    CTLFLAG_RD, &stats->lxofftxc,
4115 	    "Link XOFF Transmitted");
4116 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4117 	    CTLFLAG_RD, &stats->lxoffrxc,
4118 	    "Link XOFF Received");
4120 	/* Packet Reception Stats */
4121 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4122 	    CTLFLAG_RD, &stats->tor,
4123 	    "Total Octets Received");
4124 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4125 	    CTLFLAG_RD, &stats->gorc,
4126 	    "Good Octets Received");
4127 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4128 	    CTLFLAG_RD, &stats->tpr,
4129 	    "Total Packets Received");
4130 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4131 	    CTLFLAG_RD, &stats->gprc,
4132 	    "Good Packets Received");
4133 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4134 	    CTLFLAG_RD, &stats->mprc,
4135 	    "Multicast Packets Received");
4136 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4137 	    CTLFLAG_RD, &stats->bprc,
4138 	    "Broadcast Packets Received");
4139 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4140 	    CTLFLAG_RD, &stats->prc64,
4141 	    "64 byte frames received ");
4142 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4143 	    CTLFLAG_RD, &stats->prc127,
4144 	    "65-127 byte frames received");
4145 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4146 	    CTLFLAG_RD, &stats->prc255,
4147 	    "128-255 byte frames received");
4148 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4149 	    CTLFLAG_RD, &stats->prc511,
4150 	    "256-511 byte frames received");
4151 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4152 	    CTLFLAG_RD, &stats->prc1023,
4153 	    "512-1023 byte frames received");
4154 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4155 	    CTLFLAG_RD, &stats->prc1522,
4156 	    "1023-1522 byte frames received");
4157 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4158 	    CTLFLAG_RD, &stats->ruc,
4159 	    "Receive Undersized");
4160 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4161 	    CTLFLAG_RD, &stats->rfc,
4162 	    "Fragmented Packets Received ");
4163 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4164 	    CTLFLAG_RD, &stats->roc,
4165 	    "Oversized Packets Received");
4166 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4167 	    CTLFLAG_RD, &stats->rjc,
4169 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4170 	    CTLFLAG_RD, &stats->mngprc,
4171 	    "Management Packets Received");
	/*
	 * NOTE(review): "management_pkts_drpd" points at &stats->mngptc
	 * (management packets *transmitted*, also exported below) even
	 * though mngpdc is accumulated in ixgbe_update_stats_counters().
	 * Probably should be &stats->mngpdc — verify and fix upstream.
	 */
4172 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4173 	    CTLFLAG_RD, &stats->mngptc,
4174 	    "Management Packets Dropped");
4175 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4176 	    CTLFLAG_RD, &stats->xec,
4179 	/* Packet Transmission Stats */
4180 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4181 	    CTLFLAG_RD, &stats->gotc,
4182 	    "Good Octets Transmitted");
4183 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4184 	    CTLFLAG_RD, &stats->tpt,
4185 	    "Total Packets Transmitted");
4186 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4187 	    CTLFLAG_RD, &stats->gptc,
4188 	    "Good Packets Transmitted");
4189 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4190 	    CTLFLAG_RD, &stats->bptc,
4191 	    "Broadcast Packets Transmitted");
4192 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4193 	    CTLFLAG_RD, &stats->mptc,
4194 	    "Multicast Packets Transmitted");
4195 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4196 	    CTLFLAG_RD, &stats->mngptc,
4197 	    "Management Packets Transmitted");
4198 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4199 	    CTLFLAG_RD, &stats->ptc64,
4200 	    "64 byte frames transmitted ");
4201 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4202 	    CTLFLAG_RD, &stats->ptc127,
4203 	    "65-127 byte frames transmitted");
4204 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4205 	    CTLFLAG_RD, &stats->ptc255,
4206 	    "128-255 byte frames transmitted");
4207 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4208 	    CTLFLAG_RD, &stats->ptc511,
4209 	    "256-511 byte frames transmitted");
4210 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4211 	    CTLFLAG_RD, &stats->ptc1023,
4212 	    "512-1023 byte frames transmitted");
4213 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4214 	    CTLFLAG_RD, &stats->ptc1522,
4215 	    "1024-1522 byte frames transmitted");
4219 ** Set flow control using sysctl:
4220 ** Flow control values:
/*
 * Sysctl handler for "fc".  rx_pause/tx_pause/full force the requested
 * mode directly; anything else (including 0) disables flow control.  RX
 * packet-drop on buffer-full is only useful when flow control is off, so
 * it is toggled accordingly for multiqueue configurations.
 * NOTE(review): `last` is compared against adapter->fc but no visible
 * code initializes it — presumably it snapshots adapter->fc before the
 * sysctl_handle_int call; confirm against the full source.
 */
4227 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4230 	struct adapter *adapter = (struct adapter *) arg1;
4233 	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4234 	if ((error) || (req->newptr == NULL))
4237 	/* Don't bother if it's not changed */
4238 	if (adapter->fc == last)
4241 	switch (adapter->fc) {
4242 		case ixgbe_fc_rx_pause:
4243 		case ixgbe_fc_tx_pause:
4245 			adapter->hw.fc.requested_mode = adapter->fc;
4246 			if (adapter->num_queues > 1)
4247 				ixgbe_disable_rx_drop(adapter);
4250 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4251 			if (adapter->num_queues > 1)
4252 				ixgbe_enable_rx_drop(adapter);
4258 	/* Don't autoneg if forcing a value */
4259 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4260 	ixgbe_fc_enable(&adapter->hw);
4265 ** Control advertised link speed:
4267 **	0x1 - advertise 100 Mb
4268 **	0x2 - advertise 1G
4269 **	0x4 - advertise 10G
/*
 * Sysctl handler for "advertise_speed".  Accepts a bitmask 0x1-0x7,
 * validates media type (copper or multispeed fiber only) and that 100Mb
 * is only requested on X540/X550, then reprograms the link via
 * mac.ops.setup_link().  `dev` and `hw` are assigned on elided lines.
 */
4272 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4274 	int error = 0, requested;
4275 	struct adapter *adapter;
4277 	struct ixgbe_hw *hw;
4278 	ixgbe_link_speed speed = 0;
4280 	adapter = (struct adapter *) arg1;
4284 	requested = adapter->advertise;
4285 	error = sysctl_handle_int(oidp, &requested, 0, req);
4286 	if ((error) || (req->newptr == NULL))
4289 	/* Checks to validate new value */
4290 	if (adapter->advertise == requested) /* no change */
4293 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4294 	    (hw->phy.multispeed_fiber))) {
4296 		    "Advertised speed can only be set on copper or "
4297 		    "multispeed fiber media types.\n");
4301 	if (requested < 0x1 || requested > 0x7) {
4303 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4307 	if ((requested & 0x1)
4308 	    && (hw->mac.type != ixgbe_mac_X540)
4309 	    && (hw->mac.type != ixgbe_mac_X550)) {
4310 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4314 	/* Set new value and report new advertised mode */
4315 	if (requested & 0x1)
4316 		speed |= IXGBE_LINK_SPEED_100_FULL;
4317 	if (requested & 0x2)
4318 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4319 	if (requested & 0x4)
4320 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4322 	hw->mac.autotry_restart = TRUE;
4323 	hw->mac.ops.setup_link(hw, speed, TRUE);
4324 	adapter->advertise = requested;
4330  * The following two sysctls are for X550 BaseT devices;
4331  * they deal with the external PHY used in them.
/*
 * Read-only sysctl: current external PHY temperature.  Only valid on
 * X550EM_X_10G_T; other devices get an error printf.  The raw register
 * value is shifted before being reported (the shift itself is on an
 * elided line — per the sysctl description elsewhere, the result is in
 * degrees Celsius; confirm the exact scaling in the full source).
 */
4334 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4336 	struct adapter *adapter = (struct adapter *) arg1;
4337 	struct ixgbe_hw *hw = &adapter->hw;
4340 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4341 		device_printf(adapter->dev,
4342 		    "Device has no supported external thermal sensor.\n");
4346 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4347 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4349 		device_printf(adapter->dev,
4350 		    "Error reading from PHY's current temperature register\n");
4354 	/* Shift temp for output */
4357 	return (sysctl_handle_int(oidp, NULL, reg, req));
4361  * Reports whether the current PHY temperature is over
4362  * the overtemp threshold.
4363  *  - This is reported directly from the PHY
/*
 * Read-only sysctl: 1 if the external PHY's overtemp-status bit (0x4000)
 * is set, else 0.  X550EM_X_10G_T only, like ixgbe_sysctl_phy_temp().
 */
4366 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4368 	struct adapter *adapter = (struct adapter *) arg1;
4369 	struct ixgbe_hw *hw = &adapter->hw;
4372 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4373 		device_printf(adapter->dev,
4374 		    "Device has no supported external thermal sensor.\n");
4378 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4379 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4381 		device_printf(adapter->dev,
4382 		    "Error reading from PHY's temperature status register\n");
4386 	/* Get occurrence bit */
4387 	reg = !!(reg & 0x4000);
4388 	return (sysctl_handle_int(oidp, 0, reg, req));
4392 ** Thermal Shutdown Trigger (internal MAC)
4393 **   - Set this to 1 to cause an overtemp event to occur
/*
 * Debug sysctl: writing a non-zero value sets the thermal-sensor bit in
 * EICS, injecting a fake overtemp interrupt so the handler path can be
 * tested.  The `if (fire)` guard around the register write is on an
 * elided line.
 */
4396 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4398 	struct adapter *adapter = (struct adapter *) arg1;
4399 	struct ixgbe_hw *hw = &adapter->hw;
4400 	int error, fire = 0;
4402 	error = sysctl_handle_int(oidp, &fire, 0, req);
4403 	if ((error) || (req->newptr == NULL))
4407 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4408 		reg |= IXGBE_EICR_TS;
4409 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4416 ** Manage DMA Coalescing.
4418 ** 0/1 - off / on (use default value of 1000)
4420 ** Legal timer values are:
4421 ** 50,100,250,500,1000,2000,5000,10000
4423 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler for "dmac" (X550/X550EM only).  adapter->dmac is the
 * watchdog timer in usec; 1 maps to the 1000-usec default, illegal
 * values are rejected by restoring oldval, and a running interface is
 * re-initialized so ixgbe_config_dmac() picks up the change.
 */
4426 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4428 	struct adapter *adapter = (struct adapter *) arg1;
4429 	struct ixgbe_hw *hw = &adapter->hw;
4430 	struct ifnet *ifp = adapter->ifp;
4434 	oldval = adapter->dmac;
4435 	error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4436 	if ((error) || (req->newptr == NULL))
4439 	switch (hw->mac.type) {
4440 	case ixgbe_mac_X550:
4441 	case ixgbe_mac_X550EM_x:
4444 		device_printf(adapter->dev,
4445 		    "DMA Coalescing is only supported on X550 devices\n");
4449 	switch (adapter->dmac) {
4453 	case 1: /* Enable and use default */
4454 		adapter->dmac = 1000;
4464 		/* Legal values - allow */
4467 		/* Do nothing, illegal value */
4468 		adapter->dmac = oldval;
4472 	/* Re-initialize hardware if it's already running */
4473 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4474 		ixgbe_init(adapter);
4480  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Rejects enabling WoL when adapter->wol_support is false (error return
 * on an elided line); otherwise normalizes the value to 0/1 into
 * hw->wol_enabled, which ixgbe_setup_low_power_mode() consults.
 */
4486 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4488 	struct adapter *adapter = (struct adapter *) arg1;
4489 	struct ixgbe_hw *hw = &adapter->hw;
4490 	int new_wol_enabled;
4493 	new_wol_enabled = hw->wol_enabled;
4494 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4495 	if ((error) || (req->newptr == NULL))
4497 	if (new_wol_enabled == hw->wol_enabled)
4500 	if (new_wol_enabled > 0 && !adapter->wol_support)
4503 	hw->wol_enabled = !!(new_wol_enabled);
4509  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4510  * if supported by the adapter.
/*
 * Mirrors ixgbe_sysctl_wol_enable(): rejects enabling EEE without
 * adapter->eee_support, normalizes to 0/1, and re-inits a running
 * interface so the EEE setting takes effect in hardware.
 */
4516 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4518 	struct adapter *adapter = (struct adapter *) arg1;
4519 	struct ifnet *ifp = adapter->ifp;
4520 	int new_eee_enabled, error = 0;
4522 	new_eee_enabled = adapter->eee_enabled;
4523 	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4524 	if ((error) || (req->newptr == NULL))
4526 	if (new_eee_enabled == adapter->eee_enabled)
4529 	if (new_eee_enabled > 0 && !adapter->eee_support)
4532 	adapter->eee_enabled = !!(new_eee_enabled);
4534 	/* Re-initialize hardware if it's already running */
4535 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4536 		ixgbe_init(adapter);
4542  * Read-only sysctl indicating whether EEE support was negotiated
/* Reports the EEE_STAT negotiation bit as 0/1. */
4546 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4548 	struct adapter *adapter = (struct adapter *) arg1;
4549 	struct ixgbe_hw *hw = &adapter->hw;
4552 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4554 	return (sysctl_handle_int(oidp, 0, status, req));
4558  * Read-only sysctl indicating whether RX Link is in LPI state.
/* Reports the RX Low Power Idle bit of EEE_STAT as 0/1. */
4561 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4563 	struct adapter *adapter = (struct adapter *) arg1;
4564 	struct ixgbe_hw *hw = &adapter->hw;
4567 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4568 	    IXGBE_EEE_RX_LPI_STATUS);
4570 	return (sysctl_handle_int(oidp, 0, status, req));
4574  * Read-only sysctl indicating whether TX Link is in LPI state.
/* Reports the TX Low Power Idle bit of EEE_STAT as 0/1. */
4577 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4579 	struct adapter *adapter = (struct adapter *) arg1;
4580 	struct ixgbe_hw *hw = &adapter->hw;
4583 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4584 	    IXGBE_EEE_TX_LPI_STATUS);
4586 	return (sysctl_handle_int(oidp, 0, status, req));
4590  * Sysctl to enable/disable the types of packets that the
4591  * adapter will wake up on upon receipt.
4592  * WUFC - Wake Up Filter Control
4594  * 0x1  - Link Status Change
4595  * 0x2  - Magic Packet
4596  * 0x4  - Direct Exact
4597  * 0x8  - Directed Multicast
4599  * 0x20 - ARP/IPv4 Request Packet
4600  * 0x40 - Direct IPv4 Packet
4601  * 0x80 - Direct IPv6 Packet
4603  * Setting another flag will cause the sysctl to return an
/*
 * Updates adapter->wufc from userland.  Bits above the low byte are
 * rejected (returning an error on an elided line); the merge with the
 * existing value preserves the filter bits already programmed.  The new
 * mask is applied to hardware at suspend time by
 * ixgbe_setup_low_power_mode().
 */
4607 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4609 	struct adapter *adapter = (struct adapter *) arg1;
4613 	new_wufc = adapter->wufc;
4615 	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4616 	if ((error) || (req->newptr == NULL))
4618 	if (new_wufc == adapter->wufc)
4621 	if (new_wufc & 0xffffff00)
4625 	new_wufc |= (0xffffff & adapter->wufc);
4626 	adapter->wufc = new_wufc;
4633 ** Enable the hardware to drop packets when the buffer is
4634 ** full. This is useful when multiqueue,so that no single
4635 ** queue being full stalls the entire RX engine. We only
4636 ** enable this when Multiqueue AND when Flow Control is
/* Sets SRRCTL.DROP_EN on every RX queue; paired with
 * ixgbe_disable_rx_drop() and toggled from ixgbe_set_flowcntl(). */
4640 ixgbe_enable_rx_drop(struct adapter *adapter)
4642 	struct ixgbe_hw *hw = &adapter->hw;
4644 	for (int i = 0; i < adapter->num_queues; i++) {
4645 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4646 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4647 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
/* Clears SRRCTL.DROP_EN on every RX queue; inverse of
 * ixgbe_enable_rx_drop(), used when flow control is active. */
4652 ixgbe_disable_rx_drop(struct adapter *adapter)
4654 	struct ixgbe_hw *hw = &adapter->hw;
4656 	for (int i = 0; i < adapter->num_queues; i++) {
4657 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4658 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4659 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4664 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4668 switch (adapter->hw.mac.type) {
4669 case ixgbe_mac_82598EB:
4670 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4671 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4673 case ixgbe_mac_82599EB:
4674 case ixgbe_mac_X540:
4675 case ixgbe_mac_X550:
4676 case ixgbe_mac_X550EM_x:
4677 mask = (queues & 0xFFFFFFFF);
4678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4679 mask = (queues >> 32);
4680 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);