1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version string; reported to userland by ixgbe_probe()
 *  as part of the device description.
 *********************************************************************/
char ixgbe_driver_version[] = "2.7.4";
59 /*********************************************************************
62 * Used by probe to select devices to load on
63 * Last field stores an index into ixgbe_strings
64 * Last entry must be all 0s
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67 *********************************************************************/
69 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
101 /* required last entry */
/*********************************************************************
 *  Table of branding strings, indexed by the vendor table's
 *  String Index field and used by ixgbe_probe() to build the
 *  device description.
 *********************************************************************/

static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
113 /*********************************************************************
114 * Function prototypes
115 *********************************************************************/
116 static int ixgbe_probe(device_t);
117 static int ixgbe_attach(device_t);
118 static int ixgbe_detach(device_t);
119 static int ixgbe_shutdown(device_t);
120 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121 static void ixgbe_init(void *);
122 static void ixgbe_init_locked(struct adapter *);
123 static void ixgbe_stop(void *);
124 #if __FreeBSD_version >= 1100036
125 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
127 static void ixgbe_add_media_types(struct adapter *);
128 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129 static int ixgbe_media_change(struct ifnet *);
130 static void ixgbe_identify_hardware(struct adapter *);
131 static int ixgbe_allocate_pci_resources(struct adapter *);
132 static void ixgbe_get_slot_info(struct ixgbe_hw *);
133 static int ixgbe_allocate_msix(struct adapter *);
134 static int ixgbe_allocate_legacy(struct adapter *);
135 static int ixgbe_setup_msix(struct adapter *);
136 static void ixgbe_free_pci_resources(struct adapter *);
137 static void ixgbe_local_timer(void *);
138 static int ixgbe_setup_interface(device_t, struct adapter *);
139 static void ixgbe_config_link(struct adapter *);
140 static void ixgbe_rearm_queues(struct adapter *, u64);
142 static void ixgbe_initialize_transmit_units(struct adapter *);
143 static void ixgbe_initialize_receive_units(struct adapter *);
144 static void ixgbe_enable_rx_drop(struct adapter *);
145 static void ixgbe_disable_rx_drop(struct adapter *);
147 static void ixgbe_enable_intr(struct adapter *);
148 static void ixgbe_disable_intr(struct adapter *);
149 static void ixgbe_update_stats_counters(struct adapter *);
150 static void ixgbe_set_promisc(struct adapter *);
151 static void ixgbe_set_multi(struct adapter *);
152 static void ixgbe_update_link_status(struct adapter *);
153 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
155 static int ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
156 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
157 static void ixgbe_configure_ivars(struct adapter *);
158 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
160 static void ixgbe_setup_vlan_hw_support(struct adapter *);
161 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
164 static void ixgbe_add_hw_stats(struct adapter *adapter);
166 /* Support for pluggable optic modules */
167 static bool ixgbe_sfp_probe(struct adapter *);
168 static void ixgbe_setup_optics(struct adapter *);
/* Legacy (single vector) interrupt handler */
171 static void ixgbe_legacy_irq(void *);
173 /* The MSI/X Interrupt handlers */
174 static void ixgbe_msix_que(void *);
175 static void ixgbe_msix_link(void *);
177 /* Deferred interrupt tasklets */
178 static void ixgbe_handle_que(void *, int);
179 static void ixgbe_handle_link(void *, int);
180 static void ixgbe_handle_msf(void *, int);
181 static void ixgbe_handle_mod(void *, int);
184 static void ixgbe_reinit_fdir(void *, int);
188 /* Missing shared code prototype */
189 extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
191 /*********************************************************************
192 * FreeBSD Device Interface Entry Points
193 *********************************************************************/
195 static device_method_t ix_methods[] = {
196 /* Device interface */
197 DEVMETHOD(device_probe, ixgbe_probe),
198 DEVMETHOD(device_attach, ixgbe_attach),
199 DEVMETHOD(device_detach, ixgbe_detach),
200 DEVMETHOD(device_shutdown, ixgbe_shutdown),
204 static driver_t ix_driver = {
205 "ix", ix_methods, sizeof(struct adapter),
208 devclass_t ix_devclass;
209 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
211 MODULE_DEPEND(ix, pci, 1, 1, 1);
212 MODULE_DEPEND(ix, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/* sysctl tree root: hw.ix.* */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Upper bound used when programming interrupt throttling (EITR) */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time,"
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time,"
    "-1 means unlimited");

/*
** Smart speed setting, default to on
** this only works as a compile option
** right now as its during attach, set
** this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Defining this on will allow the use
** of unsupported SFP+ modules, note that
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;

/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 *
 * NOTE(review): the opening "#ifdef DEV_NETMAP" line for the #endif
 * below is not visible in this copy -- confirm it was not lost.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */
326 /*********************************************************************
327 * Device identification routine
329 * ixgbe_probe determines if the driver should be loaded on
330 * adapter based on PCI vendor/device id of the adapter.
332 * return BUS_PROBE_DEFAULT on success, positive on failure
333 *********************************************************************/
336 ixgbe_probe(device_t dev)
338 ixgbe_vendor_info_t *ent;
340 u16 pci_vendor_id = 0;
341 u16 pci_device_id = 0;
342 u16 pci_subvendor_id = 0;
343 u16 pci_subdevice_id = 0;
344 char adapter_name[256];
346 INIT_DEBUGOUT("ixgbe_probe: begin");
348 pci_vendor_id = pci_get_vendor(dev);
349 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
352 pci_device_id = pci_get_device(dev);
353 pci_subvendor_id = pci_get_subvendor(dev);
354 pci_subdevice_id = pci_get_subdevice(dev);
356 ent = ixgbe_vendor_info_array;
357 while (ent->vendor_id != 0) {
358 if ((pci_vendor_id == ent->vendor_id) &&
359 (pci_device_id == ent->device_id) &&
361 ((pci_subvendor_id == ent->subvendor_id) ||
362 (ent->subvendor_id == 0)) &&
364 ((pci_subdevice_id == ent->subdevice_id) ||
365 (ent->subdevice_id == 0))) {
366 sprintf(adapter_name, "%s, Version - %s",
367 ixgbe_strings[ent->index],
368 ixgbe_driver_version);
369 device_set_desc_copy(dev, adapter_name);
371 return (BUS_PROBE_DEFAULT);
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
/*
 * NOTE(review): the storage-class/return-type line ("static int"), the
 * opening brace, several local declarations (hw, error, csum, ctrl_ext,
 * s), and a number of error-path statements appear to have been lost in
 * this copy -- restore them from the upstream driver before building.
 */
ixgbe_attach(device_t dev)
	struct adapter *adapter;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;

	/* Core lock protects the adapter's shared state */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Sysctl "fc": flow-control mode (handled by ixgbe_set_flowcntl) */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	/* Sysctl "enable_aim": toggle adaptive interrupt moderation */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &ixgbe_enable_aim, 1, "Interrupt Moderation");

	/*
	** Allow a kind of speed control by forcing the autoneg
	** advertised speed list to only a certain value, this
	** supports 1G on 82599 devices, and 100Mb on x540.
	*/
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);

	/* Sysctl "ts": trigger a thermal self-test */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
	    0, ixgbe_set_thermal_test, "I", "Thermal Test");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		device_printf(dev,"Unable to initialize the shared code\n");

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");

	error = ixgbe_init_hw(hw);
	/* NOTE(review): the following reads like a "switch (error)" body */
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev,"Unsupported SFP+ Module\n");
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev,"No SFP+ Module found\n");

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	/* MSI-X when multiple vectors were granted, else legacy/MSI */
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
		error = ixgbe_allocate_legacy(adapter);

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/*
	** Check PCIE slot type/speed/width
	*/
	ixgbe_get_slot_info(hw);

	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_add_hw_stats(adapter);

	/* NOTE(review): "#ifdef DEV_NETMAP" for this #endif not visible */
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");

	/* Error unwind: release everything acquired above */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
/*
 * NOTE(review): "static int", the opening brace, the ctrl_ext local
 * declaration, and several statements (e.g. the locked stop call, the
 * final return) appear to have been lost in this copy.
 */
ixgbe_detach(device_t dev)
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");

	/* Quiesce the adapter under the core lock */
	IXGBE_CORE_LOCK(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free every per-queue taskqueue */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
#ifndef IXGBE_LEGACY_TX
		taskqueue_drain(que->tq, &txr->txq_task);
		taskqueue_drain(que->tq, &que->que_task);
		taskqueue_free(que->tq);

	/* Drain the Link queue */
	taskqueue_drain(adapter->tq, &adapter->link_task);
	taskqueue_drain(adapter->tq, &adapter->mod_task);
	taskqueue_drain(adapter->tq, &adapter->msf_task);
	taskqueue_drain(adapter->tq, &adapter->fdir_task);
	taskqueue_free(adapter->tq);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack and stop the watchdog timer */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	/* NOTE(review): "#ifdef DEV_NETMAP" for this #endif not visible */
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	/* Release rings, multicast array, and the core lock */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
674 /*********************************************************************
676 * Shutdown entry point
678 **********************************************************************/
681 ixgbe_shutdown(device_t dev)
683 struct adapter *adapter = device_get_softc(dev);
684 IXGBE_CORE_LOCK(adapter);
686 IXGBE_CORE_UNLOCK(adapter);
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
/*
 * NOTE(review): "static int", the opening brace, the "int error"
 * declaration, the "switch (command)" statement, and the case labels
 * and break statements appear to have been lost in this copy.  Each
 * surviving case body is annotated below with the request it handles.
 */
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool avoid_reset = FALSE;

	/* SIOCSIFADDR: flag address families that can skip a full reset */
	if (ifa->ifa_addr->sa_family == AF_INET)
	if (ifa->ifa_addr->sa_family == AF_INET6)
#if defined(INET) || defined(INET6)
	/*
	** Calling init results in link renegotiation,
	** so we avoid doing it when possible.
	*/
	ifp->if_flags |= IFF_UP;
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
	if (!(ifp->if_flags & IFF_NOARP))
		arp_ifinit(ifp, ifa);
	error = ether_ioctl(ifp, command, data);

	/* SIOCSIFMTU: validate, store, and re-init with the new MTU */
	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
	IXGBE_CORE_LOCK(adapter);
	ifp->if_mtu = ifr->ifr_mtu;
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* SIOCSIFFLAGS: promisc/allmulti changes avoid a full re-init */
	IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
	IXGBE_CORE_LOCK(adapter);
	if (ifp->if_flags & IFF_UP) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			if ((ifp->if_flags ^ adapter->if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				ixgbe_set_promisc(adapter);
			ixgbe_init_locked(adapter);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
	adapter->if_flags = ifp->if_flags;
	IXGBE_CORE_UNLOCK(adapter);

	/* SIOCADDMULTI / SIOCDELMULTI: reload the multicast filter */
	IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXGBE_CORE_LOCK(adapter);
		ixgbe_disable_intr(adapter);
		ixgbe_set_multi(adapter);
		ixgbe_enable_intr(adapter);
		IXGBE_CORE_UNLOCK(adapter);

	/* SIOCSIFMEDIA / SIOCGIFMEDIA: delegate to ifmedia */
	IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
	error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);

	/* SIOCSIFCAP: toggle offload capabilities, re-init if running */
	int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
	IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
	if (mask & IFCAP_HWCSUM)
		ifp->if_capenable ^= IFCAP_HWCSUM;
	if (mask & IFCAP_TSO4)
		ifp->if_capenable ^= IFCAP_TSO4;
	if (mask & IFCAP_TSO6)
		ifp->if_capenable ^= IFCAP_TSO6;
	if (mask & IFCAP_LRO)
		ifp->if_capenable ^= IFCAP_LRO;
	if (mask & IFCAP_VLAN_HWTAGGING)
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
	if (mask & IFCAP_VLAN_HWFILTER)
		ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
	if (mask & IFCAP_VLAN_HWTSO)
		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXGBE_CORE_LOCK(adapter);
		ixgbe_init_locked(adapter);
		IXGBE_CORE_UNLOCK(adapter);
	VLAN_CAPABILITIES(ifp);

#if __FreeBSD_version >= 1100036
	/* SIOCGI2C: read the SFP module EEPROM over I2C for userland */
	struct ixgbe_hw *hw = &adapter->hw;

	IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
	error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
	/* Only the SFP A0/A2 EEPROM pages are valid targets */
	if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
	/* Bound the requested length by the reply buffer */
	if (i2c.len > sizeof(i2c.data)) {
	for (i = 0; i < i2c.len; i++)
		hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
		    i2c.dev_addr, &i2c.data[i]);
	error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));

	/* default: hand everything else to the common ethernet ioctl */
	IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
	error = ether_ioctl(ifp, command, data);
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

/*
 * NOTE(review): "static void", the opening brace, and some local
 * declarations (e.g. rxdctl, rxctrl) appear to have been lost in this
 * copy, along with various braces/breaks inside the bodies below.
 */
ixgbe_init_locked(struct adapter *adapter)
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		/* 82598 does not support SCTP checksum offload */
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
		adapter->rx_mbuf_sz = MJUM16BYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);

	/* Add for Module detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN_BY_MAC(hw);

	/* Thermal Failure Detection */
	if (hw->mac.type == ixgbe_mac_X540)
		gpie |= IXGBE_SDP0_GPIEN_BY_MAC(hw);

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Jumbo MTU needs a matching max-frame-size in MHADD */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/* Poll (up to 10 reads) for the ring-enable to take */
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the watchdog/stats timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);

	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
		u32 rxpb, frame, size, tmp;

		frame = adapter->max_frame_size;

		/* Calculate High Water */
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_a:
		case ixgbe_mac_X550EM_x:
			tmp = IXGBE_DV_X540(frame, frame);
			tmp = IXGBE_DV(frame, frame);
		size = IXGBE_BT2KB(tmp);
		rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
		hw->fc.high_water[0] = rxpb - size;

		/* Now calculate Low Water */
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_a:
		case ixgbe_mac_X550EM_x:
			tmp = IXGBE_LOW_DV_X540(frame);
			tmp = IXGBE_LOW_DV(frame);
		hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

		hw->fc.requested_mode = adapter->fc;
		hw->fc.pause_time = IXGBE_FC_PAUSE;
		hw->fc.send_xon = TRUE;

	/* Initialize the FC settings */

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1134 ixgbe_init(void *arg)
1136 struct adapter *adapter = arg;
1138 IXGBE_CORE_LOCK(adapter);
1139 ixgbe_init_locked(adapter);
1140 IXGBE_CORE_UNLOCK(adapter);
1147 ** MSIX Interrupt Handlers and Tasklets
1152 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1154 struct ixgbe_hw *hw = &adapter->hw;
1155 u64 queue = (u64)(1 << vector);
1158 if (hw->mac.type == ixgbe_mac_82598EB) {
1159 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1160 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1162 mask = (queue & 0xFFFFFFFF);
1164 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1165 mask = (queue >> 32);
1167 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1172 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1174 struct ixgbe_hw *hw = &adapter->hw;
1175 u64 queue = (u64)(1 << vector);
1178 if (hw->mac.type == ixgbe_mac_82598EB) {
1179 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1180 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1182 mask = (queue & 0xFFFFFFFF);
1184 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1185 mask = (queue >> 32);
1187 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1192 ixgbe_handle_que(void *context, int pending)
1194 struct ix_queue *que = context;
1195 struct adapter *adapter = que->adapter;
1196 struct tx_ring *txr = que->txr;
1197 struct ifnet *ifp = adapter->ifp;
1199 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1203 #ifndef IXGBE_LEGACY_TX
1204 if (!drbr_empty(ifp, txr->br))
1205 ixgbe_mq_start_locked(ifp, txr);
1207 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1208 ixgbe_start_locked(txr, ifp);
1210 IXGBE_TX_UNLOCK(txr);
1213 /* Reenable this interrupt */
1214 if (que->res != NULL)
1215 ixgbe_enable_queue(adapter, que->msix);
1217 ixgbe_enable_intr(adapter);
1222 /*********************************************************************
1224 * Legacy Interrupt Service routine
1226 **********************************************************************/
1229 ixgbe_legacy_irq(void *arg)
1231 struct ix_queue *que = arg;
1232 struct adapter *adapter = que->adapter;
1233 struct ixgbe_hw *hw = &adapter->hw;
1234 struct ifnet *ifp = adapter->ifp;
1235 struct tx_ring *txr = adapter->tx_rings;
1240 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1243 if (reg_eicr == 0) {
1244 ixgbe_enable_intr(adapter);
1248 more = ixgbe_rxeof(que);
1252 #ifdef IXGBE_LEGACY_TX
1253 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1254 ixgbe_start_locked(txr, ifp);
1256 if (!drbr_empty(ifp, txr->br))
1257 ixgbe_mq_start_locked(ifp, txr);
1259 IXGBE_TX_UNLOCK(txr);
1261 /* Check for fan failure */
1262 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1263 (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1264 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1265 "REPLACE IMMEDIATELY!!\n");
1266 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1269 /* Link status change */
1270 if (reg_eicr & IXGBE_EICR_LSC)
1271 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1274 taskqueue_enqueue(que->tq, &que->que_task);
1276 ixgbe_enable_intr(adapter);
1281 /*********************************************************************
1283 * MSIX Queue Interrupt Service routine
1285 **********************************************************************/
1287 ixgbe_msix_que(void *arg)
1289 struct ix_queue *que = arg;
1290 struct adapter *adapter = que->adapter;
1291 struct ifnet *ifp = adapter->ifp;
1292 struct tx_ring *txr = que->txr;
1293 struct rx_ring *rxr = que->rxr;
1297 /* Protect against spurious interrupts */
1298 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1301 ixgbe_disable_queue(adapter, que->msix);
1304 more = ixgbe_rxeof(que);
1308 #ifdef IXGBE_LEGACY_TX
1309 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1310 ixgbe_start_locked(txr, ifp);
1312 if (!drbr_empty(ifp, txr->br))
1313 ixgbe_mq_start_locked(ifp, txr);
1315 IXGBE_TX_UNLOCK(txr);
1319 if (ixgbe_enable_aim == FALSE)
1322 ** Do Adaptive Interrupt Moderation:
1323 ** - Write out last calculated setting
1324 ** - Calculate based on average size over
1325 ** the last interval.
1327 if (que->eitr_setting)
1328 IXGBE_WRITE_REG(&adapter->hw,
1329 IXGBE_EITR(que->msix), que->eitr_setting);
1331 que->eitr_setting = 0;
1333 /* Idle, do nothing */
1334 if ((txr->bytes == 0) && (rxr->bytes == 0))
1337 if ((txr->bytes) && (txr->packets))
1338 newitr = txr->bytes/txr->packets;
1339 if ((rxr->bytes) && (rxr->packets))
1340 newitr = max(newitr,
1341 (rxr->bytes / rxr->packets));
1342 newitr += 24; /* account for hardware frame, crc */
1344 /* set an upper boundary */
1345 newitr = min(newitr, 3000);
1347 /* Be nice to the mid range */
1348 if ((newitr > 300) && (newitr < 1200))
1349 newitr = (newitr / 3);
1351 newitr = (newitr / 2);
1353 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1354 newitr |= newitr << 16;
1356 newitr |= IXGBE_EITR_CNT_WDIS;
1358 /* save for next interrupt */
1359 que->eitr_setting = newitr;
1369 taskqueue_enqueue(que->tq, &que->que_task);
1371 ixgbe_enable_queue(adapter, que->msix);
1377 ixgbe_msix_link(void *arg)
1379 struct adapter *adapter = arg;
1380 struct ixgbe_hw *hw = &adapter->hw;
1383 ++adapter->vector_irq;
1385 /* First get the cause */
1386 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1387 /* Be sure the queue bits are not cleared */
1388 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1389 /* Clear interrupt with write */
1390 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1392 /* Link status change */
1393 if (reg_eicr & IXGBE_EICR_LSC)
1394 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1396 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1398 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1399 /* This is probably overkill :) */
1400 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1402 /* Disable the interrupt */
1403 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1404 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1407 if (reg_eicr & IXGBE_EICR_ECC) {
1408 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1409 "Please Reboot!!\n");
1410 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1413 if (ixgbe_is_sfp(hw)) {
1414 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1415 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1416 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1417 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1418 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
1419 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1424 /* Check for fan failure */
1425 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1426 (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1427 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1428 "REPLACE IMMEDIATELY!!\n");
1429 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1432 /* Check for over temp condition */
1433 switch (hw->mac.type) {
1434 case ixgbe_mac_X540:
1435 case ixgbe_mac_X550:
1436 case ixgbe_mac_X550EM_a:
1437 if (reg_eicr & IXGBE_EICR_TS) {
1438 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1439 "PHY IS SHUT DOWN!!\n");
1440 device_printf(adapter->dev, "System shutdown required\n");
1441 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1445 /* Other MACs have no thermal sensor interrupt */
1449 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1453 /*********************************************************************
1455 * Media Ioctl callback
1457 * This routine is called whenever the user queries the status of
1458 * the interface using ifconfig.
1460 **********************************************************************/
1462 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1464 struct adapter *adapter = ifp->if_softc;
1465 struct ixgbe_hw *hw = &adapter->hw;
1468 INIT_DEBUGOUT("ixgbe_media_status: begin");
1469 IXGBE_CORE_LOCK(adapter);
1470 ixgbe_update_link_status(adapter);
1472 ifmr->ifm_status = IFM_AVALID;
1473 ifmr->ifm_active = IFM_ETHER;
1475 if (!adapter->link_active) {
1476 IXGBE_CORE_UNLOCK(adapter);
1480 ifmr->ifm_status |= IFM_ACTIVE;
1481 layer = ixgbe_get_supported_physical_layer(hw);
1483 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1484 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1485 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1486 switch (adapter->link_speed) {
1487 case IXGBE_LINK_SPEED_10GB_FULL:
1488 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1490 case IXGBE_LINK_SPEED_1GB_FULL:
1491 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1493 case IXGBE_LINK_SPEED_100_FULL:
1494 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1497 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1498 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1499 switch (adapter->link_speed) {
1500 case IXGBE_LINK_SPEED_10GB_FULL:
1501 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1504 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1505 switch (adapter->link_speed) {
1506 case IXGBE_LINK_SPEED_10GB_FULL:
1507 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1509 case IXGBE_LINK_SPEED_1GB_FULL:
1510 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1513 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1514 switch (adapter->link_speed) {
1515 case IXGBE_LINK_SPEED_10GB_FULL:
1516 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1518 case IXGBE_LINK_SPEED_1GB_FULL:
1519 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1522 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1523 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1524 switch (adapter->link_speed) {
1525 case IXGBE_LINK_SPEED_10GB_FULL:
1526 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1528 case IXGBE_LINK_SPEED_1GB_FULL:
1529 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1532 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1533 switch (adapter->link_speed) {
1534 case IXGBE_LINK_SPEED_10GB_FULL:
1535 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1539 ** XXX: These need to use the proper media types once
1542 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1543 switch (adapter->link_speed) {
1544 case IXGBE_LINK_SPEED_10GB_FULL:
1545 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1547 case IXGBE_LINK_SPEED_1GB_FULL:
1548 ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
1551 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1552 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1553 switch (adapter->link_speed) {
1554 case IXGBE_LINK_SPEED_10GB_FULL:
1555 ifmr->ifm_active |= IFM_10_2 | IFM_FDX;
1557 case IXGBE_LINK_SPEED_1GB_FULL:
1558 ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
1562 /* If nothing is recognized... */
1563 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1564 ifmr->ifm_active |= IFM_UNKNOWN;
1566 #if __FreeBSD_version >= 900025
1567 /* Flow control setting */
1568 if (adapter->fc == ixgbe_fc_rx_pause || adapter->fc == ixgbe_fc_full)
1569 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1570 if (adapter->fc == ixgbe_fc_tx_pause || adapter->fc == ixgbe_fc_full)
1571 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1574 IXGBE_CORE_UNLOCK(adapter);
1579 /*********************************************************************
1581 * Media Ioctl callback
1583 * This routine is called when the user changes speed/duplex using
1584 * media/mediopt option with ifconfig.
1586 **********************************************************************/
1588 ixgbe_media_change(struct ifnet * ifp)
1590 struct adapter *adapter = ifp->if_softc;
1591 struct ifmedia *ifm = &adapter->media;
1592 struct ixgbe_hw *hw = &adapter->hw;
1593 ixgbe_link_speed speed = 0;
1595 INIT_DEBUGOUT("ixgbe_media_change: begin");
1597 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1601 ** We don't actually need to check against the supported
1602 ** media types of the adapter; ifmedia will take care of
1604 ** NOTE: this relies on falling thru the switch
1605 ** to get all the values set, it can be confusing.
1607 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1610 speed |= IXGBE_LINK_SPEED_100_FULL;
1612 case IFM_10G_SR: /* KR, too */
1614 case IFM_10G_CX4: /* KX4 for now */
1615 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1616 case IFM_10G_TWINAX:
1617 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1620 speed |= IXGBE_LINK_SPEED_100_FULL;
1623 case IFM_1000_CX: /* KX until there's real support */
1624 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1627 speed |= IXGBE_LINK_SPEED_100_FULL;
1633 hw->mac.autotry_restart = TRUE;
1634 hw->mac.ops.setup_link(hw, speed, TRUE);
1635 adapter->advertise =
1636 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1637 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1638 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1643 device_printf(adapter->dev, "Invalid media type\n");
1648 ixgbe_set_promisc(struct adapter *adapter)
1651 struct ifnet *ifp = adapter->ifp;
1654 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1655 reg_rctl &= (~IXGBE_FCTRL_UPE);
1656 if (ifp->if_flags & IFF_ALLMULTI)
1657 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1659 struct ifmultiaddr *ifma;
1660 #if __FreeBSD_version < 800000
1663 if_maddr_rlock(ifp);
1665 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1666 if (ifma->ifma_addr->sa_family != AF_LINK)
1668 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1672 #if __FreeBSD_version < 800000
1673 IF_ADDR_UNLOCK(ifp);
1675 if_maddr_runlock(ifp);
1678 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1679 reg_rctl &= (~IXGBE_FCTRL_MPE);
1680 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1682 if (ifp->if_flags & IFF_PROMISC) {
1683 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1684 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1685 } else if (ifp->if_flags & IFF_ALLMULTI) {
1686 reg_rctl |= IXGBE_FCTRL_MPE;
1687 reg_rctl &= ~IXGBE_FCTRL_UPE;
1688 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1694 /*********************************************************************
1697 * This routine is called whenever multicast address list is updated.
1699 **********************************************************************/
1700 #define IXGBE_RAR_ENTRIES 16
1703 ixgbe_set_multi(struct adapter *adapter)
1708 struct ifmultiaddr *ifma;
1710 struct ifnet *ifp = adapter->ifp;
1712 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1715 bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1716 MAX_NUM_MULTICAST_ADDRESSES);
1718 #if __FreeBSD_version < 800000
1721 if_maddr_rlock(ifp);
1723 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1724 if (ifma->ifma_addr->sa_family != AF_LINK)
1726 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1728 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1729 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1730 IXGBE_ETH_LENGTH_OF_ADDRESS);
1733 #if __FreeBSD_version < 800000
1734 IF_ADDR_UNLOCK(ifp);
1736 if_maddr_runlock(ifp);
1739 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1740 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1741 if (ifp->if_flags & IFF_PROMISC)
1742 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1743 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1744 ifp->if_flags & IFF_ALLMULTI) {
1745 fctrl |= IXGBE_FCTRL_MPE;
1746 fctrl &= ~IXGBE_FCTRL_UPE;
1748 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1750 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1752 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1754 ixgbe_update_mc_addr_list(&adapter->hw,
1755 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1762 * This is an iterator function now needed by the multicast
1763 * shared code. It simply feeds the shared code routine the
1764 * addresses in the array of ixgbe_set_multi() one by one.
1767 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1769 u8 *addr = *update_ptr;
1773 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1774 *update_ptr = newptr;
1779 /*********************************************************************
1782 * This routine checks for link status,updates statistics,
1783 * and runs the watchdog check.
1785 **********************************************************************/
1788 ixgbe_local_timer(void *arg)
1790 struct adapter *adapter = arg;
1791 device_t dev = adapter->dev;
1792 struct ix_queue *que = adapter->queues;
1796 mtx_assert(&adapter->core_mtx, MA_OWNED);
1798 /* Check for pluggable optics */
1799 if (adapter->sfp_probe)
1800 if (!ixgbe_sfp_probe(adapter))
1801 goto out; /* Nothing to do */
1803 ixgbe_update_link_status(adapter);
1804 ixgbe_update_stats_counters(adapter);
1807 ** Check the TX queues status
1808 ** - mark hung queues so we don't schedule on them
1809 ** - watchdog only if all queues show hung
1811 for (int i = 0; i < adapter->num_queues; i++, que++) {
1812 /* Keep track of queues with work for soft irq */
1814 queues |= ((u64)1 << que->me);
1816 ** Each time txeof runs without cleaning, but there
1817 ** are uncleaned descriptors it increments busy. If
1818 ** we get to the MAX we declare it hung.
1820 if (que->busy == IXGBE_QUEUE_HUNG) {
1822 /* Mark the queue as inactive */
1823 adapter->active_queues &= ~((u64)1 << que->me);
1826 /* Check if we've come back from hung */
1827 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1828 adapter->active_queues |= ((u64)1 << que->me);
1830 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1831 device_printf(dev,"Warning queue %d "
1832 "appears to be hung!\n", i);
1833 que->txr->busy = IXGBE_QUEUE_HUNG;
1839 /* Only truly watchdog if all queues show hung */
1840 if (hung == adapter->num_queues)
1842 else if (queues != 0) { /* Force an IRQ on queues with work */
1843 ixgbe_rearm_queues(adapter, queues);
1847 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1851 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1852 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1853 adapter->watchdog_events++;
1854 ixgbe_init_locked(adapter);
1858 ** Note: this routine updates the OS on the link state
1859 ** the real check of the hardware only happens with
1860 ** a link interrupt.
1863 ixgbe_update_link_status(struct adapter *adapter)
1865 struct ifnet *ifp = adapter->ifp;
1866 device_t dev = adapter->dev;
1869 if (adapter->link_up){
1870 if (adapter->link_active == FALSE) {
1872 device_printf(dev,"Link is up %d Gbps %s \n",
1873 ((adapter->link_speed == 128)? 10:1),
1875 adapter->link_active = TRUE;
1876 /* Update any Flow Control changes */
1877 ixgbe_fc_enable(&adapter->hw);
1878 if_link_state_change(ifp, LINK_STATE_UP);
1880 } else { /* Link down */
1881 if (adapter->link_active == TRUE) {
1883 device_printf(dev,"Link is Down\n");
1884 if_link_state_change(ifp, LINK_STATE_DOWN);
1885 adapter->link_active = FALSE;
1893 /*********************************************************************
1895 * This routine disables all traffic on the adapter by issuing a
1896 * global reset on the MAC and deallocates TX/RX buffers.
1898 **********************************************************************/
1901 ixgbe_stop(void *arg)
1904 struct adapter *adapter = arg;
1905 struct ixgbe_hw *hw = &adapter->hw;
1908 mtx_assert(&adapter->core_mtx, MA_OWNED);
1910 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1911 ixgbe_disable_intr(adapter);
1912 callout_stop(&adapter->timer);
1914 /* Let the stack know...*/
1915 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1918 hw->adapter_stopped = FALSE;
1919 ixgbe_stop_adapter(hw);
1920 if (hw->mac.type == ixgbe_mac_82599EB)
1921 ixgbe_stop_mac_link_on_d3_82599(hw);
1922 /* Turn off the laser - noop with no optics */
1923 ixgbe_disable_tx_laser(hw);
1925 /* Update the stack */
1926 adapter->link_up = FALSE;
1927 ixgbe_update_link_status(adapter);
1929 /* reprogram the RAR[0] in case user changed it. */
1930 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1936 /*********************************************************************
1938 * Determine hardware revision.
1940 **********************************************************************/
1942 ixgbe_identify_hardware(struct adapter *adapter)
1944 device_t dev = adapter->dev;
1945 struct ixgbe_hw *hw = &adapter->hw;
1947 /* Save off the information about this board */
1948 hw->vendor_id = pci_get_vendor(dev);
1949 hw->device_id = pci_get_device(dev);
1950 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1951 hw->subsystem_vendor_id =
1952 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1953 hw->subsystem_device_id =
1954 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1957 ** Make sure BUSMASTER is set
1959 pci_enable_busmaster(dev);
1961 /* We need this here to set the num_segs below */
1962 ixgbe_set_mac_type(hw);
1964 /* Pick up the 82599 and VF settings */
1965 if (hw->mac.type != ixgbe_mac_82598EB) {
1966 hw->phy.smart_speed = ixgbe_smart_speed;
1967 adapter->num_segs = IXGBE_82599_SCATTER;
1969 adapter->num_segs = IXGBE_82598_SCATTER;
1974 /*********************************************************************
1976 * Determine optic type
1978 **********************************************************************/
1980 ixgbe_setup_optics(struct adapter *adapter)
1982 struct ixgbe_hw *hw = &adapter->hw;
1985 layer = ixgbe_get_supported_physical_layer(hw);
1987 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1988 adapter->optics = IFM_10G_T;
1992 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1993 adapter->optics = IFM_1000_T;
1997 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1998 adapter->optics = IFM_1000_SX;
2002 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2003 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2004 adapter->optics = IFM_10G_LR;
2008 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2009 adapter->optics = IFM_10G_SR;
2013 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2014 adapter->optics = IFM_10G_TWINAX;
2018 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2019 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2020 adapter->optics = IFM_10G_CX4;
2024 /* If we get here just set the default */
2025 adapter->optics = IFM_ETHER | IFM_AUTO;
2029 /*********************************************************************
2031 * Setup the Legacy or MSI Interrupt handler
2033 **********************************************************************/
2035 ixgbe_allocate_legacy(struct adapter *adapter)
2037 device_t dev = adapter->dev;
2038 struct ix_queue *que = adapter->queues;
2039 #ifndef IXGBE_LEGACY_TX
2040 struct tx_ring *txr = adapter->tx_rings;
2045 if (adapter->msix == 1)
2048 /* We allocate a single interrupt resource */
2049 adapter->res = bus_alloc_resource_any(dev,
2050 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2051 if (adapter->res == NULL) {
2052 device_printf(dev, "Unable to allocate bus resource: "
2058 * Try allocating a fast interrupt and the associated deferred
2059 * processing contexts.
2061 #ifndef IXGBE_LEGACY_TX
2062 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2064 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2065 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2066 taskqueue_thread_enqueue, &que->tq);
2067 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2068 device_get_nameunit(adapter->dev));
2070 /* Tasklets for Link, SFP and Multispeed Fiber */
2071 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2072 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2073 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2075 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2077 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2078 taskqueue_thread_enqueue, &adapter->tq);
2079 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2080 device_get_nameunit(adapter->dev));
2082 if ((error = bus_setup_intr(dev, adapter->res,
2083 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2084 que, &adapter->tag)) != 0) {
2085 device_printf(dev, "Failed to register fast interrupt "
2086 "handler: %d\n", error);
2087 taskqueue_free(que->tq);
2088 taskqueue_free(adapter->tq);
2093 /* For simplicity in the handlers */
2094 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2100 /*********************************************************************
2102 * Setup MSIX Interrupt resources and handlers
2104 **********************************************************************/
2106 ixgbe_allocate_msix(struct adapter *adapter)
2108 device_t dev = adapter->dev;
2109 struct ix_queue *que = adapter->queues;
2110 struct tx_ring *txr = adapter->tx_rings;
2111 int error, rid, vector = 0;
2119 * If we're doing RSS, the number of queues needs to
2120 * match the number of RSS buckets that are configured.
2122 * + If there's more queues than RSS buckets, we'll end
2123 * up with queues that get no traffic.
2125 * + If there's more RSS buckets than queues, we'll end
2126 * up having multiple RSS buckets map to the same queue,
2127 * so there'll be some contention.
2129 if (adapter->num_queues != rss_getnumbuckets()) {
2131 "%s: number of queues (%d) != number of RSS buckets (%d)"
2132 "; performance will be impacted.\n",
2134 adapter->num_queues,
2135 rss_getnumbuckets());
2141 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2143 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2144 RF_SHAREABLE | RF_ACTIVE);
2145 if (que->res == NULL) {
2146 device_printf(dev,"Unable to allocate"
2147 " bus resource: que interrupt [%d]\n", vector);
2150 /* Set the handler function */
2151 error = bus_setup_intr(dev, que->res,
2152 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2153 ixgbe_msix_que, que, &que->tag);
2156 device_printf(dev, "Failed to register QUE handler");
2159 #if __FreeBSD_version >= 800504
2160 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2163 adapter->active_queues |= (u64)(1 << que->msix);
2166 * The queue ID is used as the RSS layer bucket ID.
2167 * We look up the queue ID -> RSS CPU ID and select
2170 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2173 * Bind the msix vector, and thus the
2174 * rings to the corresponding cpu.
2176 * This just happens to match the default RSS round-robin
2177 * bucket -> queue -> CPU allocation.
2179 if (adapter->num_queues > 1)
2182 if (adapter->num_queues > 1)
2183 bus_bind_intr(dev, que->res, cpu_id);
2187 "Bound RSS bucket %d to CPU %d\n",
2190 #if 0 // This is too noisy
2192 "Bound queue %d to cpu %d\n",
2198 #ifndef IXGBE_LEGACY_TX
2199 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2201 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2202 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2203 taskqueue_thread_enqueue, &que->tq);
2205 CPU_SETOF(cpu_id, &cpu_mask);
2206 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2209 device_get_nameunit(adapter->dev),
2212 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2213 device_get_nameunit(adapter->dev));
2219 adapter->res = bus_alloc_resource_any(dev,
2220 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2221 if (!adapter->res) {
2222 device_printf(dev,"Unable to allocate"
2223 " bus resource: Link interrupt [%d]\n", rid);
2226 /* Set the link handler function */
2227 error = bus_setup_intr(dev, adapter->res,
2228 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2229 ixgbe_msix_link, adapter, &adapter->tag);
2231 adapter->res = NULL;
2232 device_printf(dev, "Failed to register LINK handler");
2235 #if __FreeBSD_version >= 800504
2236 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2238 adapter->vector = vector;
2239 /* Tasklets for Link, SFP and Multispeed Fiber */
2240 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2241 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2242 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2244 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2246 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2247 taskqueue_thread_enqueue, &adapter->tq);
2248 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2249 device_get_nameunit(adapter->dev));
2255 * Setup Either MSI/X or MSI
2258 ixgbe_setup_msix(struct adapter *adapter)
2260 device_t dev = adapter->dev;
2261 int rid, want, queues, msgs;
2263 /* Override by tuneable */
2264 if (ixgbe_enable_msix == 0)
2267 /* First try MSI/X */
2268 msgs = pci_msix_count(dev);
2271 rid = PCIR_BAR(MSIX_82598_BAR);
2272 adapter->msix_mem = bus_alloc_resource_any(dev,
2273 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2274 if (adapter->msix_mem == NULL) {
2275 rid += 4; /* 82599 maps in higher BAR */
2276 adapter->msix_mem = bus_alloc_resource_any(dev,
2277 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2279 if (adapter->msix_mem == NULL) {
2280 /* May not be enabled */
2281 device_printf(adapter->dev,
2282 "Unable to map MSIX table \n");
2286 /* Figure out a reasonable auto config value */
2287 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2290 /* If we're doing RSS, clamp at the number of RSS buckets */
2291 if (queues > rss_getnumbuckets())
2292 queues = rss_getnumbuckets();
2295 if (ixgbe_num_queues != 0)
2296 queues = ixgbe_num_queues;
2298 /* reflect correct sysctl value */
2299 ixgbe_num_queues = queues;
2302 ** Want one vector (RX/TX pair) per queue
2303 ** plus an additional for Link.
2309 device_printf(adapter->dev,
2310 "MSIX Configuration Problem, "
2311 "%d vectors but %d queues wanted!\n",
2315 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2316 device_printf(adapter->dev,
2317 "Using MSIX interrupts with %d vectors\n", msgs);
2318 adapter->num_queues = queues;
2322 ** If MSIX alloc failed or provided us with
2323 ** less than needed, free and fall through to MSI
2325 pci_release_msi(dev);
2328 if (adapter->msix_mem != NULL) {
2329 bus_release_resource(dev, SYS_RES_MEMORY,
2330 rid, adapter->msix_mem);
2331 adapter->msix_mem = NULL;
2334 if (pci_alloc_msi(dev, &msgs) == 0) {
2335 device_printf(adapter->dev,"Using an MSI interrupt\n");
2338 device_printf(adapter->dev,"Using a Legacy interrupt\n");
2344 ixgbe_allocate_pci_resources(struct adapter *adapter)
2347 device_t dev = adapter->dev;
2350 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2353 if (!(adapter->pci_mem)) {
2354 device_printf(dev,"Unable to allocate bus resource: memory\n");
2358 adapter->osdep.mem_bus_space_tag =
2359 rman_get_bustag(adapter->pci_mem);
2360 adapter->osdep.mem_bus_space_handle =
2361 rman_get_bushandle(adapter->pci_mem);
2362 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2364 /* Legacy defaults */
2365 adapter->num_queues = 1;
2366 adapter->hw.back = &adapter->osdep;
2369 ** Now setup MSI or MSI/X, should
2370 ** return us the number of supported
2371 ** vectors. (Will be 1 for MSI)
2373 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - release IRQ/MSI-X/memory resources in
 * reverse order of allocation.
 *
 * Tears down per-queue interrupt handlers and IRQ resources, then the
 * link/legacy interrupt, then MSI vectors, the MSI-X BAR mapping, and
 * finally the main memory BAR.  Safe to call from a failed attach:
 * adapter->res == NULL is used as the "interrupts never set up" guard.
 * NOTE(review): extraction dropped lines (braces/returns not visible);
 * code below is verbatim.
 */
2378 ixgbe_free_pci_resources(struct adapter * adapter)
2380 	struct ix_queue *que = adapter->queues;
2381 	device_t dev = adapter->dev;
	/* MSI-X table lives in a different BAR on 82598 vs 82599+ */
2384 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2385 		memrid = PCIR_BAR(MSIX_82598_BAR);
2387 		memrid = PCIR_BAR(MSIX_82599_BAR);
2390 	** There is a slight possibility of a failure mode
2391 	** in attach that will result in entering this function
2392 	** before interrupt resources have been initialized, and
2393 	** in that case we do not want to execute the loops below
2394 	** We can detect this reliably by the state of the adapter
2397 	if (adapter->res == NULL)
2401 	** Release all msix queue resources:
2403 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2404 		rid = que->msix + 1;
2405 		if (que->tag != NULL) {
2406 			bus_teardown_intr(dev, que->res, que->tag);
2409 		if (que->res != NULL)
2410 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2414 	/* Clean the Legacy or Link interrupt last */
2415 	if (adapter->vector) /* we are doing MSIX */
2416 		rid = adapter->vector + 1;
2418 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2420 	if (adapter->tag != NULL) {
2421 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2422 		adapter->tag = NULL;
2424 	if (adapter->res != NULL)
2425 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2429 	pci_release_msi(dev);
2431 	if (adapter->msix_mem != NULL)
2432 		bus_release_resource(dev, SYS_RES_MEMORY,
2433 		    memrid, adapter->msix_mem);
2435 	if (adapter->pci_mem != NULL)
2436 		bus_release_resource(dev, SYS_RES_MEMORY,
2437 		    PCIR_BAR(0), adapter->pci_mem);
2442 /*********************************************************************
2444 * Setup networking device structure and register an interface.
2446 **********************************************************************/
/*
 * ixgbe_setup_interface - allocate and register the network interface.
 *
 * Creates the ifnet, wires up init/ioctl/transmit entry points,
 * attaches the Ethernet layer, advertises capabilities (checksum
 * offload, TSO, jumbo frames, LRO, VLAN tagging), and initializes
 * the ifmedia list via ixgbe_add_media_types(), defaulting to
 * autoselect.
 * NOTE(review): extraction dropped lines (#endif/#else and error
 * returns are not visible); code below is verbatim.
 */
2448 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2452 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2454 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2456 		device_printf(dev, "can not allocate ifnet structure\n");
2459 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2460 	ifp->if_baudrate = IF_Gbps(10);
2461 	ifp->if_init = ixgbe_init;
2462 	ifp->if_softc = adapter;
2463 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2464 	ifp->if_ioctl = ixgbe_ioctl;
2465 #if __FreeBSD_version >= 1100036
2466 	if_setgetcounterfn(ifp, ixgbe_get_counter);
	/* Multiqueue transmit path unless legacy if_start is forced */
2468 #ifndef IXGBE_LEGACY_TX
2469 	ifp->if_transmit = ixgbe_mq_start;
2470 	ifp->if_qflush = ixgbe_qflush;
2472 	ifp->if_start = ixgbe_start;
2473 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2474 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2475 	IFQ_SET_READY(&ifp->if_snd);
2478 	ether_ifattach(ifp, adapter->hw.mac.addr);
2480 	adapter->max_frame_size =
2481 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2484 	 * Tell the upper layer(s) we support long frames.
2486 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2488 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2489 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2490 	ifp->if_capabilities |= IFCAP_LRO;
2491 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2495 	ifp->if_capenable = ifp->if_capabilities;
2498 	** Don't turn this on by default, if vlans are
2499 	** created on another pseudo device (eg. lagg)
2500 	** then vlan events are not passed thru, breaking
2501 	** operation, but with HW FILTER off it works. If
2502 	** using vlans directly on the ixgbe driver you can
2503 	** enable this and get full hardware tag filtering.
2505 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2508 	 * Specify the media types supported by this adapter and register
2509 	 * callbacks to update media and link information
2511 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2512 	    ixgbe_media_status);
2514 	ixgbe_add_media_types(adapter);
2516 	/* Autoselect media by default */
2517 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types - populate the ifmedia list from the PHY's
 * supported physical-layer bitmap.
 *
 * Media with a matching FreeBSD IFM_* define are added directly;
 * backplane types (KR/KX4/KX) have no FreeBSD media define, so they
 * are deliberately mapped to unrelated legacy types (10baseT/10base2/
 * 10base5) as a workaround, with a console note.  Always finishes by
 * adding IFM_AUTO.
 * NOTE(review): code below is verbatim from a lossy extraction.
 */
2523 ixgbe_add_media_types(struct adapter *adapter)
2525 	struct ixgbe_hw *hw = &adapter->hw;
2526 	device_t dev = adapter->dev;
2529 	layer = ixgbe_get_supported_physical_layer(hw);
2531 	/* Media types with matching FreeBSD media defines */
2532 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2533 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2534 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2535 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2536 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2537 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2539 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2540 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2541 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2543 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2544 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2545 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2546 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2547 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2548 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2549 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2550 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2552 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
2553 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2557 	** Other (no matching FreeBSD media type):
2558 	** To workaround this, we'll assign these completely
2559 	** inappropriate media types.
2561 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2562 		device_printf(dev, "Media supported: 10GbaseKR\n");
2563 		device_printf(dev, "10GbaseKR mapped to 10baseT\n");
2564 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2566 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2567 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2568 		device_printf(dev, "10GbaseKX4 mapped to 10base2\n");
2569 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_2, 0, NULL);
2571 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2572 		device_printf(dev, "Media supported: 1000baseKX\n");
2573 		device_printf(dev, "1000baseKX mapped to 10base5\n");
2574 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_5, 0, NULL);
2576 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2577 		/* Someday, someone will care about you... */
2578 		device_printf(dev, "Media supported: 1000baseBX\n");
	/* 82598AT also supports 1000baseT (with and without FDX) */
2582 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2583 		ifmedia_add(&adapter->media,
2584 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2585 		ifmedia_add(&adapter->media,
2586 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2589 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link - kick off link setup.
 *
 * For SFP ports: multispeed fiber gets setup_sfp() + TX laser enable
 * and the MSF task is queued; otherwise the module-insertion task is
 * queued.  For non-SFP ports: check link, then negotiate/advertise
 * speeds and call the MAC's setup_link().
 * NOTE(review): extraction dropped lines (the sfp/else branching and
 * braces are not fully visible); code below is verbatim.
 */
2593 ixgbe_config_link(struct adapter *adapter)
2595 	struct ixgbe_hw *hw = &adapter->hw;
2596 	u32 autoneg, err = 0;
2597 	bool sfp, negotiate;
2599 	sfp = ixgbe_is_sfp(hw);
2602 		if (hw->phy.multispeed_fiber) {
2603 			hw->mac.ops.setup_sfp(hw);
2604 			ixgbe_enable_tx_laser(hw);
2605 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2607 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2609 		if (hw->mac.ops.check_link)
2610 			err = ixgbe_check_link(hw, &adapter->link_speed,
2611 			    &adapter->link_up, FALSE);
	/* If nothing is advertised, fall back to HW capabilities */
2614 		autoneg = hw->phy.autoneg_advertised;
2615 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2616 			err = hw->mac.ops.get_link_capabilities(hw,
2617 			    &autoneg, &negotiate);
2620 		if (hw->mac.ops.setup_link)
2621 			err = hw->mac.ops.setup_link(hw,
2622 			    autoneg, adapter->link_up);
2629 /*********************************************************************
2631 * Enable transmit units.
2633 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units - program the TX descriptor rings.
 *
 * Per ring: write the DMA base address (TDBAL/TDBAH), ring length
 * (TDLEN), zero head/tail (TDH/TDT), cache the tail register, set
 * the software processing limit, and disable head write-back via
 * DCA_TXCTRL (register location differs on 82598 vs 82599/X540).
 * On non-82598 parts, globally enable TX DMA and reprogram MTQC
 * with the arbiter temporarily disabled.
 * NOTE(review): switch 'break's/#ifdefs dropped by extraction; code
 * below is verbatim.
 */
2635 ixgbe_initialize_transmit_units(struct adapter *adapter)
2637 	struct tx_ring *txr = adapter->tx_rings;
2638 	struct ixgbe_hw *hw = &adapter->hw;
2640 	/* Setup the Base and Length of the Tx Descriptor Ring */
2642 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2643 		u64 tdba = txr->txdma.dma_paddr;
2646 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2647 		    (tdba & 0x00000000ffffffffULL));
2648 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2649 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2650 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2652 		/* Setup the HW Tx Head and Tail descriptor pointers */
2653 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2654 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2656 		/* Cache the tail address */
2657 		txr->tail = IXGBE_TDT(txr->me);
2659 		/* Set the processing limit */
2660 		txr->process_limit = ixgbe_tx_process_limit;
2662 		/* Disable Head Writeback */
2663 		switch (hw->mac.type) {
2664 		case ixgbe_mac_82598EB:
2665 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2667 		case ixgbe_mac_82599EB:
2668 		case ixgbe_mac_X540:
2670 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2673 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2674 		switch (hw->mac.type) {
2675 		case ixgbe_mac_82598EB:
2676 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2678 		case ixgbe_mac_82599EB:
2679 		case ixgbe_mac_X540:
2681 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
	/* 82599+: enable DMA TX and set MTQC (arbiter must be off) */
2687 	if (hw->mac.type != ixgbe_mac_82598EB) {
2688 		u32 dmatxctl, rttdcs;
2689 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2690 		dmatxctl |= IXGBE_DMATXCTL_TE;
2691 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2692 		/* Disable arbiter to set MTQC */
2693 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2694 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2695 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2696 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2697 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2698 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialise_rss_mapping - program the RSS redirection table,
 * hash key, and hash-field selection (MRQC).
 *
 * With kernel RSS (net/rss_config.h): use the system RSS key and map
 * each of the 128 RETA entries via rss_get_indirection_to_bucket(),
 * capped to the configured queue count, then translate the kernel's
 * RSS_HASHTYPE_* config into IXGBE_MRQC_RSS_FIELD_* bits.
 * Without kernel RSS: use a random key, a simple round-robin RETA,
 * and a fixed hash-field set (UDP fields commented out upstream
 * because IP fragments would mix 2-tuple and 4-tuple hashing).
 * NOTE(review): the #ifdef RSS / #else structure was dropped by the
 * extraction, so both variants appear interleaved below; code is
 * verbatim.
 */
2705 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2707 	struct ixgbe_hw *hw = &adapter->hw;
2710 	uint32_t rss_key[10];
2713 	uint32_t rss_hash_config;
2720 	/* Fetch the configured RSS key */
2721 	rss_getkey((uint8_t *) &rss_key);
2723 	/* set up random bits */
2724 	arc4rand(&rss_key, sizeof(rss_key), 0);
2727 	/* Set up the redirection table */
2728 	for (i = 0, j = 0; i < 128; i++, j++) {
2729 		if (j == adapter->num_queues) j = 0;
2732 		 * Fetch the RSS bucket id for the given indirection entry.
2733 		 * Cap it at the number of configured buckets (which is
2736 		queue_id = rss_get_indirection_to_bucket(i);
2737 		queue_id = queue_id % adapter->num_queues;
2739 		queue_id = (j * 0x11);
2742 		 * The low 8 bits are for hash value (n+0);
2743 		 * The next 8 bits are for hash value (n+1), etc.
2746 		reta = reta | ( ((uint32_t) queue_id) << 24);
	/* Each RETA register holds four 8-bit entries */
2748 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2753 	/* Now fill our hash function seeds */
2754 	for (int i = 0; i < 10; i++)
2755 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2757 	/* Perform hash on these packet types */
2759 	mrqc = IXGBE_MRQC_RSSEN;
2760 	rss_hash_config = rss_gethashconfig();
2761 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2762 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2763 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2764 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2765 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2766 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2767 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2768 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2769 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2770 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2771 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
2772 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2773 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2774 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2775 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
2776 		device_printf(adapter->dev,
2777 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
2778 		    "but not supported\n", __func__);
2779 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2780 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2781 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
2782 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2785 	 * Disable UDP - IP fragments aren't currently being handled
2786 	 * and so we end up with a mix of 2-tuple and 4-tuple
2789 	mrqc = IXGBE_MRQC_RSSEN
2790 	    | IXGBE_MRQC_RSS_FIELD_IPV4
2791 	    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2793 	    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2795 	    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2796 	    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2797 	    | IXGBE_MRQC_RSS_FIELD_IPV6
2798 	    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2800 	    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2801 	    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2805 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2809 /*********************************************************************
2811 * Setup receive registers and features.
2813 **********************************************************************/
2814 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2816 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units - program the RX side of the MAC.
 *
 * Disables RX while configuring, enables broadcast/pause handling in
 * FCTRL, sets jumbo mode from the MTU, optionally keeps the CRC for
 * netmap, then per ring programs RDBAL/RDBAH/RDLEN, SRRCTL (buffer
 * size, advanced one-buffer descriptors, DROP_EN when multiqueue
 * without flow control), and zeroes RDH/RDT.  Finishes with PSRTYPE
 * on 82599+, RSS setup via ixgbe_initialise_rss_mapping(), and the
 * RXCSUM checksum-offload bits.
 * NOTE(review): #ifdef DEV_NETMAP / RSS lines and some braces were
 * dropped by the extraction; code below is verbatim.
 */
2819 ixgbe_initialize_receive_units(struct adapter *adapter)
2821 	struct rx_ring *rxr = adapter->rx_rings;
2822 	struct ixgbe_hw *hw = &adapter->hw;
2823 	struct ifnet *ifp = adapter->ifp;
2824 	u32 bufsz, fctrl, srrctl, rxcsum;
2829 	 * Make sure receives are disabled while
2830 	 * setting up the descriptor ring
2832 	ixgbe_disable_rx(hw);
2834 	/* Enable broadcasts */
2835 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2836 	fctrl |= IXGBE_FCTRL_BAM;
2837 	fctrl |= IXGBE_FCTRL_DPF;
2838 	fctrl |= IXGBE_FCTRL_PMCF;
2839 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2841 	/* Set for Jumbo Frames? */
2842 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2843 	if (ifp->if_mtu > ETHERMTU)
2844 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2846 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2848 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2849 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2850 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2852 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2853 #endif /* DEV_NETMAP */
2854 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
	/* SRRCTL buffer size is expressed in 1KB units, rounded up */
2856 	bufsz = (adapter->rx_mbuf_sz +
2857 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2859 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2860 		u64 rdba = rxr->rxdma.dma_paddr;
2862 		/* Setup the Base and Length of the Rx Descriptor Ring */
2863 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2864 		    (rdba & 0x00000000ffffffffULL));
2865 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2866 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2867 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2869 		/* Set up the SRRCTL register */
2870 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2871 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2872 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2874 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2877 		 * Set DROP_EN iff we have no flow control and >1 queue.
2878 		 * Note that srrctl was cleared shortly before during reset,
2879 		 * so we do not need to clear the bit, but do it just in case
2880 		 * this code is moved elsewhere.
2882 		if (adapter->num_queues > 1 &&
2883 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2884 			srrctl |= IXGBE_SRRCTL_DROP_EN;
2886 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2889 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2891 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2892 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2893 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2895 		/* Set the processing limit */
2896 		rxr->process_limit = ixgbe_rx_process_limit;
2898 		/* Set the driver rx tail address */
2899 		rxr->tail = IXGBE_RDT(rxr->me);
2902 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2903 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2904 			      IXGBE_PSRTYPE_UDPHDR |
2905 			      IXGBE_PSRTYPE_IPV4HDR |
2906 			      IXGBE_PSRTYPE_IPV6HDR;
2907 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2910 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2912 	ixgbe_initialise_rss_mapping(adapter);
2914 	if (adapter->num_queues > 1) {
2915 		/* RSS and RX IPP Checksum are mutually exclusive */
2916 		rxcsum |= IXGBE_RXCSUM_PCSD;
2919 	if (ifp->if_capenable & IFCAP_RXCSUM)
2920 		rxcsum |= IXGBE_RXCSUM_PCSD;
2922 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2923 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2925 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2932 ** This routine is run via an vlan config EVENT,
2933 ** it enables us to use the HW Filter table since
2934 ** we can get the vlan id. This just creates the
2935 ** entry in the soft version of the VFTA, init will
2936 ** repopulate the real table.
/*
 * ixgbe_register_vlan - VLAN-config event handler.
 *
 * Records the new VLAN id in the soft shadow VFTA (word index =
 * vtag >> 5, bit = vtag & 0x1F — the bit line is not visible in this
 * extraction) and reprograms the hardware VLAN support under the
 * core lock.  Ignores events for other interfaces and out-of-range
 * tags (0 or > 4095).
 */
2939 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2941 	struct adapter *adapter = ifp->if_softc;
2944 	if (ifp->if_softc != arg)   /* Not our event */
2947 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2950 	IXGBE_CORE_LOCK(adapter);
2951 	index = (vtag >> 5) & 0x7F;
2953 	adapter->shadow_vfta[index] |= (1 << bit);
2954 	++adapter->num_vlans;
2955 	ixgbe_setup_vlan_hw_support(adapter);
2956 	IXGBE_CORE_UNLOCK(adapter);
2960 ** This routine is run via an vlan
2961 ** unconfig EVENT, remove our entry
2962 ** in the soft vfta.
/*
 * ixgbe_unregister_vlan - VLAN-unconfig event handler.
 *
 * Mirror of ixgbe_register_vlan(): clears the tag's bit in the soft
 * shadow VFTA, decrements the VLAN count, and re-runs
 * ixgbe_setup_vlan_hw_support() to push the change to hardware.
 */
2965 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2967 	struct adapter *adapter = ifp->if_softc;
2970 	if (ifp->if_softc != arg)
2973 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2976 	IXGBE_CORE_LOCK(adapter);
2977 	index = (vtag >> 5) & 0x7F;
2979 	adapter->shadow_vfta[index] &= ~(1 << bit);
2980 	--adapter->num_vlans;
2981 	/* Re-init to load the changes */
2982 	ixgbe_setup_vlan_hw_support(adapter);
2983 	IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support - push soft VLAN state to hardware.
 *
 * Called after a soft reset (which clears the real VFTA): no-op when
 * no VLANs are registered; otherwise enables per-queue VLAN tag
 * stripping (RXDCTL.VME on 82599+), repopulates the VFTA from the
 * shadow copy when HW filtering is enabled, and sets VLNCTRL bits
 * (VFE for filtering, VME globally on 82598).
 * NOTE(review): some braces and the early-return around
 * IFCAP_VLAN_HWFILTER were dropped by extraction; code is verbatim.
 */
2987 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2989 	struct ifnet 	*ifp = adapter->ifp;
2990 	struct ixgbe_hw *hw = &adapter->hw;
2991 	struct rx_ring	*rxr;
2996 	** We get here thru init_locked, meaning
2997 	** a soft reset, this has already cleared
2998 	** the VFTA and other state, so if there
2999 	** have been no vlan's registered do nothing.
3001 	if (adapter->num_vlans == 0)
3004 	/* Setup the queues for vlans */
3005 	for (int i = 0; i < adapter->num_queues; i++) {
3006 		rxr = &adapter->rx_rings[i];
3007 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3008 		if (hw->mac.type != ixgbe_mac_82598EB) {
3009 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3010 			ctrl |= IXGBE_RXDCTL_VME;
3011 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3013 		rxr->vtag_strip = TRUE;
3016 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3019 	** A soft reset zero's out the VFTA, so
3020 	** we need to repopulate it now.
3022 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3023 		if (adapter->shadow_vfta[i] != 0)
3024 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3025 			    adapter->shadow_vfta[i]);
3027 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3028 	/* Enable the Filter Table if enabled */
3029 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3030 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3031 		ctrl |= IXGBE_VLNCTRL_VFE;
3033 	if (hw->mac.type == ixgbe_mac_82598EB)
3034 		ctrl |= IXGBE_VLNCTRL_VME;
3035 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr - unmask device interrupts.
 *
 * Builds a per-MAC-type EIMS mask (fan failure on 82598AT, ECC,
 * thermal sensor, SFP GPI lines, flow director), writes it, enables
 * auto-clear (EIAC) for queue vectors when running MSI-X — keeping
 * link/other causes manually cleared — and finally enables each queue
 * vector individually so extended (>32) 82599 vectors work.
 * NOTE(review): switch 'break's and #ifdef IXGBE_FDIR lines were
 * dropped by extraction; code below is verbatim.
 */
3039 ixgbe_enable_intr(struct adapter *adapter)
3041 	struct ixgbe_hw	*hw = &adapter->hw;
3042 	struct ix_queue	*que = adapter->queues;
3045 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3046 	/* Enable Fan Failure detection */
3047 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3048 		mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3050 	switch (adapter->hw.mac.type) {
3051 	case ixgbe_mac_82599EB:
3052 		mask |= IXGBE_EIMS_ECC;
3053 		/* Temperature sensor on some adapters */
3054 		mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3055 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3056 		mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3057 		mask |= IXGBE_EIMS_GPI_SDP2_BY_MAC(hw);
3059 		mask |= IXGBE_EIMS_FLOW_DIR;
3062 	case ixgbe_mac_X540:
3063 	case ixgbe_mac_X550:
3064 	case ixgbe_mac_X550EM_a:
3065 	case ixgbe_mac_X550EM_x:
3066 		/* Detect if Thermal Sensor is enabled */
3067 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3068 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3069 			mask |= IXGBE_EIMS_TS;
3070 		/* XXX: Which SFP mode line does this look at? */
3071 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
3072 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3073 		mask |= IXGBE_EIMS_ECC;
3075 		mask |= IXGBE_EIMS_FLOW_DIR;
3082 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3084 	/* With RSS we use auto clear */
3085 	if (adapter->msix_mem) {
3086 		mask = IXGBE_EIMS_ENABLE_MASK;
3087 		/* Don't autoclear Link */
3088 		mask &= ~IXGBE_EIMS_OTHER;
3089 		mask &= ~IXGBE_EIMS_LSC;
3090 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3094 	** Now enable all queues, this is done separately to
3095 	** allow for handling the extended (beyond 32) MSIX
3096 	** vectors that can be used by 82599
3098 	for (int i = 0; i < adapter->num_queues; i++, que++)
3099 		ixgbe_enable_queue(adapter, que->msix);
3101 	IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr - mask all device interrupts.
 *
 * Clears auto-clear (EIAC) when MSI-X is in use, then masks every
 * cause: 82598 has a single EIMC write; 82599+ needs the upper EIMC
 * bits plus both EIMC_EX registers for extended vectors.  Flushes
 * the write so the mask takes effect before returning.
 */
3109 	if (adapter->msix_mem)
3110 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3111 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3112 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3114 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3115 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3116 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3118 	IXGBE_WRITE_FLUSH(&adapter->hw);
3123 ** Get the width and transaction speed of
3124 ** the slot this adapter is plugged into.
/*
 * ixgbe_get_slot_info - determine PCIe slot width and speed.
 *
 * Most devices: delegate to the shared-code ixgbe_get_bus_info()
 * (X550EM parts are not PCI-E and bail out early).  The 82599 quad
 * port (SFP_SF_QP) sits behind an internal bridge, so walk two
 * levels up the PCI tree, read the parent's PCIe Link Status
 * register directly, and decode width (x1/x2/x4/x8) and speed
 * (2.5/5.0/8.0 GT/s).  Warns when the slot provides less bandwidth
 * than the card needs for full performance.
 * NOTE(review): 'break's, returns, and some format strings were
 * dropped by extraction; code below is verbatim.
 */
3127 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3129 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3130 	struct ixgbe_mac_info	*mac = &hw->mac;
3134 	/* For most devices simply call the shared code routine */
3135 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3136 		ixgbe_get_bus_info(hw);
3137 		/* These devices don't use PCI-E */
3138 		if (hw->mac.type == ixgbe_mac_X550EM_x
3139 		    || hw->mac.type == ixgbe_mac_X550EM_a)
3145 	** For the Quad port adapter we need to parse back
3146 	** up the PCI tree to find the speed of the expansion
3147 	** slot into which this adapter is plugged. A bit more work.
3149 	dev = device_get_parent(device_get_parent(dev));
3151 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3152 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3154 	dev = device_get_parent(device_get_parent(dev));
3156 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3157 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3159 	/* Now get the PCI Express Capabilities offset */
3160 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3161 	/* ...and read the Link Status Register */
3162 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3163 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3164 	case IXGBE_PCI_LINK_WIDTH_1:
3165 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3167 	case IXGBE_PCI_LINK_WIDTH_2:
3168 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3170 	case IXGBE_PCI_LINK_WIDTH_4:
3171 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3173 	case IXGBE_PCI_LINK_WIDTH_8:
3174 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3177 		hw->bus.width = ixgbe_bus_width_unknown;
3181 	switch (link & IXGBE_PCI_LINK_SPEED) {
3182 	case IXGBE_PCI_LINK_SPEED_2500:
3183 		hw->bus.speed = ixgbe_bus_speed_2500;
3185 	case IXGBE_PCI_LINK_SPEED_5000:
3186 		hw->bus.speed = ixgbe_bus_speed_5000;
3188 	case IXGBE_PCI_LINK_SPEED_8000:
3189 		hw->bus.speed = ixgbe_bus_speed_8000;
3192 		hw->bus.speed = ixgbe_bus_speed_unknown;
3196 	mac->ops.set_lan_id(hw);
3199 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3200 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3201 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3202 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3203 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3204 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3205 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3208 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3209 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3210 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3211 		device_printf(dev, "PCI-Express bandwidth available"
3212 		    " for this card\n     is not sufficient for"
3213 		    " optimal performance.\n");
3214 		device_printf(dev, "For optimal performance a x8 "
3215 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3217 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3218 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3219 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3220 		device_printf(dev, "PCI-Express bandwidth available"
3221 		    " for this card\n     is not sufficient for"
3222 		    " optimal performance.\n");
3223 		device_printf(dev, "For optimal performance a x8 "
3224 		    "PCIE Gen3 slot is required.\n");
3232 ** Setup the correct IVAR register for a particular MSIX interrupt
3233 ** (yes this is all very magic and confusing :)
3234 ** - entry is the register array entry
3235 ** - vector is the MSIX vector for this queue
3236 ** - type is RX/TX/MISC
/*
 * ixgbe_set_ivar - route one interrupt cause to an MSI-X vector.
 *
 * @entry  register array entry (queue index, or misc-cause index)
 * @vector MSI-X vector to assign (ALLOC_VAL is OR'd in)
 * @type   0 = RX, 1 = TX, -1 = misc/other causes
 *
 * 82598 packs causes into IXGBE_IVAR(index) with four 8-bit slots
 * per register (type selects an offset of 64 entries).  82599/X540/
 * X550 use IVAR_MISC for type -1, otherwise 16*(entry&1) + 8*type
 * selects the byte within IXGBE_IVAR(entry >> 1).
 */
3239 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3241 	struct ixgbe_hw *hw = &adapter->hw;
3244 	vector |= IXGBE_IVAR_ALLOC_VAL;
3246 	switch (hw->mac.type) {
3248 	case ixgbe_mac_82598EB:
3250 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3252 			entry += (type * 64);
3253 		index = (entry >> 2) & 0x1F;
3254 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3255 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3256 		ivar |= (vector << (8 * (entry & 0x3)));
3257 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3260 	case ixgbe_mac_82599EB:
3261 	case ixgbe_mac_X540:
3262 	case ixgbe_mac_X550:
3263 	case ixgbe_mac_X550EM_a:
3264 	case ixgbe_mac_X550EM_x:
3265 		if (type == -1) { /* MISC IVAR */
3266 			index = (entry & 1) * 8;
3267 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3268 			ivar &= ~(0xFF << index);
3269 			ivar |= (vector << index);
3270 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3271 		} else {	/* RX/TX IVARS */
3272 			index = (16 * (entry & 1)) + (8 * type);
3273 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3274 			ivar &= ~(0xFF << index);
3275 			ivar |= (vector << index);
3276 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - map every queue's RX and TX causes to its
 * MSI-X vector and seed the interrupt throttle rate.
 *
 * EITR is derived from the ixgbe_max_interrupt_rate tunable
 * (4000000 / rate, masked to the valid EITR field).  The link
 * interrupt is routed to adapter->vector as the misc cause.
 */
3287 	struct  ix_queue *que = adapter->queues;
3290 	if (ixgbe_max_interrupt_rate > 0)
3291 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3295 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3296 		/* First the RX queue entry */
3297 		ixgbe_set_ivar(adapter, i, que->msix, 0);
3298 		/* ... and the TX */
3299 		ixgbe_set_ivar(adapter, i, que->msix, 1);
3300 		/* Set an Initial EITR value */
3301 		IXGBE_WRITE_REG(&adapter->hw,
3302 		    IXGBE_EITR(que->msix), newitr);
3305 	/* For the Link interrupt */
3306 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3310 ** ixgbe_sfp_probe - called in the local timer to
3311 ** determine if a port had optics inserted.
/*
 * ixgbe_sfp_probe - poll (from the local timer) for newly inserted
 * SFP+ optics on NL-PHY ports.
 *
 * When a module appears, reset the PHY; an unsupported module logs
 * an error and stops further probing, while a supported one updates
 * the optics type via ixgbe_setup_optics().  Returns TRUE only when
 * supported optics were found (return statements not visible in this
 * extraction; code below is verbatim).
 */
static bool ixgbe_sfp_probe(struct adapter *adapter)
3315 	struct ixgbe_hw	*hw = &adapter->hw;
3316 	device_t	dev = adapter->dev;
3317 	bool		result = FALSE;
3319 	if ((hw->phy.type == ixgbe_phy_nl) &&
3320 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3321 		s32 ret = hw->phy.ops.identify_sfp(hw);
3324 		ret = hw->phy.ops.reset(hw);
3325 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3326 			device_printf(dev,"Unsupported SFP+ module detected!");
3327 			printf(" Reload driver with supported module.\n");
3328 			adapter->sfp_probe = FALSE;
3331 		device_printf(dev,"SFP+ module detected!\n");
3332 		/* We now have supported optics */
3333 		adapter->sfp_probe = FALSE;
3334 		/* Set the optics type so system reports correctly */
3335 		ixgbe_setup_optics(adapter);
3343 ** Tasklet handler for MSIX Link interrupts
3344 ** - do outside interrupt since it might sleep
/*
 * ixgbe_handle_link - taskqueue handler for MSI-X link interrupts.
 * Runs outside interrupt context because ixgbe_check_link() may
 * sleep; refreshes link speed/state and propagates it to the stack.
 */
3347 ixgbe_handle_link(void *context, int pending)
3349 	struct adapter  *adapter = context;
3351 	ixgbe_check_link(&adapter->hw,
3352 	    &adapter->link_speed, &adapter->link_up, 0);
3353 	ixgbe_update_link_status(adapter);
3357 ** Tasklet for handling SFP module interrupts
/*
 * ixgbe_handle_mod - taskqueue handler for SFP module insertion.
 *
 * Identifies the newly inserted module and runs the MAC's SFP setup;
 * unsupported modules are reported and abort the sequence (early
 * returns not visible in this extraction).  On success the MSF task
 * is queued to complete multispeed-fiber link setup.
 */
3360 ixgbe_handle_mod(void *context, int pending)
3362 	struct adapter  *adapter = context;
3363 	struct ixgbe_hw *hw = &adapter->hw;
3364 	device_t	dev = adapter->dev;
3367 	err = hw->phy.ops.identify_sfp(hw);
3368 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3370 		    "Unsupported SFP+ module type was detected.\n");
3373 	err = hw->mac.ops.setup_sfp(hw);
3374 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3376 		    "Setup failure - unsupported SFP+ module type.\n");
3379 	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3385 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
 * ixgbe_handle_msf - taskqueue handler for multispeed-fiber (MSF)
 * interrupts.
 *
 * Re-identifies the optics, updates the driver's optics type, then
 * negotiates and sets up the link using either the advertised speeds
 * or the hardware's full capability set.  Finally rebuilds the
 * ifmedia list since the supported media may have changed with the
 * new module.
 */
3388 ixgbe_handle_msf(void *context, int pending)
3390 	struct adapter  *adapter = context;
3391 	struct ixgbe_hw *hw = &adapter->hw;
3396 	err = hw->phy.ops.identify_sfp(hw);
3398 		ixgbe_setup_optics(adapter);
3399 		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3402 	autoneg = hw->phy.autoneg_advertised;
3403 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3404 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3405 	if (hw->mac.ops.setup_link)
3406 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
	/* Media set may differ with the new module; rebuild the list */
3408 	ifmedia_removeall(&adapter->media);
3409 	ixgbe_add_media_types(adapter);
3415 ** Tasklet for reinitializing the Flow Director filter table
/*
 * ixgbe_reinit_fdir - taskqueue handler that rebuilds the Flow
 * Director filter table after the hardware signalled it is full.
 *
 * Guards against spurious invocation via the fdir_reinit flag,
 * reinitializes the 82599 FDIR tables, re-enables the FDIR
 * interrupt cause, and marks the interface running again.
 */
3418 ixgbe_reinit_fdir(void *context, int pending)
3420 	struct adapter  *adapter = context;
3421 	struct ifnet   *ifp = adapter->ifp;
3423 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3425 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3426 	adapter->fdir_reinit = 0;
3427 	/* re-enable flow director interrupts */
3428 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3429 	/* Restart the interface */
3430 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3435 /**********************************************************************
3437 * Update the board statistics counters.
3439 **********************************************************************/
3441 ixgbe_update_stats_counters(struct adapter *adapter)
3443 struct ixgbe_hw *hw = &adapter->hw;
3444 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3445 u64 total_missed_rx = 0;
3447 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3448 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3449 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3450 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3453 ** Note: these are for the 8 possible traffic classes,
3454 ** which in current implementation is unused,
3455 ** therefore only 0 should read real data.
3457 for (int i = 0; i < 8; i++) {
3459 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3460 /* missed_rx tallies misses for the gprc workaround */
3462 /* global total per queue */
3463 adapter->stats.pf.mpc[i] += mp;
3464 /* total for stats display */
3465 total_missed_rx += adapter->stats.pf.mpc[i];
3466 if (hw->mac.type == ixgbe_mac_82598EB) {
3467 adapter->stats.pf.rnbc[i] +=
3468 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3469 adapter->stats.pf.qbtc[i] +=
3470 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3471 adapter->stats.pf.qbrc[i] +=
3472 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3473 adapter->stats.pf.pxonrxc[i] +=
3474 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3476 adapter->stats.pf.pxonrxc[i] +=
3477 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3478 adapter->stats.pf.pxontxc[i] +=
3479 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3480 adapter->stats.pf.pxofftxc[i] +=
3481 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3482 if (hw->mac.type != ixgbe_mac_X550EM_x)
3483 adapter->stats.pf.pxoffrxc[i] +=
3484 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3485 adapter->stats.pf.pxon2offc[i] +=
3486 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3488 for (int i = 0; i < 16; i++) {
3489 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3490 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3491 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3493 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3494 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3495 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3497 /* Hardware workaround, gprc counts missed packets */
3498 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3499 adapter->stats.pf.gprc -= missed_rx;
3501 if (hw->mac.type != ixgbe_mac_82598EB) {
3502 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3503 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3504 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3505 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3506 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3507 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3508 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3509 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3511 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3512 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3513 /* 82598 only has a counter in the high register */
3514 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3515 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3516 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3520 * Workaround: mprc hardware is incorrectly counting
3521 * broadcasts, so for now we subtract those.
3523 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3524 adapter->stats.pf.bprc += bprc;
3525 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3526 if (hw->mac.type == ixgbe_mac_82598EB)
3527 adapter->stats.pf.mprc -= bprc;
3529 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3530 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3531 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3532 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3533 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3534 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3536 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3537 adapter->stats.pf.lxontxc += lxon;
3538 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3539 adapter->stats.pf.lxofftxc += lxoff;
3540 total = lxon + lxoff;
3542 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3543 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3544 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3545 adapter->stats.pf.gptc -= total;
3546 adapter->stats.pf.mptc -= total;
3547 adapter->stats.pf.ptc64 -= total;
3548 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3550 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3551 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3552 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3553 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3554 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3555 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3556 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3557 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3558 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3559 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3560 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3561 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3562 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3563 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3564 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3565 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3566 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3567 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3568 /* Only read FCOE on 82599 */
3569 if (hw->mac.type != ixgbe_mac_82598EB) {
3570 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3571 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3572 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3573 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3574 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3577 /* Fill out the OS statistics structure */
3578 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3579 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3580 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3581 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3582 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3583 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3584 IXGBE_SET_COLLISIONS(adapter, 0);
3585 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3586 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3587 + adapter->stats.pf.rlec);
3590 #if __FreeBSD_version >= 1100036
/*
 * ixgbe_get_counter - if_get_counter(9) callback for the interface.
 *
 * Returns the driver-maintained soft statistics (populated from the
 * hardware counters by the stats-update path via the IXGBE_SET_* macros)
 * for the requested counter; anything not handled explicitly falls
 * through to the stack's default counter implementation.
 */
3592 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3594 struct adapter *adapter;
3596 adapter = if_getsoftc(ifp);
3599 case IFCOUNTER_IPACKETS:
3600 return (adapter->ipackets);
3601 case IFCOUNTER_OPACKETS:
3602 return (adapter->opackets);
3603 case IFCOUNTER_IBYTES:
3604 return (adapter->ibytes);
3605 case IFCOUNTER_OBYTES:
3606 return (adapter->obytes);
3607 case IFCOUNTER_IMCASTS:
3608 return (adapter->imcasts);
3609 case IFCOUNTER_OMCASTS:
3610 return (adapter->omcasts);
3611 case IFCOUNTER_COLLISIONS:
/* NOTE(review): the collisions case body is not visible in this view;
 * full-duplex hardware is expected to report 0 -- confirm in full source. */
3613 case IFCOUNTER_IQDROPS:
3614 return (adapter->iqdrops);
3615 case IFCOUNTER_IERRORS:
3616 return (adapter->ierrors);
/* Anything else: let the network stack serve the default value */
3618 return (if_get_counter_default(ifp, cnt));
3623 /** ixgbe_sysctl_tdh_handler - Handler function
3624 * Retrieves the TDH value from the hardware
* (read-only sysctl; oid_arg1 carries the tx_ring whose Transmit
* Descriptor Head register is sampled and exported to userland)
*/
3627 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3631 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
/* Read the live head pointer for this ring from the device */
3634 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3635 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: nothing further to do on error or plain reads */
3636 if (error || !req->newptr)
3641 /** ixgbe_sysctl_tdt_handler - Handler function
3642 * Retrieves the TDT value from the hardware
* (read-only sysctl; oid_arg1 carries the tx_ring whose Transmit
* Descriptor Tail register is sampled and exported to userland)
*/
3645 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3649 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
/* Read the live tail pointer for this ring from the device */
3652 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3653 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: nothing further to do on error or plain reads */
3654 if (error || !req->newptr)
3659 /** ixgbe_sysctl_rdh_handler - Handler function
3660 * Retrieves the RDH value from the hardware
* (read-only sysctl; oid_arg1 carries the rx_ring whose Receive
* Descriptor Head register is sampled and exported to userland)
*/
3663 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3667 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live head pointer for this ring from the device */
3670 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3671 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: nothing further to do on error or plain reads */
3672 if (error || !req->newptr)
3677 /** ixgbe_sysctl_rdt_handler - Handler function
3678 * Retrieves the RDT value from the hardware
* (read-only sysctl; oid_arg1 carries the rx_ring whose Receive
* Descriptor Tail register is sampled and exported to userland)
*/
3681 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3685 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live tail pointer for this ring from the device */
3688 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3689 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: nothing further to do on error or plain reads */
3690 if (error || !req->newptr)
3696 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3699 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3700 unsigned int reg, usec, rate;
3702 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3703 usec = ((reg & 0x0FF8) >> 3);
3705 rate = 500000 / usec;
3708 error = sysctl_handle_int(oidp, &rate, 0, req);
3709 if (error || !req->newptr)
3711 reg &= ~0xfff; /* default, no limitation */
3712 ixgbe_max_interrupt_rate = 0;
3713 if (rate > 0 && rate < 500000) {
3716 ixgbe_max_interrupt_rate = rate;
3717 reg |= ((4000000/rate) & 0xff8 );
3719 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3724 * Add sysctl variables, one per statistic, to the system.
3727 ixgbe_add_hw_stats(struct adapter *adapter)
3729 device_t dev = adapter->dev;
3731 struct tx_ring *txr = adapter->tx_rings;
3732 struct rx_ring *rxr = adapter->rx_rings;
3734 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3735 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3736 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3737 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3739 struct sysctl_oid *stat_node, *queue_node;
3740 struct sysctl_oid_list *stat_list, *queue_list;
3742 #define QUEUE_NAME_LEN 32
3743 char namebuf[QUEUE_NAME_LEN];
3745 /* Driver Statistics */
3746 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3747 CTLFLAG_RD, &adapter->dropped_pkts,
3748 "Driver dropped packets");
3749 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3750 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3751 "m_defrag() failed");
3752 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3753 CTLFLAG_RD, &adapter->watchdog_events,
3754 "Watchdog timeouts");
3755 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3756 CTLFLAG_RD, &adapter->vector_irq,
3757 "Link MSIX IRQ Handled");
3759 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3760 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3761 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3762 CTLFLAG_RD, NULL, "Queue Name");
3763 queue_list = SYSCTL_CHILDREN(queue_node);
3765 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
3766 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
3767 sizeof(&adapter->queues[i]),
3768 ixgbe_sysctl_interrupt_rate_handler, "IU",
3770 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3771 CTLFLAG_RD, &(adapter->queues[i].irqs),
3772 "irqs on this queue");
3773 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
3774 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3775 ixgbe_sysctl_tdh_handler, "IU",
3776 "Transmit Descriptor Head");
3777 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
3778 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3779 ixgbe_sysctl_tdt_handler, "IU",
3780 "Transmit Descriptor Tail");
3781 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
3782 CTLFLAG_RD, &txr->tso_tx,
3784 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
3785 CTLFLAG_RD, &txr->no_tx_dma_setup,
3786 "Driver tx dma failure in xmit");
3787 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3788 CTLFLAG_RD, &txr->no_desc_avail,
3789 "Queue No Descriptor Available");
3790 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3791 CTLFLAG_RD, &txr->total_packets,
3792 "Queue Packets Transmitted");
3795 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3796 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3797 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3798 CTLFLAG_RD, NULL, "Queue Name");
3799 queue_list = SYSCTL_CHILDREN(queue_node);
3801 struct lro_ctrl *lro = &rxr->lro;
3803 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3804 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3805 CTLFLAG_RD, NULL, "Queue Name");
3806 queue_list = SYSCTL_CHILDREN(queue_node);
3808 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
3809 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3810 ixgbe_sysctl_rdh_handler, "IU",
3811 "Receive Descriptor Head");
3812 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
3813 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3814 ixgbe_sysctl_rdt_handler, "IU",
3815 "Receive Descriptor Tail");
3816 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3817 CTLFLAG_RD, &rxr->rx_packets,
3818 "Queue Packets Received");
3819 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3820 CTLFLAG_RD, &rxr->rx_bytes,
3821 "Queue Bytes Received");
3822 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
3823 CTLFLAG_RD, &rxr->rx_copies,
3824 "Copied RX Frames");
3825 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
3826 CTLFLAG_RD, &lro->lro_queued, 0,
3828 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
3829 CTLFLAG_RD, &lro->lro_flushed, 0,
3833 /* MAC stats get the own sub node */
3835 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
3836 CTLFLAG_RD, NULL, "MAC Statistics");
3837 stat_list = SYSCTL_CHILDREN(stat_node);
3839 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
3840 CTLFLAG_RD, &stats->crcerrs,
3842 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
3843 CTLFLAG_RD, &stats->illerrc,
3844 "Illegal Byte Errors");
3845 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
3846 CTLFLAG_RD, &stats->errbc,
3848 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
3849 CTLFLAG_RD, &stats->mspdc,
3850 "MAC Short Packets Discarded");
3851 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
3852 CTLFLAG_RD, &stats->mlfc,
3853 "MAC Local Faults");
3854 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
3855 CTLFLAG_RD, &stats->mrfc,
3856 "MAC Remote Faults");
3857 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
3858 CTLFLAG_RD, &stats->rlec,
3859 "Receive Length Errors");
3861 /* Flow Control stats */
3862 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
3863 CTLFLAG_RD, &stats->lxontxc,
3864 "Link XON Transmitted");
3865 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
3866 CTLFLAG_RD, &stats->lxonrxc,
3867 "Link XON Received");
3868 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
3869 CTLFLAG_RD, &stats->lxofftxc,
3870 "Link XOFF Transmitted");
3871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
3872 CTLFLAG_RD, &stats->lxoffrxc,
3873 "Link XOFF Received");
3875 /* Packet Reception Stats */
3876 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
3877 CTLFLAG_RD, &stats->tor,
3878 "Total Octets Received");
3879 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
3880 CTLFLAG_RD, &stats->gorc,
3881 "Good Octets Received");
3882 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
3883 CTLFLAG_RD, &stats->tpr,
3884 "Total Packets Received");
3885 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
3886 CTLFLAG_RD, &stats->gprc,
3887 "Good Packets Received");
3888 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
3889 CTLFLAG_RD, &stats->mprc,
3890 "Multicast Packets Received");
3891 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
3892 CTLFLAG_RD, &stats->bprc,
3893 "Broadcast Packets Received");
3894 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
3895 CTLFLAG_RD, &stats->prc64,
3896 "64 byte frames received ");
3897 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
3898 CTLFLAG_RD, &stats->prc127,
3899 "65-127 byte frames received");
3900 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
3901 CTLFLAG_RD, &stats->prc255,
3902 "128-255 byte frames received");
3903 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
3904 CTLFLAG_RD, &stats->prc511,
3905 "256-511 byte frames received");
3906 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
3907 CTLFLAG_RD, &stats->prc1023,
3908 "512-1023 byte frames received");
3909 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
3910 CTLFLAG_RD, &stats->prc1522,
3911 "1023-1522 byte frames received");
3912 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
3913 CTLFLAG_RD, &stats->ruc,
3914 "Receive Undersized");
3915 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
3916 CTLFLAG_RD, &stats->rfc,
3917 "Fragmented Packets Received ");
3918 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
3919 CTLFLAG_RD, &stats->roc,
3920 "Oversized Packets Received");
3921 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
3922 CTLFLAG_RD, &stats->rjc,
3924 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
3925 CTLFLAG_RD, &stats->mngprc,
3926 "Management Packets Received");
3927 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
3928 CTLFLAG_RD, &stats->mngptc,
3929 "Management Packets Dropped");
3930 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
3931 CTLFLAG_RD, &stats->xec,
3934 /* Packet Transmission Stats */
3935 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
3936 CTLFLAG_RD, &stats->gotc,
3937 "Good Octets Transmitted");
3938 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
3939 CTLFLAG_RD, &stats->tpt,
3940 "Total Packets Transmitted");
3941 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
3942 CTLFLAG_RD, &stats->gptc,
3943 "Good Packets Transmitted");
3944 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
3945 CTLFLAG_RD, &stats->bptc,
3946 "Broadcast Packets Transmitted");
3947 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
3948 CTLFLAG_RD, &stats->mptc,
3949 "Multicast Packets Transmitted");
3950 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
3951 CTLFLAG_RD, &stats->mngptc,
3952 "Management Packets Transmitted");
3953 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
3954 CTLFLAG_RD, &stats->ptc64,
3955 "64 byte frames transmitted ");
3956 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
3957 CTLFLAG_RD, &stats->ptc127,
3958 "65-127 byte frames transmitted");
3959 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
3960 CTLFLAG_RD, &stats->ptc255,
3961 "128-255 byte frames transmitted");
3962 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
3963 CTLFLAG_RD, &stats->ptc511,
3964 "256-511 byte frames transmitted");
3965 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
3966 CTLFLAG_RD, &stats->ptc1023,
3967 "512-1023 byte frames transmitted");
3968 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
3969 CTLFLAG_RD, &stats->ptc1522,
3970 "1024-1522 byte frames transmitted");
3974 ** Set flow control using sysctl:
3975 ** Flow control values:
** (the integer written by userland is stored directly into adapter->fc
** and interpreted as an ixgbe_fc_* mode in the switch below)
3982 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3985 struct adapter *adapter = (struct adapter *) arg1;
/* Export/accept the fc mode in place.
 * NOTE(review): `last` must hold adapter->fc as captured before this
 * call; the assignment is not visible in this view -- confirm. */
3988 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
3989 if ((error) || (req->newptr == NULL))
3992 /* Don't bother if it's not changed */
3993 if (adapter->fc == last)
3996 switch (adapter->fc) {
3997 case ixgbe_fc_rx_pause:
3998 case ixgbe_fc_tx_pause:
4000 adapter->hw.fc.requested_mode = adapter->fc;
/* Pause frames will manage congestion: stop dropping on full rings */
4001 if (adapter->num_queues > 1)
4002 ixgbe_disable_rx_drop(adapter);
4005 adapter->hw.fc.requested_mode = ixgbe_fc_none;
/* No flow control: allow per-queue drop so one full ring cannot
 * stall the whole RX engine */
4006 if (adapter->num_queues > 1)
4007 ixgbe_enable_rx_drop(adapter);
4013 /* Don't autoneg if forcing a value */
4014 adapter->hw.fc.disable_fc_autoneg = TRUE;
4015 ixgbe_fc_enable(&adapter->hw);
4020 ** Control advertised link speed:
4022 ** 0x1 - advertise 100 Mb
4023 ** 0x2 - advertise 1G
4024 ** 0x4 - advertise 10G
** (values may be OR'd together; the accepted range is 0x1-0x7,
** validated below)
4027 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4029 int error = 0, requested;
4030 struct adapter *adapter;
4032 struct ixgbe_hw *hw;
4033 ixgbe_link_speed speed = 0;
4035 adapter = (struct adapter *) arg1;
/* NOTE(review): hw (and the dev used by device_printf below) must be
 * initialized from adapter; those assignments are not visible in this
 * view -- confirm against full source. */
4039 requested = adapter->advertise;
4040 error = sysctl_handle_int(oidp, &requested, 0, req);
4041 if ((error) || (req->newptr == NULL))
4044 /* Checks to validate new value */
4045 if (adapter->advertise == requested) /* no change */
/* Advertisement only applies where the link is negotiated: copper
 * or multispeed fiber media */
4048 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4049 (hw->phy.multispeed_fiber))) {
4051 "Advertised speed can only be set on copper or "
4052 "multispeed fiber media types.\n");
4056 if (requested < 0x1 || requested > 0x7) {
4058 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
/* 100Mb advertisement is only supported on X540/X550 MACs */
4062 if ((requested & 0x1)
4063 && (hw->mac.type != ixgbe_mac_X540)
4064 && (hw->mac.type != ixgbe_mac_X550)) {
4065 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4069 /* Set new value and report new advertised mode */
4070 if (requested & 0x1)
4071 speed |= IXGBE_LINK_SPEED_100_FULL;
4072 if (requested & 0x2)
4073 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4074 if (requested & 0x4)
4075 speed |= IXGBE_LINK_SPEED_10GB_FULL;
/* Kick off link (re)negotiation with the new advertisement mask */
4077 hw->mac.autotry_restart = TRUE;
4078 hw->mac.ops.setup_link(hw, speed, TRUE);
4079 adapter->advertise = requested;
4085 ** Thermal Shutdown Trigger
4086 ** - cause a Thermal Overtemp IRQ
4087 ** - this now requires firmware enabling
** (debug aid: sets the thermal-sensor bit in EICS so the overtemp
** interrupt path can be exercised without actual overheating)
4090 ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
4092 int error, fire = 0;
4093 struct adapter *adapter = (struct adapter *) arg1;
4094 struct ixgbe_hw *hw = &adapter->hw;
/* Thermal sensor interrupt exists only on X540 and newer MACs */
4097 if (hw->mac.type < ixgbe_mac_X540)
4100 error = sysctl_handle_int(oidp, &fire, 0, req);
4101 if ((error) || (req->newptr == NULL))
/* NOTE(review): expected to fire only when a non-zero value was
 * written; the `if (fire)` guard is not visible in this view --
 * confirm against full source. */
4105 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4106 reg |= IXGBE_EICR_TS;
4107 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4114 ** Enable the hardware to drop packets when the buffer is
4115 ** full. This is useful when multiqueue,so that no single
4116 ** queue being full stalls the entire RX engine. We only
4117 ** enable this when Multiqueue AND when Flow Control is
4121 ixgbe_enable_rx_drop(struct adapter *adapter)
4123 struct ixgbe_hw *hw = &adapter->hw;
4125 for (int i = 0; i < adapter->num_queues; i++) {
4126 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4127 srrctl |= IXGBE_SRRCTL_DROP_EN;
4128 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4133 ixgbe_disable_rx_drop(struct adapter *adapter)
4135 struct ixgbe_hw *hw = &adapter->hw;
4137 for (int i = 0; i < adapter->num_queues; i++) {
4138 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4139 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4140 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4145 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4149 switch (adapter->hw.mac.type) {
4150 case ixgbe_mac_82598EB:
4151 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4152 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4154 case ixgbe_mac_82599EB:
4155 case ixgbe_mac_X540:
4156 case ixgbe_mac_X550:
4157 mask = (queues & 0xFFFFFFFF);
4158 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4159 mask = (queues >> 32);
4160 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);