1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
49 /*********************************************************************
50 * Set this to one to display debug statistics
51 *********************************************************************/
52 int ixgbe_display_debug_stats = 0;
54 /*********************************************************************
56 *********************************************************************/
57 char ixgbe_driver_version[] = "3.1.0";
/* NOTE(review): ixgbe_driver_version is embedded into the device
 * description string built in ixgbe_probe(); keep in sync with releases. */
59 /*********************************************************************
62 * Used by probe to select devices to load on
63 * Last field stores an index into ixgbe_strings
64 * Last entry must be all 0s
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67 *********************************************************************/
/* PCI ID match table walked by ixgbe_probe(); subvendor/subdevice of 0
 * act as wildcards, and the final index selects the branding string.
 * NOTE(review): the initializer's opening brace and the all-zero
 * terminating entry are not visible in this excerpt — confirm they are
 * present in the full source. */
69 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
101 /* required last entry */
105 /*********************************************************************
106 * Table of branding strings
107 *********************************************************************/
/* Indexed by the last field ("String Index") of ixgbe_vendor_info_array
 * entries when ixgbe_probe() builds the device description. */
109 static char *ixgbe_strings[] = {
110 "Intel(R) PRO/10GbE PCI-Express Network Driver"
113 /*********************************************************************
114 * Function prototypes
115 *********************************************************************/
/* NOTE(review): several prototypes below sit inside conditional
 * compilation regions (#if __FreeBSD_version, IXGBE_FDIR, PCI_IOV)
 * whose #ifdef/#endif lines are not all visible in this excerpt. */
116 static int ixgbe_probe(device_t);
117 static int ixgbe_attach(device_t);
118 static int ixgbe_detach(device_t);
119 static int ixgbe_shutdown(device_t);
120 static int ixgbe_suspend(device_t);
121 static int ixgbe_resume(device_t);
122 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
123 static void ixgbe_init(void *);
124 static void ixgbe_init_locked(struct adapter *);
125 static void ixgbe_stop(void *);
126 #if __FreeBSD_version >= 1100036
127 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
129 static void ixgbe_add_media_types(struct adapter *);
130 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
131 static int ixgbe_media_change(struct ifnet *);
132 static void ixgbe_identify_hardware(struct adapter *);
133 static int ixgbe_allocate_pci_resources(struct adapter *);
134 static void ixgbe_get_slot_info(struct ixgbe_hw *);
135 static int ixgbe_allocate_msix(struct adapter *);
136 static int ixgbe_allocate_legacy(struct adapter *);
137 static int ixgbe_setup_msix(struct adapter *);
138 static void ixgbe_free_pci_resources(struct adapter *);
139 static void ixgbe_local_timer(void *);
140 static int ixgbe_setup_interface(device_t, struct adapter *);
141 static void ixgbe_config_gpie(struct adapter *);
142 static void ixgbe_config_dmac(struct adapter *);
143 static void ixgbe_config_delay_values(struct adapter *);
144 static void ixgbe_config_link(struct adapter *);
145 static void ixgbe_check_eee_support(struct adapter *);
146 static void ixgbe_check_wol_support(struct adapter *);
147 static int ixgbe_setup_low_power_mode(struct adapter *);
148 static void ixgbe_rearm_queues(struct adapter *, u64);
150 static void ixgbe_initialize_transmit_units(struct adapter *);
151 static void ixgbe_initialize_receive_units(struct adapter *);
152 static void ixgbe_enable_rx_drop(struct adapter *);
153 static void ixgbe_disable_rx_drop(struct adapter *);
155 static void ixgbe_enable_intr(struct adapter *);
156 static void ixgbe_disable_intr(struct adapter *);
157 static void ixgbe_update_stats_counters(struct adapter *);
158 static void ixgbe_set_promisc(struct adapter *);
159 static void ixgbe_set_multi(struct adapter *);
160 static void ixgbe_update_link_status(struct adapter *);
161 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
162 static void ixgbe_configure_ivars(struct adapter *);
163 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
165 static void ixgbe_setup_vlan_hw_support(struct adapter *);
166 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
167 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
169 static void ixgbe_add_device_sysctls(struct adapter *);
170 static void ixgbe_add_hw_stats(struct adapter *);
172 /* Sysctl handlers */
173 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
184 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
186 /* Support for pluggable optic modules */
187 static bool ixgbe_sfp_probe(struct adapter *);
188 static void ixgbe_setup_optics(struct adapter *);
190 /* Legacy (single vector interrupt handler */
191 static void ixgbe_legacy_irq(void *);
193 /* The MSI/X Interrupt handlers */
194 static void ixgbe_msix_que(void *);
195 static void ixgbe_msix_link(void *);
197 /* Deferred interrupt tasklets */
198 static void ixgbe_handle_que(void *, int);
199 static void ixgbe_handle_link(void *, int);
200 static void ixgbe_handle_msf(void *, int);
201 static void ixgbe_handle_mod(void *, int);
202 static void ixgbe_handle_phy(void *, int);
205 static void ixgbe_reinit_fdir(void *, int);
/* SR-IOV (PF side) support routines */
209 static void ixgbe_ping_all_vfs(struct adapter *);
210 static void ixgbe_handle_mbx(void *, int);
211 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
212 static void ixgbe_uninit_iov(device_t);
213 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
214 static void ixgbe_initialize_iov(struct adapter *);
215 static void ixgbe_recalculate_max_frame(struct adapter *);
216 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
220 /*********************************************************************
221 * FreeBSD Device Interface Entry Points
222 *********************************************************************/
/* newbus glue: method table, driver descriptor, devclass and module
 * registration for the "ix" driver.
 * NOTE(review): the DEVMETHOD_END terminator and closing braces of
 * ix_methods/ix_driver, plus the #ifdef PCI_IOV / #ifdef DEV_NETMAP
 * guards, are not visible in this excerpt. */
224 static device_method_t ix_methods[] = {
225 /* Device interface */
226 DEVMETHOD(device_probe, ixgbe_probe),
227 DEVMETHOD(device_attach, ixgbe_attach),
228 DEVMETHOD(device_detach, ixgbe_detach),
229 DEVMETHOD(device_shutdown, ixgbe_shutdown),
230 DEVMETHOD(device_suspend, ixgbe_suspend),
231 DEVMETHOD(device_resume, ixgbe_resume),
233 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
234 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
235 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
240 static driver_t ix_driver = {
241 "ix", ix_methods, sizeof(struct adapter),
244 devclass_t ix_devclass;
245 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
247 MODULE_DEPEND(ix, pci, 1, 1, 1);
248 MODULE_DEPEND(ix, ether, 1, 1, 1);
250 MODULE_DEPEND(ix, netmap, 1, 1, 1);
251 #endif /* DEV_NETMAP */
254 ** TUNEABLE PARAMETERS:
/* Loader tunables and hw.ix sysctl knobs.
 * NOTE(review): the TUNABLE_INT names below use the legacy "hw.ixgbe."
 * and "hw.ix." prefixes while the sysctl node is hw.ix — the strings
 * are kept as-is because existing loader.conf entries depend on them. */
257 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
258 "IXGBE driver parameters");
261 ** AIM: Adaptive Interrupt Moderation
262 ** which means that the interrupt rate
263 ** is varied over time based on the
264 ** traffic for that interrupt vector
266 static int ixgbe_enable_aim = TRUE;
267 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
268 "Enable adaptive interrupt moderation");
270 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
271 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
272 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
274 /* How many packets rxeof tries to clean at a time */
275 static int ixgbe_rx_process_limit = 256;
276 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
277 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
278 &ixgbe_rx_process_limit, 0,
279 "Maximum number of received packets to process at a time,"
280 "-1 means unlimited");
282 /* How many packets txeof tries to clean at a time */
283 static int ixgbe_tx_process_limit = 256;
284 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
285 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
286 &ixgbe_tx_process_limit, 0,
287 "Maximum number of sent packets to process at a time,"
288 "-1 means unlimited");
291 ** Smart speed setting, default to on
292 ** this only works as a compile option
293 ** right now as its during attach, set
294 ** this to 'ixgbe_smart_speed_off' to
297 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
300 * MSIX should be the default for best performance,
301 * but this allows it to be forced off for testing.
303 static int ixgbe_enable_msix = 1;
304 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
305 "Enable MSI-X interrupts");
308 * Number of Queues, can be set to 0,
309 * it then autoconfigures based on the
310 * number of cpus with a max of 8. This
311 * can be overriden manually here.
313 static int ixgbe_num_queues = 0;
314 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
315 "Number of queues to configure, 0 indicates autoconfigure");
318 ** Number of TX descriptors per ring,
319 ** setting higher than RX as this seems
320 ** the better performing choice.
322 static int ixgbe_txd = PERFORM_TXD;
323 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
324 "Number of transmit descriptors per queue");
326 /* Number of RX descriptors per ring */
327 static int ixgbe_rxd = PERFORM_RXD;
328 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
329 "Number of receive descriptors per queue");
332 ** Defining this on will allow the use
333 ** of unsupported SFP+ modules, note that
334 ** doing so you are on your own :)
336 static int allow_unsupported_sfp = FALSE;
337 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
339 /* Keep running tab on them for sanity check */
340 static int ixgbe_total_ports;
344 ** Flow Director actually 'steals'
345 ** part of the packet buffer as its
346 ** filter pool, this variable controls
348 ** 0 = 64K, 1 = 128K, 2 = 256K
350 static int fdir_pballoc = 1;
355 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
356 * be a reference on how to implement netmap support in a driver.
357 * Additional comments are in ixgbe_netmap.h .
359 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
360 * that extend the standard driver.
362 #include <dev/netmap/ixgbe_netmap.h>
363 #endif /* DEV_NETMAP */
365 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367 /*********************************************************************
368 * Device identification routine
370 * ixgbe_probe determines if the driver should be loaded on
371 * adapter based on PCI vendor/device id of the adapter.
373 * return BUS_PROBE_DEFAULT on success, positive on failure
374 *********************************************************************/
/* Walks ixgbe_vendor_info_array; subvendor/subdevice of 0 in a table
 * entry matches any subsystem ID. On a hit, sets the bus device
 * description to "<branding string>, Version - <driver version>".
 * NOTE(review): the return-type line, braces, the ent++ loop advance
 * and the final ENXIO return are elided from this excerpt. */
377 ixgbe_probe(device_t dev)
379 ixgbe_vendor_info_t *ent;
381 u16 pci_vendor_id = 0;
382 u16 pci_device_id = 0;
383 u16 pci_subvendor_id = 0;
384 u16 pci_subdevice_id = 0;
385 char adapter_name[256];
387 INIT_DEBUGOUT("ixgbe_probe: begin");
389 pci_vendor_id = pci_get_vendor(dev);
390 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
393 pci_device_id = pci_get_device(dev);
394 pci_subvendor_id = pci_get_subvendor(dev);
395 pci_subdevice_id = pci_get_subdevice(dev);
397 ent = ixgbe_vendor_info_array;
398 while (ent->vendor_id != 0) {
399 if ((pci_vendor_id == ent->vendor_id) &&
400 (pci_device_id == ent->device_id) &&
402 ((pci_subvendor_id == ent->subvendor_id) ||
403 (ent->subvendor_id == 0)) &&
405 ((pci_subdevice_id == ent->subdevice_id) ||
406 (ent->subdevice_id == 0))) {
407 sprintf(adapter_name, "%s, Version - %s",
408 ixgbe_strings[ent->index],
409 ixgbe_driver_version);
410 device_set_desc_copy(dev, adapter_name);
412 return (BUS_PROBE_DEFAULT);
419 /*********************************************************************
420 * Device initialization routine
422 * The attach entry point is called when the driver is being loaded.
423 * This routine identifies the type of hardware, allocates all resources
424 * and initializes the hardware.
426 * return 0 on success, positive on failure
427 *********************************************************************/
/* Attach sequence: softc setup, core lock + timer, hardware ident,
 * BAR0 mapping, TX/RX descriptor-count sanity checks, queue and
 * multicast-array allocation, shared-code + EEPROM + hw init,
 * interrupt (MSI-X or legacy) allocation, ifnet setup, VLAN event
 * registration, optional SR-IOV schema attach, sysctls/stats, and
 * finally setting CTRL_EXT.DRV_LOAD to tell firmware a driver owns
 * the device. Error paths fall through to the cleanup labels at the
 * bottom (err_late / err_out in the full source).
 * NOTE(review): declarations of hw/error/ctrl_ext/csum, the goto
 * labels, and several braces are elided from this excerpt. */
430 ixgbe_attach(device_t dev)
432 struct adapter *adapter;
438 INIT_DEBUGOUT("ixgbe_attach: begin");
440 /* Allocate, clear, and link in our adapter structure */
441 adapter = device_get_softc(dev);
442 adapter->dev = adapter->osdep.dev = dev;
446 adapter->init_locked = ixgbe_init_locked;
447 adapter->stop_locked = ixgbe_stop;
451 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
453 /* Set up the timer callout */
454 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
456 /* Determine hardware revision */
457 ixgbe_identify_hardware(adapter);
459 /* Do base PCI setup - map BAR0 */
460 if (ixgbe_allocate_pci_resources(adapter)) {
461 device_printf(dev, "Allocation of PCI resources failed\n");
466 /* Do descriptor calc and sanity checks */
467 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
468 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
469 device_printf(dev, "TXD config issue, using default!\n");
470 adapter->num_tx_desc = DEFAULT_TXD;
472 adapter->num_tx_desc = ixgbe_txd;
475 ** With many RX rings it is easy to exceed the
476 ** system mbuf allocation. Tuning nmbclusters
477 ** can alleviate this.
479 if (nmbclusters > 0) {
481 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
482 if (s > nmbclusters) {
483 device_printf(dev, "RX Descriptors exceed "
484 "system mbuf max, using default instead!\n");
485 ixgbe_rxd = DEFAULT_RXD;
489 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
490 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
491 device_printf(dev, "RXD config issue, using default!\n");
492 adapter->num_rx_desc = DEFAULT_RXD;
494 adapter->num_rx_desc = ixgbe_rxd;
496 /* Allocate our TX/RX Queues */
497 if (ixgbe_allocate_queues(adapter)) {
502 /* Allocate multicast array memory. */
503 adapter->mta = malloc(sizeof(*adapter->mta) *
504 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
505 if (adapter->mta == NULL) {
506 device_printf(dev, "Can not allocate multicast setup array\n");
511 /* Initialize the shared code */
512 hw->allow_unsupported_sfp = allow_unsupported_sfp;
513 error = ixgbe_init_shared_code(hw);
514 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
516 ** No optics in this port, set up
517 ** so the timer routine will probe
518 ** for later insertion.
520 adapter->sfp_probe = TRUE;
522 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
523 device_printf(dev,"Unsupported SFP+ module detected!\n");
527 device_printf(dev,"Unable to initialize the shared code\n");
532 /* Make sure we have a good EEPROM before we read from it */
533 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
534 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
539 error = ixgbe_init_hw(hw);
541 case IXGBE_ERR_EEPROM_VERSION:
542 device_printf(dev, "This device is a pre-production adapter/"
543 "LOM. Please be aware there may be issues associated "
544 "with your hardware.\n If you are experiencing problems "
545 "please contact your Intel or hardware representative "
546 "who provided you with this hardware.\n");
548 case IXGBE_ERR_SFP_NOT_SUPPORTED:
549 device_printf(dev,"Unsupported SFP+ Module\n");
552 case IXGBE_ERR_SFP_NOT_PRESENT:
553 device_printf(dev,"No SFP+ Module found\n");
559 /* Detect and set physical type */
560 ixgbe_setup_optics(adapter);
562 if ((adapter->msix > 1) && (ixgbe_enable_msix))
563 error = ixgbe_allocate_msix(adapter);
565 error = ixgbe_allocate_legacy(adapter);
569 /* Setup OS specific network interface */
570 if (ixgbe_setup_interface(dev, adapter) != 0)
573 /* Initialize statistics */
574 ixgbe_update_stats_counters(adapter);
576 /* Register for VLAN events */
577 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
578 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
579 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
580 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
582 /* Check PCIE slot type/speed/width */
583 ixgbe_get_slot_info(hw);
586 /* Set an initial default flow control value */
587 adapter->fc = ixgbe_fc_full;
590 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
591 nvlist_t *pf_schema, *vf_schema;
593 hw->mbx.ops.init_params(hw);
594 pf_schema = pci_iov_schema_alloc_node();
595 vf_schema = pci_iov_schema_alloc_node();
596 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
597 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
598 IOV_SCHEMA_HASDEFAULT, TRUE);
599 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
600 IOV_SCHEMA_HASDEFAULT, FALSE);
601 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
602 IOV_SCHEMA_HASDEFAULT, FALSE);
603 error = pci_iov_attach(dev, pf_schema, vf_schema);
606 "Error %d setting up SR-IOV\n", error);
611 /* Check for certain supported features */
612 ixgbe_check_wol_support(adapter);
613 ixgbe_check_eee_support(adapter);
616 ixgbe_add_device_sysctls(adapter);
617 ixgbe_add_hw_stats(adapter);
619 /* let hardware know driver is loaded */
620 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
621 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
622 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
625 ixgbe_netmap_attach(adapter);
626 #endif /* DEV_NETMAP */
627 INIT_DEBUGOUT("ixgbe_attach: end");
/* error-path cleanup: release queues/ifnet/PCI resources and mta */
631 ixgbe_free_transmit_structures(adapter);
632 ixgbe_free_receive_structures(adapter);
634 if (adapter->ifp != NULL)
635 if_free(adapter->ifp);
636 ixgbe_free_pci_resources(adapter);
637 free(adapter->mta, M_DEVBUF);
641 /*********************************************************************
642 * Device removal routine
644 * The detach entry point is called when the driver is being removed.
645 * This routine stops the adapter and deallocates all the resources
646 * that were allocated for driver operation.
648 * return 0 on success, positive on failure
649 *********************************************************************/
/* Teardown mirrors attach: refuse to detach while VLANs or SR-IOV VFs
 * are in use (returns EBUSY in the full source), enter low-power mode,
 * drain and free per-queue and link taskqueues, clear DRV_LOAD, then
 * release ifnet, timer, netmap, PCI resources, rings, mta and the
 * core lock. NOTE(review): braces, #ifdef guards (PCI_IOV, IXGBE_FDIR,
 * DEV_NETMAP, IXGBE_LEGACY_TX) and the final return are elided here. */
652 ixgbe_detach(device_t dev)
654 struct adapter *adapter = device_get_softc(dev);
655 struct ix_queue *que = adapter->queues;
656 struct tx_ring *txr = adapter->tx_rings;
659 INIT_DEBUGOUT("ixgbe_detach: begin");
661 /* Make sure VLANS are not using driver */
662 if (adapter->ifp->if_vlantrunk != NULL) {
663 device_printf(dev,"Vlan in use, detach first\n");
668 if (pci_iov_detach(dev) != 0) {
669 device_printf(dev, "SR-IOV in use; detach first.\n");
674 /* Stop the adapter */
675 IXGBE_CORE_LOCK(adapter);
676 ixgbe_setup_low_power_mode(adapter);
677 IXGBE_CORE_UNLOCK(adapter);
679 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
681 #ifndef IXGBE_LEGACY_TX
682 taskqueue_drain(que->tq, &txr->txq_task);
684 taskqueue_drain(que->tq, &que->que_task);
685 taskqueue_free(que->tq);
689 /* Drain the Link queue */
691 taskqueue_drain(adapter->tq, &adapter->link_task);
692 taskqueue_drain(adapter->tq, &adapter->mod_task);
693 taskqueue_drain(adapter->tq, &adapter->msf_task);
695 taskqueue_drain(adapter->tq, &adapter->mbx_task);
697 taskqueue_drain(adapter->tq, &adapter->phy_task);
699 taskqueue_drain(adapter->tq, &adapter->fdir_task);
701 taskqueue_free(adapter->tq);
704 /* let hardware know driver is unloading */
705 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
706 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
707 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
709 /* Unregister VLAN events */
710 if (adapter->vlan_attach != NULL)
711 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
712 if (adapter->vlan_detach != NULL)
713 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
715 ether_ifdetach(adapter->ifp);
716 callout_drain(&adapter->timer);
718 netmap_detach(adapter->ifp);
719 #endif /* DEV_NETMAP */
720 ixgbe_free_pci_resources(adapter);
721 bus_generic_detach(dev);
722 if_free(adapter->ifp);
724 ixgbe_free_transmit_structures(adapter);
725 ixgbe_free_receive_structures(adapter);
726 free(adapter->mta, M_DEVBUF);
728 IXGBE_CORE_LOCK_DESTROY(adapter);
732 /*********************************************************************
734 * Shutdown entry point
736 **********************************************************************/
/* System-shutdown hook: transition the adapter to low-power state
 * under the core lock and return the result (return elided here). */
739 ixgbe_shutdown(device_t dev)
741 struct adapter *adapter = device_get_softc(dev);
744 INIT_DEBUGOUT("ixgbe_shutdown: begin");
746 IXGBE_CORE_LOCK(adapter);
747 error = ixgbe_setup_low_power_mode(adapter);
748 IXGBE_CORE_UNLOCK(adapter);
754 * Methods for going from:
755 * D0 -> D3: ixgbe_suspend
756 * D3 -> D0: ixgbe_resume
/* Suspend: enter low-power mode, save PCI config state, and place the
 * device in D3. NOTE(review): return type, braces and the final
 * return of error are not visible in this excerpt. */
759 ixgbe_suspend(device_t dev)
761 struct adapter *adapter = device_get_softc(dev);
764 INIT_DEBUGOUT("ixgbe_suspend: begin");
766 IXGBE_CORE_LOCK(adapter);
768 error = ixgbe_setup_low_power_mode(adapter);
770 /* Save state and power down */
772 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
774 IXGBE_CORE_UNLOCK(adapter);
/* Resume: power back to D0, restore PCI config, log and clear any
 * Wake Up Status bits, clear WUFC, and re-init if the interface was
 * up (a D3->D0 transition loses advertised-speed settings). */
780 ixgbe_resume(device_t dev)
782 struct adapter *adapter = device_get_softc(dev);
783 struct ifnet *ifp = adapter->ifp;
784 struct ixgbe_hw *hw = &adapter->hw;
787 INIT_DEBUGOUT("ixgbe_resume: begin");
789 IXGBE_CORE_LOCK(adapter);
791 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
792 pci_restore_state(dev);
794 /* Read & clear WUS register */
795 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
797 device_printf(dev, "Woken up by (WUS): %#010x\n",
798 IXGBE_READ_REG(hw, IXGBE_WUS));
799 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
800 /* And clear WUFC until next low-power transition */
801 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
804 * Required after D3->D0 transition;
805 * will re-advertise all previous advertised speeds
807 if (ifp->if_flags & IFF_UP)
808 ixgbe_init_locked(adapter);
810 IXGBE_CORE_UNLOCK(adapter);
812 INIT_DEBUGOUT("ixgbe_resume: end");
817 /*********************************************************************
820 * ixgbe_ioctl is called when the user wants to configure the
823 * return 0 on success, positive on failure
824 **********************************************************************/
/* ifnet ioctl handler. Handles SIOCSIFADDR (avoid full reinit where
 * possible), SIOCSIFMTU, SIOCSIFFLAGS (promisc/allmulti deltas vs.
 * full reinit), SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA, SIOCSIFCAP
 * (capability toggles followed by reinit if running) and SIOCGI2C
 * (SFP module EEPROM read at addresses 0xA0/0xA2). Everything else
 * falls through to ether_ioctl().
 * NOTE(review): the switch statement, case labels, breaks and several
 * braces are elided from this excerpt; error declarations and the
 * final return are likewise not visible. */
827 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
829 struct adapter *adapter = ifp->if_softc;
830 struct ifreq *ifr = (struct ifreq *) data;
831 #if defined(INET) || defined(INET6)
832 struct ifaddr *ifa = (struct ifaddr *)data;
833 bool avoid_reset = FALSE;
841 if (ifa->ifa_addr->sa_family == AF_INET)
845 if (ifa->ifa_addr->sa_family == AF_INET6)
848 #if defined(INET) || defined(INET6)
850 ** Calling init results in link renegotiation,
851 ** so we avoid doing it when possible.
854 ifp->if_flags |= IFF_UP;
855 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
857 if (!(ifp->if_flags & IFF_NOARP))
858 arp_ifinit(ifp, ifa);
860 error = ether_ioctl(ifp, command, data);
864 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
865 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
868 IXGBE_CORE_LOCK(adapter);
869 ifp->if_mtu = ifr->ifr_mtu;
870 adapter->max_frame_size =
871 ifp->if_mtu + IXGBE_MTU_HDR;
872 ixgbe_init_locked(adapter);
874 ixgbe_recalculate_max_frame(adapter);
876 IXGBE_CORE_UNLOCK(adapter);
880 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
881 IXGBE_CORE_LOCK(adapter);
882 if (ifp->if_flags & IFF_UP) {
883 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
884 if ((ifp->if_flags ^ adapter->if_flags) &
885 (IFF_PROMISC | IFF_ALLMULTI)) {
886 ixgbe_set_promisc(adapter);
889 ixgbe_init_locked(adapter);
891 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
893 adapter->if_flags = ifp->if_flags;
894 IXGBE_CORE_UNLOCK(adapter);
898 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
899 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
900 IXGBE_CORE_LOCK(adapter);
901 ixgbe_disable_intr(adapter);
902 ixgbe_set_multi(adapter);
903 ixgbe_enable_intr(adapter);
904 IXGBE_CORE_UNLOCK(adapter);
909 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
910 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
914 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
915 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
916 if (mask & IFCAP_HWCSUM)
917 ifp->if_capenable ^= IFCAP_HWCSUM;
918 if (mask & IFCAP_TSO4)
919 ifp->if_capenable ^= IFCAP_TSO4;
920 if (mask & IFCAP_TSO6)
921 ifp->if_capenable ^= IFCAP_TSO6;
922 if (mask & IFCAP_LRO)
923 ifp->if_capenable ^= IFCAP_LRO;
924 if (mask & IFCAP_VLAN_HWTAGGING)
925 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
926 if (mask & IFCAP_VLAN_HWFILTER)
927 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
928 if (mask & IFCAP_VLAN_HWTSO)
929 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
930 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
931 IXGBE_CORE_LOCK(adapter);
932 ixgbe_init_locked(adapter);
933 IXGBE_CORE_UNLOCK(adapter);
935 VLAN_CAPABILITIES(ifp);
938 #if __FreeBSD_version >= 1100036
941 struct ixgbe_hw *hw = &adapter->hw;
944 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
945 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
948 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
952 if (i2c.len > sizeof(i2c.data)) {
957 for (i = 0; i < i2c.len; i++)
958 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
959 i2c.dev_addr, &i2c.data[i]);
960 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
965 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
966 error = ether_ioctl(ifp, command, data);
973 /*********************************************************************
976 * This routine is used in two ways. It is used by the stack as
977 * init entry point in network interface structure. It is also used
978 * by the driver as a hw/sw initialization routine to get to a
981 * return 0 on success, positive on failure
982 **********************************************************************/
983 #define IXGBE_MHADD_MFS_SHIFT 16
986 ixgbe_init_locked(struct adapter *adapter)
988 struct ifnet *ifp = adapter->ifp;
989 device_t dev = adapter->dev;
990 struct ixgbe_hw *hw = &adapter->hw;
996 enum ixgbe_iov_mode mode;
999 mtx_assert(&adapter->core_mtx, MA_OWNED);
1000 INIT_DEBUGOUT("ixgbe_init_locked: begin");
1002 hw->adapter_stopped = FALSE;
1003 ixgbe_stop_adapter(hw);
1004 callout_stop(&adapter->timer);
1007 mode = ixgbe_get_iov_mode(adapter);
1008 adapter->pool = ixgbe_max_vfs(mode);
1009 /* Queue indices may change with IOV mode */
1010 for (int i = 0; i < adapter->num_queues; i++) {
1011 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1012 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1015 /* reprogram the RAR[0] in case user changed it. */
1016 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1018 /* Get the latest mac address, User can use a LAA */
1019 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1020 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1021 hw->addr_ctrl.rar_used_count = 1;
1023 /* Set the various hardware offload abilities */
1024 ifp->if_hwassist = 0;
1025 if (ifp->if_capenable & IFCAP_TSO)
1026 ifp->if_hwassist |= CSUM_TSO;
1027 if (ifp->if_capenable & IFCAP_TXCSUM) {
1028 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1029 #if __FreeBSD_version >= 800000
1030 if (hw->mac.type != ixgbe_mac_82598EB)
1031 ifp->if_hwassist |= CSUM_SCTP;
1035 /* Prepare transmit descriptors and buffers */
1036 if (ixgbe_setup_transmit_structures(adapter)) {
1037 device_printf(dev, "Could not setup transmit structures\n");
1038 ixgbe_stop(adapter);
1044 ixgbe_initialize_iov(adapter);
1046 ixgbe_initialize_transmit_units(adapter);
1048 /* Setup Multicast table */
1049 ixgbe_set_multi(adapter);
1052 ** Determine the correct mbuf pool
1053 ** for doing jumbo frames
1055 if (adapter->max_frame_size <= MCLBYTES)
1056 adapter->rx_mbuf_sz = MCLBYTES;
1058 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1060 /* Prepare receive descriptors and buffers */
1061 if (ixgbe_setup_receive_structures(adapter)) {
1062 device_printf(dev, "Could not setup receive structures\n");
1063 ixgbe_stop(adapter);
1067 /* Configure RX settings */
1068 ixgbe_initialize_receive_units(adapter);
1070 /* Enable SDP & MSIX interrupts based on adapter */
1071 ixgbe_config_gpie(adapter);
1074 if (ifp->if_mtu > ETHERMTU) {
1075 /* aka IXGBE_MAXFRS on 82599 and newer */
1076 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1077 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1078 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1079 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1082 /* Now enable all the queues */
1083 for (int i = 0; i < adapter->num_queues; i++) {
1084 txr = &adapter->tx_rings[i];
1085 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1086 txdctl |= IXGBE_TXDCTL_ENABLE;
1087 /* Set WTHRESH to 8, burst writeback */
1088 txdctl |= (8 << 16);
1090 * When the internal queue falls below PTHRESH (32),
1091 * start prefetching as long as there are at least
1092 * HTHRESH (1) buffers ready. The values are taken
1093 * from the Intel linux driver 3.8.21.
1094 * Prefetching enables tx line rate even with 1 queue.
1096 txdctl |= (32 << 0) | (1 << 8);
1097 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
1100 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1101 rxr = &adapter->rx_rings[i];
1102 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1103 if (hw->mac.type == ixgbe_mac_82598EB) {
1109 rxdctl &= ~0x3FFFFF;
1112 rxdctl |= IXGBE_RXDCTL_ENABLE;
1113 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1114 for (; j < 10; j++) {
1115 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1116 IXGBE_RXDCTL_ENABLE)
1124 * In netmap mode, we must preserve the buffers made
1125 * available to userspace before the if_init()
1126 * (this is true by default on the TX side, because
1127 * init makes all buffers available to userspace).
1129 * netmap_reset() and the device specific routines
1130 * (e.g. ixgbe_setup_receive_rings()) map these
1131 * buffers at the end of the NIC ring, so here we
1132 * must set the RDT (tail) register to make sure
1133 * they are not overwritten.
1135 * In this driver the NIC ring starts at RDH = 0,
1136 * RDT points to the last slot available for reception (?),
1137 * so RDT = num_rx_desc - 1 means the whole ring is available.
1139 if (ifp->if_capenable & IFCAP_NETMAP) {
1140 struct netmap_adapter *na = NA(adapter->ifp);
1141 struct netmap_kring *kring = &na->rx_rings[i];
1142 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1144 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1146 #endif /* DEV_NETMAP */
1147 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1150 /* Enable Receive engine */
1151 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1152 if (hw->mac.type == ixgbe_mac_82598EB)
1153 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1154 rxctrl |= IXGBE_RXCTRL_RXEN;
1155 ixgbe_enable_rx_dma(hw, rxctrl);
1157 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1159 /* Set up MSI/X routing */
1160 if (ixgbe_enable_msix) {
1161 ixgbe_configure_ivars(adapter);
1162 /* Set up auto-mask */
1163 if (hw->mac.type == ixgbe_mac_82598EB)
1164 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1166 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1167 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1169 } else { /* Simple settings for Legacy/MSI */
1170 ixgbe_set_ivar(adapter, 0, 0, 0);
1171 ixgbe_set_ivar(adapter, 0, 0, 1);
1172 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1176 /* Init Flow director */
1177 if (hw->mac.type != ixgbe_mac_82598EB) {
1178 u32 hdrm = 32 << fdir_pballoc;
1180 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1181 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1186 * Check on any SFP devices that
1187 * need to be kick-started
1189 if (hw->phy.type == ixgbe_phy_none) {
1190 int err = hw->phy.ops.identify(hw);
1191 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1193 "Unsupported SFP+ module type was detected.\n");
1198 /* Set moderation on the Link interrupt */
1199 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1201 /* Configure Energy Efficient Ethernet for supported devices */
1202 ixgbe_setup_eee(hw, adapter->eee_enabled);
1204 /* Config/Enable Link */
1205 ixgbe_config_link(adapter);
1207 /* Hardware Packet Buffer & Flow Control setup */
1208 ixgbe_config_delay_values(adapter);
1210 /* Initialize the FC settings */
1213 /* Set up VLAN support and filter */
1214 ixgbe_setup_vlan_hw_support(adapter);
1216 /* Setup DMA Coalescing */
1217 ixgbe_config_dmac(adapter);
1219 /* And now turn on interrupts */
1220 ixgbe_enable_intr(adapter);
1223 /* Enable the use of the MBX by the VF's */
1225 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1226 reg |= IXGBE_CTRL_EXT_PFRSTD;
1227 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1231 /* Now inform the stack we're ready */
1232 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * if_init entry point: acquire the core lock and run the
 * locked initialization routine.
 */
ixgbe_init(void *arg)
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the GPIE register: route the SDP pins that matter for this
 * MAC (fan failure, SFP module detect, thermal sensor, external PHY
 * link) to interrupts, and enable enhanced MSI-X mode when more than
 * one vector is in use.
 */
ixgbe_config_gpie(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt (82598AT exposes it on SDP1) */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X557)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/*
 * Requires adapter->max_frame_size to be set.
 *
 * Derives the flow-control high/low watermarks from the max frame
 * size and the Rx packet-buffer size, then seeds the shared-code
 * fc settings (requested mode, pause time, XON).
 */
ixgbe_config_delay_values(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		/* older MACs use the generic delay-value formula */
		tmp = IXGBE_DV(frame, frame);
	size = IXGBE_BT2KB(tmp);
	/* RXPBSIZE is in bytes; >> 10 converts to KB to match size */
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		tmp = IXGBE_LOW_DV(frame);
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
1336 ** MSIX Interrupt Handlers and Tasklets
1341 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1343 struct ixgbe_hw *hw = &adapter->hw;
1344 u64 queue = (u64)(1 << vector);
1347 if (hw->mac.type == ixgbe_mac_82598EB) {
1348 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1349 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1351 mask = (queue & 0xFFFFFFFF);
1353 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1354 mask = (queue >> 32);
1356 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1361 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1363 struct ixgbe_hw *hw = &adapter->hw;
1364 u64 queue = (u64)(1 << vector);
1367 if (hw->mac.type == ixgbe_mac_82598EB) {
1368 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1369 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1371 mask = (queue & 0xFFFFFFFF);
1373 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1374 mask = (queue >> 32);
1376 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Deferred (taskqueue) handler for a queue: drain pending transmit
 * work while the interface is running, then re-enable the interrupt
 * source that scheduled us.
 */
ixgbe_handle_que(void *context, int pending)
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
#ifndef IXGBE_LEGACY_TX
		/* multiqueue path: drain the buf_ring */
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
		/* legacy path: drain the single ifnet send queue */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
		/* no per-queue resource: fall back to global enable */
		ixgbe_enable_intr(adapter);
1411 /*********************************************************************
1413 * Legacy Interrupt Service routine
1415 **********************************************************************/
/*
 * Legacy/MSI interrupt service routine: reads and decodes EICR, does
 * one pass of RX/TX cleanup, and dispatches fan-failure, link and
 * external-PHY events to their taskqueues.
 */
ixgbe_legacy_irq(void *arg)
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;

	/* Reading EICR acknowledges the cause bits */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Shared interrupt not for us: just re-enable and bail */
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);

	more = ixgbe_rxeof(que);

#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

		/* more work pending: reschedule ourselves */
		taskqueue_enqueue(que->tq, &que->que_task);
	ixgbe_enable_intr(adapter);
1475 /*********************************************************************
1477 * MSIX Queue Interrupt Service routine
1479 **********************************************************************/
1481 ixgbe_msix_que(void *arg)
1483 struct ix_queue *que = arg;
1484 struct adapter *adapter = que->adapter;
1485 struct ifnet *ifp = adapter->ifp;
1486 struct tx_ring *txr = que->txr;
1487 struct rx_ring *rxr = que->rxr;
1492 /* Protect against spurious interrupts */
1493 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1496 ixgbe_disable_queue(adapter, que->msix);
1499 more = ixgbe_rxeof(que);
1503 #ifdef IXGBE_LEGACY_TX
1504 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1505 ixgbe_start_locked(txr, ifp);
1507 if (!drbr_empty(ifp, txr->br))
1508 ixgbe_mq_start_locked(ifp, txr);
1510 IXGBE_TX_UNLOCK(txr);
1514 if (ixgbe_enable_aim == FALSE)
1517 ** Do Adaptive Interrupt Moderation:
1518 ** - Write out last calculated setting
1519 ** - Calculate based on average size over
1520 ** the last interval.
1522 if (que->eitr_setting)
1523 IXGBE_WRITE_REG(&adapter->hw,
1524 IXGBE_EITR(que->msix), que->eitr_setting);
1526 que->eitr_setting = 0;
1528 /* Idle, do nothing */
1529 if ((txr->bytes == 0) && (rxr->bytes == 0))
1532 if ((txr->bytes) && (txr->packets))
1533 newitr = txr->bytes/txr->packets;
1534 if ((rxr->bytes) && (rxr->packets))
1535 newitr = max(newitr,
1536 (rxr->bytes / rxr->packets));
1537 newitr += 24; /* account for hardware frame, crc */
1539 /* set an upper boundary */
1540 newitr = min(newitr, 3000);
1542 /* Be nice to the mid range */
1543 if ((newitr > 300) && (newitr < 1200))
1544 newitr = (newitr / 3);
1546 newitr = (newitr / 2);
1548 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1549 newitr |= newitr << 16;
1551 newitr |= IXGBE_EITR_CNT_WDIS;
1553 /* save for next interrupt */
1554 que->eitr_setting = newitr;
1564 taskqueue_enqueue(que->tq, &que->que_task);
1566 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSI-X "other" vector handler: decodes non-queue causes (link, flow
 * director, ECC, over-temp, mailbox, SFP module/MSF, fan failure,
 * external PHY) and dispatches each to its taskqueue.
 */
ixgbe_msix_link(void *arg)
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* 82599 and newer: flow director / ECC / thermal / mailbox */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		if (reg_eicr & IXGBE_EICR_MAILBOX)
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
	if (ixgbe_is_sfp(hw)) {
		/* SDP1: multispeed-fiber event; mod_mask: module insert */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* Re-enable only the "other" cause bits */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1656 /*********************************************************************
1658 * Media Ioctl callback
1660 * This routine is called whenever the user queries the status of
1661 * the interface using ifconfig.
1663 **********************************************************************/
/*
 * ifmedia status callback: refresh the link state, then translate the
 * (physical layer, link speed) pair into the corresponding IFM_*
 * active-media flags, plus the current pause (flow control) state.
 */
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
	/* Direct-attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
	/*
	** XXX: These need to use the proper media types once
	** backplane media types are reported via IFM_* constants.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);
1790 /*********************************************************************
1792 * Media Ioctl callback
1794 * This routine is called when the user changes speed/duplex using
1795 * media/mediopt option with ifconfig.
1797 **********************************************************************/
/*
 * ifmedia change callback: translate the requested IFM_* subtype into
 * an ixgbe_link_speed mask and hand it to the shared-code setup_link.
 * The case fallthroughs are intentional: higher-speed media also
 * advertise the lower speeds.
 */
ixgbe_media_change(struct ifnet * ifp)
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
	/* backplane media cannot be changed from here */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		/* FALLTHROUGH */
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_1000_CX: /* KX */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_100_FULL;

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * NOTE(review): this shifts the IXGBE_LINK_SPEED_* bit values
	 * themselves (e.g. 0x80 << 2) rather than producing the small
	 * 1/2/4 advertise encoding; a "(speed & X) ? bit : 0" form
	 * looks intended — confirm against the sysctl that consumes
	 * adapter->advertise.
	 */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	device_printf(adapter->dev, "Invalid media type!\n");
/*
 * Program FCTRL promiscuous bits (UPE/MPE) from the interface flags:
 * IFF_PROMISC sets both, IFF_ALLMULTI sets multicast-promiscuous
 * only, otherwise both are cleared (subject to the multicast count).
 */
ixgbe_set_promisc(struct adapter *adapter)
	struct ifnet *ifp = adapter->ifp;

	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		/* otherwise count the multicast addresses on the ifnet */
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
	if_maddr_runlock(ifp);

	/* only clear MPE if the filter table can hold all addresses */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1906 /*********************************************************************
1909 * This routine is called whenever multicast address list is updated.
1911 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

/*
 * Rebuild the hardware multicast filter from the ifnet multicast
 * list, falling back to multicast-promiscuous (MPE) when the list
 * overflows the filter table or IFF_ALLMULTI is set.
 */
ixgbe_set_multi(struct adapter *adapter)
	struct ifmultiaddr *ifma;
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

#if __FreeBSD_version < 800000
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
	if_maddr_runlock(ifp);

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/*
	 * NOTE(review): this unconditional set is redundant — every
	 * branch below fully determines UPE/MPE before the register
	 * write.  Behavior is unchanged, but the line could go.
	 */
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* program the exact filter list via the shared code */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw,
		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1973 * This is an iterator function now needed by the multicast
1974 * shared code. It simply feeds the shared code routine the
1975 * addresses in the array of ixgbe_set_multi() one by one.
1978 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1980 struct ixgbe_mc_addr *mta;
1982 mta = (struct ixgbe_mc_addr *)*update_ptr;
1985 *update_ptr = (u8*)(mta + 1);;
1990 /*********************************************************************
1993 * This routine checks for link status,updates statistics,
1994 * and runs the watchdog check.
1996 **********************************************************************/
/*
 * Periodic (1 Hz) timer: probes pluggable optics, refreshes link and
 * statistics, tracks per-queue busy state for the watchdog, and
 * reschedules itself.  Falls through to a full reset if every queue
 * reports hung.  Must be called with the core mutex held.
 */
ixgbe_local_timer(void *arg)
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
		/* Check if we've come back from hung */
		if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
			adapter->active_queues |= ((u64)1 << que->me);
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* watchdog path: mark interface down and reinitialize */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
/*
** Note: this routine updates the OS on the link state
** the real check of the hardware only happens with
** a link interrupt.
*/
ixgbe_update_link_status(struct adapter *adapter)
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up){
		/* only act on an actual transition to "up" */
		if (adapter->link_active == FALSE) {
			/* link_speed 128 == IXGBE_LINK_SPEED_10GB_FULL */
			device_printf(dev,"Link is up %d Gbps %s \n",
			    ((adapter->link_speed == 128)? 10:1),
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			ixgbe_ping_all_vfs(adapter);
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			ixgbe_ping_all_vfs(adapter);
2112 /*********************************************************************
2114 * This routine disables all traffic on the adapter by issuing a
2115 * global reset on the MAC and deallocates TX/RX buffers.
2117 **********************************************************************/
/*
 * Quiesce the adapter: mask interrupts, stop the timer, mark the
 * interface down, reset the MAC via the shared code, disable the
 * laser, and restore RAR[0].  Core mutex must be held.
 */
ixgbe_stop(void *arg)
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* clear the flag so the shared-code stop actually runs */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2155 /*********************************************************************
2157 * Determine hardware revision.
2159 **********************************************************************/
/*
 * Read PCI config-space identifiers into the shared-code hw struct,
 * enable bus mastering, resolve the MAC type, and pick the scatter
 * segment count (82598 vs 82599-and-later).
 */
ixgbe_identify_hardware(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/*
	** Make sure BUSMASTER is set
	*/
	pci_enable_busmaster(dev);

	/* We need this here to set the num_segs below */
	ixgbe_set_mac_type(hw);

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
		adapter->num_segs = IXGBE_82598_SCATTER;
2193 /*********************************************************************
2195 * Determine optic type
2197 **********************************************************************/
/*
 * Query the supported physical layer from the shared code and map it
 * to a default IFM_* media subtype, cached in adapter->optics.
 * Checks are ordered; the first matching layer wins (early return).
 */
ixgbe_setup_optics(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		adapter->optics = IFM_10G_T;
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		adapter->optics = IFM_1000_T;
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
		adapter->optics = IFM_1000_SX;
	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
		adapter->optics = IFM_10G_LR;
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		adapter->optics = IFM_10G_SR;
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
		adapter->optics = IFM_10G_TWINAX;
	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
		adapter->optics = IFM_10G_CX4;

	/* If we get here just set the default */
	adapter->optics = IFM_ETHER | IFM_AUTO;
2248 /*********************************************************************
2250 * Setup the Legacy or MSI Interrupt handler
2252 **********************************************************************/
/*
 * Allocate and wire up a single legacy/MSI interrupt: one IRQ
 * resource, a fast handler (ixgbe_legacy_irq), a per-queue taskqueue
 * for deferred RX/TX work, and the link/SFP/MSF/PHY/fdir tasklets.
 * Returns nonzero on failure.
 */
ixgbe_allocate_legacy(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
	struct tx_ring *txr = adapter->tx_rings;

	/* MSI (as opposed to INTx) uses resource id 1 */
	if (adapter->msix == 1)

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
#ifndef IXGBE_LEGACY_TX
	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
	    device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
	    que, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* tear down the taskqueues created above */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);

	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2320 /*********************************************************************
2322 * Setup MSIX Interrupt resources and handlers
2324 **********************************************************************/
/*
 * ixgbe_allocate_msix - allocate one MSI-X vector per RX/TX queue pair
 * (IRQ resource, interrupt handler, per-queue taskqueue, optional CPU
 * binding) and a final vector for link/admin events, then initialize
 * the deferred tasklets that the link handler dispatches.
 *
 * NOTE(review): this listing is missing lines from the original file
 * (return-type line, braces, error-path returns, #else branches) --
 * comments below describe only the code that is visible.
 */
2326 ixgbe_allocate_msix(struct adapter *adapter)
2328 	device_t dev = adapter->dev;
2329 	struct ix_queue *que = adapter->queues;
2330 	struct tx_ring *txr = adapter->tx_rings;
2331 	int error, rid, vector = 0;
2339 	 * If we're doing RSS, the number of queues needs to
2340 	 * match the number of RSS buckets that are configured.
2342 	 * + If there's more queues than RSS buckets, we'll end
2343 	 * up with queues that get no traffic.
2345 	 * + If there's more RSS buckets than queues, we'll end
2346 	 * up having multiple RSS buckets map to the same queue,
2347 	 * so there'll be some contention.
	/* Mismatch is tolerated -- only a performance warning is printed. */
2349 	if (adapter->num_queues != rss_getnumbuckets()) {
2351 	    "%s: number of queues (%d) != number of RSS buckets (%d)"
2352 	    "; performance will be impacted.\n",
2354 	    adapter->num_queues,
2355 	    rss_getnumbuckets());
	/* Per-queue vectors: IRQ resource, fast handler, binding, taskqueue. */
2359 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2361 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2362 		    RF_SHAREABLE | RF_ACTIVE);
2363 		if (que->res == NULL) {
2364 			device_printf(dev,"Unable to allocate"
2365 			    " bus resource: que interrupt [%d]\n", vector);
2368 		/* Set the handler function */
2369 		error = bus_setup_intr(dev, que->res,
2370 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2371 		    ixgbe_msix_que, que, &que->tag);
2374 			device_printf(dev, "Failed to register QUE handler");
2377 #if __FreeBSD_version >= 800504
2378 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Record this queue's MSI-X bit for the EIMS handling paths. */
2381 		adapter->active_queues |= (u64)(1 << que->msix);
2384 		 * The queue ID is used as the RSS layer bucket ID.
2385 		 * We look up the queue ID -> RSS CPU ID and select
2388 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2391 		 * Bind the msix vector, and thus the
2392 		 * rings to the corresponding cpu.
2394 		 * This just happens to match the default RSS round-robin
2395 		 * bucket -> queue -> CPU allocation.
2397 		if (adapter->num_queues > 1)
2400 		if (adapter->num_queues > 1)
2401 			bus_bind_intr(dev, que->res, cpu_id);
2405 		    "Bound RSS bucket %d to CPU %d\n",
2409 		    "Bound queue %d to cpu %d\n",
2412 #endif /* IXGBE_DEBUG */
2415 #ifndef IXGBE_LEGACY_TX
2416 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2418 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2419 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2420 		    taskqueue_thread_enqueue, &que->tq);
		/* RSS build: pin the taskqueue thread to the bound CPU. */
2422 		CPU_SETOF(cpu_id, &cpu_mask);
2423 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2426 		    device_get_nameunit(adapter->dev),
2429 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2430 		    device_get_nameunit(adapter->dev));
	/* And now the vector for link/admin events. */
2436 	adapter->res = bus_alloc_resource_any(dev,
2437 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2438 	if (!adapter->res) {
2439 		device_printf(dev,"Unable to allocate"
2440 		    " bus resource: Link interrupt [%d]\n", rid);
2443 	/* Set the link handler function */
2444 	error = bus_setup_intr(dev, adapter->res,
2445 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2446 	    ixgbe_msix_link, adapter, &adapter->tag);
2448 		adapter->res = NULL;
2449 		device_printf(dev, "Failed to register LINK handler");
2452 #if __FreeBSD_version >= 800504
2453 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2455 	adapter->vector = vector;
2456 	/* Tasklets for Link, SFP and Multispeed Fiber */
2457 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2458 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2459 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2461 	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2463 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2465 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	/* Single taskqueue shared by all the admin/link tasklets above. */
2467 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2468 	    taskqueue_thread_enqueue, &adapter->tq);
2469 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2470 	    device_get_nameunit(adapter->dev));
2476 * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix - probe MSI-X support, size the queue count, and
 * allocate MSI-X vectors; falls back to MSI and then legacy INTx.
 * Return value is presumably the number of vectors granted (0 for
 * legacy) -- the actual return statements are not visible in this
 * truncated listing; verify against the full source.
 */
2479 ixgbe_setup_msix(struct adapter *adapter)
2481 	device_t dev = adapter->dev;
2482 	int rid, want, queues, msgs;
2484 	/* Override by tuneable */
2485 	if (ixgbe_enable_msix == 0)
2488 	/* First try MSI/X */
2489 	msgs = pci_msix_count(dev);
	/* Map the MSI-X table BAR; 82598 and 82599 use different BARs. */
2492 	rid = PCIR_BAR(MSIX_82598_BAR);
2493 	adapter->msix_mem = bus_alloc_resource_any(dev,
2494 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2495 	if (adapter->msix_mem == NULL) {
2496 		rid += 4;	/* 82599 maps in higher BAR */
2497 		adapter->msix_mem = bus_alloc_resource_any(dev,
2498 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2500 	if (adapter->msix_mem == NULL) {
2501 		/* May not be enabled */
2502 		device_printf(adapter->dev,
2503 		    "Unable to map MSIX table \n");
2507 	/* Figure out a reasonable auto config value */
2508 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2511 	/* If we're doing RSS, clamp at the number of RSS buckets */
2512 	if (queues > rss_getnumbuckets())
2513 		queues = rss_getnumbuckets();
	/* Explicit tunable overrides the auto-computed count. */
2516 	if (ixgbe_num_queues != 0)
2517 		queues = ixgbe_num_queues;
2519 	/* reflect correct sysctl value */
2520 	ixgbe_num_queues = queues;
2523 	** Want one vector (RX/TX pair) per queue
2524 	** plus an additional for Link.
2530 		device_printf(adapter->dev,
2531 		    "MSIX Configuration Problem, "
2532 		    "%d vectors but %d queues wanted!\n",
2536 	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2537 		device_printf(adapter->dev,
2538 		    "Using MSIX interrupts with %d vectors\n", msgs);
2539 		adapter->num_queues = queues;
2543 	** If MSIX alloc failed or provided us with
2544 	** less than needed, free and fall through to MSI
2546 	pci_release_msi(dev);
	/* MSI fallback: release the now-unneeded MSI-X table mapping. */
2549 	if (adapter->msix_mem != NULL) {
2550 		bus_release_resource(dev, SYS_RES_MEMORY,
2551 		    rid, adapter->msix_mem);
2552 		adapter->msix_mem = NULL;
2555 	if (pci_alloc_msi(dev, &msgs) == 0) {
2556 		device_printf(adapter->dev,"Using an MSI interrupt\n");
2559 	device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources - map the device's register BAR, fill
 * in the osdep bus-space handles used by the register macros, and
 * kick off MSI/MSI-X setup.
 *
 * NOTE(review): listing is truncated (return-type line, BAR rid setup
 * and returns not visible); comments cover only visible code.
 */
2565 ixgbe_allocate_pci_resources(struct adapter *adapter)
2568 	device_t dev = adapter->dev;
2571 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2574 	if (!(adapter->pci_mem)) {
2575 		device_printf(dev,"Unable to allocate bus resource: memory\n");
	/* Save tag/handle so IXGBE_READ/WRITE_REG can reach the BAR. */
2579 	adapter->osdep.mem_bus_space_tag =
2580 	    rman_get_bustag(adapter->pci_mem);
2581 	adapter->osdep.mem_bus_space_handle =
2582 	    rman_get_bushandle(adapter->pci_mem);
2583 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2585 	/* Legacy defaults */
2586 	adapter->num_queues = 1;
2587 	adapter->hw.back = &adapter->osdep;
2590 	** Now setup MSI or MSI/X, should
2591 	** return us the number of supported
2592 	** vectors. (Will be 1 for MSI)
2594 	adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - tear down interrupt handlers and release
 * every bus resource acquired at attach: per-queue IRQs, the link or
 * legacy IRQ, MSI vectors, the MSI-X table BAR, and the register BAR.
 * Safe to call from a partially-failed attach (see early-out below).
 */
2599 ixgbe_free_pci_resources(struct adapter * adapter)
2601 	struct ix_queue *que = adapter->queues;
2602 	device_t dev = adapter->dev;
	/* MSI-X table lives in a different BAR on 82598 vs later MACs. */
2605 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2606 		memrid = PCIR_BAR(MSIX_82598_BAR);
2608 		memrid = PCIR_BAR(MSIX_82599_BAR);
2611 	** There is a slight possibility of a failure mode
2612 	** in attach that will result in entering this function
2613 	** before interrupt resources have been initialized, and
2614 	** in that case we do not want to execute the loops below
2615 	** We can detect this reliably by the state of the adapter
2618 	if (adapter->res == NULL)
2622 	**  Release all msix queue resources:
2624 	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* rid was allocated as msix index + 1 at setup time. */
2625 		rid = que->msix + 1;
2626 		if (que->tag != NULL) {
2627 			bus_teardown_intr(dev, que->res, que->tag);
2630 		if (que->res != NULL)
2631 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2635 	/* Clean the Legacy or Link interrupt last */
2636 	if (adapter->vector) /* we are doing MSIX */
2637 		rid = adapter->vector + 1;
2639 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2641 	if (adapter->tag != NULL) {
2642 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2643 		adapter->tag = NULL;
2645 	if (adapter->res != NULL)
2646 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2650 	pci_release_msi(dev);
2652 	if (adapter->msix_mem != NULL)
2653 		bus_release_resource(dev, SYS_RES_MEMORY,
2654 		    memrid, adapter->msix_mem);
2656 	if (adapter->pci_mem != NULL)
2657 		bus_release_resource(dev, SYS_RES_MEMORY,
2658 		    PCIR_BAR(0), adapter->pci_mem);
2663 /*********************************************************************
2665 * Setup networking device structure and register an interface.
2667 **********************************************************************/
/*
 * ixgbe_setup_interface - allocate and populate the ifnet: callbacks,
 * capabilities (checksum/TSO/LRO/VLAN/jumbo), TSO limits, transmit
 * entry points, then attach to the ethernet layer and register the
 * supported media types.
 *
 * NOTE(review): truncated listing -- local declarations, braces and
 * the return are not visible here.
 */
2669 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2673 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2675 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2677 		device_printf(dev, "can not allocate ifnet structure\n");
2680 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2681 	ifp->if_baudrate = IF_Gbps(10);
2682 	ifp->if_init = ixgbe_init;
2683 	ifp->if_softc = adapter;
2684 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2685 	ifp->if_ioctl = ixgbe_ioctl;
2686 #if __FreeBSD_version >= 1100036
2687 	if_setgetcounterfn(ifp, ixgbe_get_counter);
2689 #if __FreeBSD_version >= 1100045
2690 	/* TSO parameters */
2691 	ifp->if_hw_tsomax = 65518;
2692 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2693 	ifp->if_hw_tsomaxsegsize = 2048;
	/* Multiqueue transmit by default; legacy if_start otherwise. */
2695 #ifndef IXGBE_LEGACY_TX
2696 	ifp->if_transmit = ixgbe_mq_start;
2697 	ifp->if_qflush = ixgbe_qflush;
2699 	ifp->if_start = ixgbe_start;
2700 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2701 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2702 	IFQ_SET_READY(&ifp->if_snd);
2705 	ether_ifattach(ifp, adapter->hw.mac.addr);
2707 	adapter->max_frame_size =
2708 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2711 	 * Tell the upper layer(s) we support long frames.
2713 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2715 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2716 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2717 	ifp->if_capabilities |= IFCAP_LRO;
2718 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2722 	ifp->if_capenable = ifp->if_capabilities;
2725 	** Don't turn this on by default, if vlans are
2726 	** created on another pseudo device (eg. lagg)
2727 	** then vlan events are not passed thru, breaking
2728 	** operation, but with HW FILTER off it works. If
2729 	** using vlans directly on the ixgbe driver you can
2730 	** enable this and get full hardware tag filtering.
2732 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2735 	 * Specify the media types supported by this adapter and register
2736 	 * callbacks to update media and link information
2738 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2739 	    ixgbe_media_status);
2741 	ixgbe_add_media_types(adapter);
2743 	/* Autoselect media by default */
2744 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types - translate the PHY's supported physical
 * layers into ifmedia entries. Backplane types (KR/KX4/KX) have no
 * FreeBSD media define, so they are deliberately mapped onto the
 * nearest fiber/copper equivalents (logged for the operator).
 */
2750 ixgbe_add_media_types(struct adapter *adapter)
2752 	struct ixgbe_hw *hw = &adapter->hw;
2753 	device_t dev = adapter->dev;
2756 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2758 	/* Media types with matching FreeBSD media defines */
2759 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2760 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2761 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2762 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2763 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2764 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2766 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2767 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2768 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2770 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2771 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2772 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2773 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2774 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2775 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2776 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2777 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2780 	** Other (no matching FreeBSD media type):
2781 	** To workaround this, we'll assign these completely
2782 	** inappropriate media types.
2784 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2785 		device_printf(dev, "Media supported: 10GbaseKR\n");
2786 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2787 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2789 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2790 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2791 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2792 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2794 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2795 		device_printf(dev, "Media supported: 1000baseKX\n");
2796 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2797 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2799 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2800 		/* Someday, someone will care about you... */
2801 		device_printf(dev, "Media supported: 1000baseBX\n");
	/* 82598AT also advertises 1G modes alongside 10GBASE-T. */
2804 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2805 		ifmedia_add(&adapter->media,
2806 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2807 		ifmedia_add(&adapter->media,
2808 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2811 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link - bring up the link. SFP ports defer the real
 * work to the mod/msf tasklets; fixed-media ports query current link
 * state, resolve advertised speeds and call the MAC's setup_link.
 *
 * NOTE(review): truncated listing -- the branch structure around the
 * sfp test and error handling is only partially visible.
 */
2815 ixgbe_config_link(struct adapter *adapter)
2817 	struct ixgbe_hw *hw = &adapter->hw;
2818 	u32 autoneg, err = 0;
2819 	bool sfp, negotiate;
2821 	sfp = ixgbe_is_sfp(hw);
2824 		if (hw->phy.multispeed_fiber) {
2825 			hw->mac.ops.setup_sfp(hw);
2826 			ixgbe_enable_tx_laser(hw);
			/* Multispeed fiber: let the MSF tasklet negotiate. */
2827 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2829 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2831 		if (hw->mac.ops.check_link)
2832 			err = ixgbe_check_link(hw, &adapter->link_speed,
2833 			    &adapter->link_up, FALSE);
2836 		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: ask hardware for its capabilities. */
2837 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2838 			err = hw->mac.ops.get_link_capabilities(hw,
2839 			    &autoneg, &negotiate);
2842 		if (hw->mac.ops.setup_link)
2843 			err = hw->mac.ops.setup_link(hw,
2844 			    autoneg, adapter->link_up);
2851 /*********************************************************************
2853 * Enable transmit units.
2855 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units - program every TX ring's base
 * address, length, head/tail registers and DCA TXCTRL, then (on
 * 82599 and later) enable DMA TX and set MTQC with the arbiter
 * temporarily disabled, as the datasheet requires.
 */
2857 ixgbe_initialize_transmit_units(struct adapter *adapter)
2859 	struct tx_ring *txr = adapter->tx_rings;
2860 	struct ixgbe_hw *hw = &adapter->hw;
2862 	/* Setup the Base and Length of the Tx Descriptor Ring */
2864 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2865 		u64 tdba = txr->txdma.dma_paddr;
2869 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2870 		    (tdba & 0x00000000ffffffffULL));
2871 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2872 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
2873 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2875 		/* Setup the HW Tx Head and Tail descriptor pointers */
2876 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
2877 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2879 		/* Cache the tail address */
2880 		txr->tail = IXGBE_TDT(j);
2882 		/* Set the processing limit */
2883 		txr->process_limit = ixgbe_tx_process_limit;
2885 		/* Disable Head Writeback */
		/* TXCTRL register moved between 82598 and 82599/X540. */
2886 		switch (hw->mac.type) {
2887 		case ixgbe_mac_82598EB:
2888 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
2890 		case ixgbe_mac_82599EB:
2891 		case ixgbe_mac_X540:
2893 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
2896 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2897 		switch (hw->mac.type) {
2898 		case ixgbe_mac_82598EB:
2899 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2901 		case ixgbe_mac_82599EB:
2902 		case ixgbe_mac_X540:
2904 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2910 	if (hw->mac.type != ixgbe_mac_82598EB) {
2911 		u32 dmatxctl, rttdcs;
2913 		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
2915 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2916 		dmatxctl |= IXGBE_DMATXCTL_TE;
2917 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2918 		/* Disable arbiter to set MTQC */
2919 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2920 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2921 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2923 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
2925 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		/* Re-enable the arbiter once MTQC is programmed. */
2927 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2928 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialise_rss_mapping - program the RSS redirection table
 * (RETA/ERETA), hash key (RSSRK) and enabled hash fields (MRQC).
 * With the RSS kernel option the table follows the stack's bucket
 * layout and key; otherwise a random key and round-robin spread of
 * the local queues is used.
 */
2935 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2937 	struct ixgbe_hw *hw = &adapter->hw;
2938 	u32 reta = 0, mrqc, rss_key[10];
2939 	int queue_id, table_size, index_mult;
2941 	u32 rss_hash_config;
2944 	enum ixgbe_iov_mode mode;
2948 	/* Fetch the configured RSS key */
2949 	rss_getkey((uint8_t *) &rss_key);
2951 	/* set up random bits */
2952 	arc4rand(&rss_key, sizeof(rss_key), 0);
2955 	/* Set multiplier for RETA setup and table size based on MAC */
2958 	switch (adapter->hw.mac.type) {
2959 	case ixgbe_mac_82598EB:
2962 	case ixgbe_mac_X550:
2963 	case ixgbe_mac_X550EM_x:
2970 	/* Set up the redirection table */
2971 	for (int i = 0, j = 0; i < table_size; i++, j++) {
2972 		if (j == adapter->num_queues) j = 0;
2975 		 * Fetch the RSS bucket id for the given indirection entry.
2976 		 * Cap it at the number of configured buckets (which is
2979 		queue_id = rss_get_indirection_to_bucket(i);
2980 		queue_id = queue_id % adapter->num_queues;
2982 		queue_id = (j * index_mult);
2985 		 * The low 8 bits are for hash value (n+0);
2986 		 * The next 8 bits are for hash value (n+1), etc.
2989 		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush each completed group of 4 entries to hardware;
		 * entries past 128 go to the X550 extended table. */
2992 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2994 			IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2999 	/* Now fill our hash function seeds */
3000 	for (int i = 0; i < 10; i++)
3001 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3003 	/* Perform hash on these packet types */
3005 	mrqc = IXGBE_MRQC_RSSEN;
3006 	rss_hash_config = rss_gethashconfig();
3007 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3008 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3009 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3010 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3011 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3012 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3013 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3014 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3015 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3016 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3017 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3018 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3019 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3020 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3021 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3022 		device_printf(adapter->dev,
3023 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3024 		    "but not supported\n", __func__);
3025 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3026 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3027 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3028 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3031 	 * Disable UDP - IP fragments aren't currently being handled
3032 	 * and so we end up with a mix of 2-tuple and 4-tuple
3035 	mrqc = IXGBE_MRQC_RSSEN
3036 	     | IXGBE_MRQC_RSS_FIELD_IPV4
3037 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3038 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3039 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3040 	     | IXGBE_MRQC_RSS_FIELD_IPV6
3041 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
	/* Merge in any SR-IOV multiqueue requirements. */
3045 	mode = ixgbe_get_iov_mode(adapter);
3046 	mrqc |= ixgbe_get_mrqc(mode);
3048 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3052 /*********************************************************************
3054 * Setup receive registers and features.
3056 **********************************************************************/
3057 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3059 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units - program the RX side: FCTRL
 * (broadcasts), jumbo/CRC-strip bits in HLREG0, per-ring base/len,
 * SRRCTL buffer sizing and DROP_EN policy, PSRTYPE, then RSS and the
 * RXCSUM checksum-offload bits. RX stays disabled throughout and is
 * re-enabled by the caller.
 */
3062 ixgbe_initialize_receive_units(struct adapter *adapter)
3064 	struct rx_ring *rxr = adapter->rx_rings;
3065 	struct ixgbe_hw *hw = &adapter->hw;
3066 	struct ifnet *ifp = adapter->ifp;
3067 	u32 bufsz, fctrl, srrctl, rxcsum;
3072 	 * Make sure receives are disabled while
3073 	 * setting up the descriptor ring
3075 	ixgbe_disable_rx(hw);
3077 	/* Enable broadcasts */
3078 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3079 	fctrl |= IXGBE_FCTRL_BAM;
3080 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3081 		fctrl |= IXGBE_FCTRL_DPF;
3082 		fctrl |= IXGBE_FCTRL_PMCF;
3084 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3086 	/* Set for Jumbo Frames? */
3087 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3088 	if (ifp->if_mtu > ETHERMTU)
3089 		hlreg |= IXGBE_HLREG0_JUMBOEN;
3091 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3093 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3094 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3095 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3097 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3098 #endif /* DEV_NETMAP */
3099 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
	/* Convert the mbuf buffer size into SRRCTL's 1KB units. */
3101 	bufsz = (adapter->rx_mbuf_sz +
3102 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3104 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3105 		u64 rdba = rxr->rxdma.dma_paddr;
3108 		/* Setup the Base and Length of the Rx Descriptor Ring */
3109 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3110 		    (rdba & 0x00000000ffffffffULL));
3111 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3112 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3113 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3115 		/* Set up the SRRCTL register */
3116 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3117 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3118 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3120 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3123 		 * Set DROP_EN iff we have no flow control and >1 queue.
3124 		 * Note that srrctl was cleared shortly before during reset,
3125 		 * so we do not need to clear the bit, but do it just in case
3126 		 * this code is moved elsewhere.
3128 		if (adapter->num_queues > 1 &&
3129 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3130 			srrctl |= IXGBE_SRRCTL_DROP_EN;
3132 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3135 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3137 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3138 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3139 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3141 		/* Set the processing limit */
3142 		rxr->process_limit = ixgbe_rx_process_limit;
3144 		/* Set the driver rx tail address */
3145 		rxr->tail =  IXGBE_RDT(rxr->me);
	/* Packet-split types only exist on 82599 and later MACs. */
3148 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3149 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3150 			      IXGBE_PSRTYPE_UDPHDR |
3151 			      IXGBE_PSRTYPE_IPV4HDR |
3152 			      IXGBE_PSRTYPE_IPV6HDR;
3153 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3156 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3158 	ixgbe_initialise_rss_mapping(adapter);
3160 	if (adapter->num_queues > 1) {
3161 		/* RSS and RX IPP Checksum are mutually exclusive */
3162 		rxcsum |= IXGBE_RXCSUM_PCSD;
3165 	if (ifp->if_capenable & IFCAP_RXCSUM)
3166 		rxcsum |= IXGBE_RXCSUM_PCSD;
3168 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3169 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3171 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3178 ** This routine is run via an vlan config EVENT,
3179 ** it enables us to use the HW Filter table since
3180 ** we can get the vlan id. This just creates the
3181 ** entry in the soft version of the VFTA, init will
3182 ** repopulate the real table.
/*
 * ixgbe_register_vlan - VLAN-config event hook: record the new vtag
 * in the soft VFTA shadow and reprogram the hardware filter state.
 * init repopulates the real VFTA from this shadow after resets.
 */
3185 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3187 	struct adapter	*adapter = ifp->if_softc;
3190 	if (ifp->if_softc !=  arg)   /* Not our event */
3193 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3196 	IXGBE_CORE_LOCK(adapter);
	/* vtag maps to word (vtag >> 5) and bit (vtag & 31) of the VFTA. */
3197 	index = (vtag >> 5) & 0x7F;
3199 	adapter->shadow_vfta[index] |= (1 << bit);
3200 	++adapter->num_vlans;
3201 	ixgbe_setup_vlan_hw_support(adapter);
3202 	IXGBE_CORE_UNLOCK(adapter);
3206 ** This routine is run via an vlan
3207 ** unconfig EVENT, remove our entry
3208 ** in the soft vfta.
/*
 * ixgbe_unregister_vlan - VLAN-unconfig event hook: clear the vtag's
 * bit from the soft VFTA shadow and re-init the hardware filters.
 */
3211 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3213 	struct adapter	*adapter = ifp->if_softc;
3216 	if (ifp->if_softc !=  arg)
3219 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3222 	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit mapping as ixgbe_register_vlan. */
3223 	index = (vtag >> 5) & 0x7F;
3225 	adapter->shadow_vfta[index] &= ~(1 << bit);
3226 	--adapter->num_vlans;
3227 	/* Re-init to load the changes */
3228 	ixgbe_setup_vlan_hw_support(adapter);
3229 	IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support - push VLAN state to hardware after a
 * (soft) reset: per-queue tag stripping, VFTA repopulation from the
 * shadow table, and the global VLNCTRL filter-enable bits.
 */
3233 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3235 	struct ifnet 	*ifp = adapter->ifp;
3236 	struct ixgbe_hw *hw = &adapter->hw;
3237 	struct rx_ring	*rxr;
3242 	** We get here thru init_locked, meaning
3243 	** a soft reset, this has already cleared
3244 	** the VFTA and other state, so if there
3245 	** have been no vlan's registered do nothing.
3247 	if (adapter->num_vlans == 0)
3250 	/* Setup the queues for vlans */
3251 	for (int i = 0; i < adapter->num_queues; i++) {
3252 		rxr = &adapter->rx_rings[i];
3253 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3254 		if (hw->mac.type != ixgbe_mac_82598EB) {
3255 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3256 			ctrl |= IXGBE_RXDCTL_VME;
3257 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		/* Tell the RX path that tags arrive already stripped. */
3259 		rxr->vtag_strip = TRUE;
3262 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3265 	** A soft reset zero's out the VFTA, so
3266 	** we need to repopulate it now.
3268 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3269 		if (adapter->shadow_vfta[i] != 0)
3270 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3271 			    adapter->shadow_vfta[i]);
3273 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3274 	/* Enable the Filter Table if enabled */
3275 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3276 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3277 		ctrl |= IXGBE_VLNCTRL_VFE;
	/* 82598 has a single global VME bit instead of per-queue. */
3279 	if (hw->mac.type == ixgbe_mac_82598EB)
3280 		ctrl |= IXGBE_VLNCTRL_VME;
3281 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr - build the interrupt enable mask for the current
 * MAC generation (misc causes: ECC, SFP GPIO, thermal sensor, flow
 * director, mailbox), write EIMS, set MSI-X auto-clear policy in EIAC,
 * then enable each queue vector individually.
 */
3285 ixgbe_enable_intr(struct adapter *adapter)
3287 	struct ixgbe_hw	*hw = &adapter->hw;
3288 	struct ix_queue *que = adapter->queues;
	/* Start from everything except the per-queue RTX bits. */
3291 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3292 	/* Enable Fan Failure detection */
3293 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3294 		    mask |= IXGBE_EIMS_GPI_SDP1;
3296 	switch (adapter->hw.mac.type) {
3297 		case ixgbe_mac_82599EB:
3298 			mask |= IXGBE_EIMS_ECC;
3299 			/* Temperature sensor on some adapters */
3300 			mask |= IXGBE_EIMS_GPI_SDP0;
3301 			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3302 			mask |= IXGBE_EIMS_GPI_SDP1;
3303 			mask |= IXGBE_EIMS_GPI_SDP2;
3305 			mask |= IXGBE_EIMS_FLOW_DIR;
3308 			mask |= IXGBE_EIMS_MAILBOX;
3311 		case ixgbe_mac_X540:
3312 			/* Detect if Thermal Sensor is enabled */
3313 			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3314 			if (fwsm & IXGBE_FWSM_TS_ENABLED)
3315 				mask |= IXGBE_EIMS_TS;
3316 			mask |= IXGBE_EIMS_ECC;
3318 			mask |= IXGBE_EIMS_FLOW_DIR;
3321 		case ixgbe_mac_X550:
3322 		case ixgbe_mac_X550EM_x:
3323 			/* MAC thermal sensor is automatically enabled */
3324 			mask |= IXGBE_EIMS_TS;
3325 			/* Some devices use SDP0 for important information */
3326 			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3327 			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3328 				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3329 			mask |= IXGBE_EIMS_ECC;
3331 			mask |= IXGBE_EIMS_FLOW_DIR;
3334 			mask |= IXGBE_EIMS_MAILBOX;
3341 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3343 	/* With MSI-X we use auto clear */
3344 	if (adapter->msix_mem) {
3345 		mask = IXGBE_EIMS_ENABLE_MASK;
3346 		/* Don't autoclear Link */
3347 		mask &= ~IXGBE_EIMS_OTHER;
3348 		mask &= ~IXGBE_EIMS_LSC;
3350 		mask &= ~IXGBE_EIMS_MAILBOX;
3352 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3356 	** Now enable all queues, this is done separately to
3357 	** allow for handling the extended (beyond 32) MSIX
3358 	** vectors that can be used by 82599
3360 	for (int i = 0; i < adapter->num_queues; i++, que++)
3361 		ixgbe_enable_queue(adapter, que->msix);
3363 	IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr - mask all device interrupts: clear EIAC
 * auto-clear when MSI-X is active, then write the interrupt mask
 * clear registers (82598 uses only EIMC; later MACs also have the
 * extended EIMC_EX pair) and flush posted writes.
 */
3369 ixgbe_disable_intr(struct adapter *adapter)
3371 	if (adapter->msix_mem)
3372 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3373 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3374 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3376 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3377 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3378 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3380 	IXGBE_WRITE_FLUSH(&adapter->hw);
3385 ** Get the width and transaction speed of
3386 ** the slot this adapter is plugged into.
/*
 * ixgbe_get_slot_info - determine PCIe slot width/speed. Most parts
 * use the shared-code ixgbe_get_bus_info(); the 82599 quad-port SFP
 * part sits behind an internal switch, so we walk two levels up the
 * PCI tree and read the Link Status register of the physical slot
 * ourselves. Warns when the slot bandwidth is below what the card
 * needs for line rate.
 */
3389 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3391 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3392 	struct ixgbe_mac_info	*mac = &hw->mac;
3396 	/* For most devices simply call the shared code routine */
3397 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3398 		ixgbe_get_bus_info(hw);
3399 		/* These devices don't use PCI-E */
3400 		switch (hw->mac.type) {
3401 		case ixgbe_mac_X550EM_x:
3409 	** For the Quad port adapter we need to parse back
3410 	** up the PCI tree to find the speed of the expansion
3411 	** slot into which this adapter is plugged. A bit more work.
3413 	dev = device_get_parent(device_get_parent(dev));
3415 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3416 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3418 	dev = device_get_parent(device_get_parent(dev));
3420 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3421 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3423 	/* Now get the PCI Express Capabilities offset */
3424 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3425 	/* ...and read the Link Status Register */
3426 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3427 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3428 	case IXGBE_PCI_LINK_WIDTH_1:
3429 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3431 	case IXGBE_PCI_LINK_WIDTH_2:
3432 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3434 	case IXGBE_PCI_LINK_WIDTH_4:
3435 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3437 	case IXGBE_PCI_LINK_WIDTH_8:
3438 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3441 		hw->bus.width = ixgbe_bus_width_unknown;
3445 	switch (link & IXGBE_PCI_LINK_SPEED) {
3446 	case IXGBE_PCI_LINK_SPEED_2500:
3447 		hw->bus.speed = ixgbe_bus_speed_2500;
3449 	case IXGBE_PCI_LINK_SPEED_5000:
3450 		hw->bus.speed = ixgbe_bus_speed_5000;
3452 	case IXGBE_PCI_LINK_SPEED_8000:
3453 		hw->bus.speed = ixgbe_bus_speed_8000;
3456 		hw->bus.speed = ixgbe_bus_speed_unknown;
3460 	mac->ops.set_lan_id(hw);
3463 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3464 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3465 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3466 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3467 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3468 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3469 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	/* Dual-port parts need x8 (or x4 Gen2); warn below that. */
3472 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3473 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3474 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3475 		device_printf(dev, "PCI-Express bandwidth available"
3476 		    " for this card\n     is not sufficient for"
3477 		    " optimal performance.\n");
3478 		device_printf(dev, "For optimal performance a x8 "
3479 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3481 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3482 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3483 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3484 		device_printf(dev, "PCI-Express bandwidth available"
3485 		    " for this card\n     is not sufficient for"
3486 		    " optimal performance.\n");
3487 		device_printf(dev, "For optimal performance a x8 "
3488 		    "PCIE Gen3 slot is required.\n");
3496 ** Setup the correct IVAR register for a particular MSIX interrupt
3497 ** (yes this is all very magic and confusing :)
3498 ** - entry is the register array entry
3499 ** - vector is the MSIX vector for this queue
3500 ** - type is RX/TX/MISC
/*
 * ixgbe_set_ivar - route an interrupt cause to an MSI-X vector via
 * the IVAR registers.
 *   entry  - register array entry (queue index, or misc-cause index)
 *   vector - MSI-X vector to deliver on
 *   type   - 0 = RX, 1 = TX, -1 = misc/other causes
 * The IVAR layout differs between 82598 (one flat table, causes
 * offset by type*64) and 82599+ (per-queue-pair entries plus a
 * separate IVAR_MISC register).
 */
3503 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3505 	struct ixgbe_hw *hw = &adapter->hw;
	/* Hardware requires the valid/alloc bit set alongside the vector. */
3508 	vector |= IXGBE_IVAR_ALLOC_VAL;
3510 	switch (hw->mac.type) {
3512 	case ixgbe_mac_82598EB:
3514 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3516 			entry += (type * 64);
3517 		index = (entry >> 2) & 0x1F;
3518 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3519 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3520 		ivar |= (vector << (8 * (entry & 0x3)));
3521 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3524 	case ixgbe_mac_82599EB:
3525 	case ixgbe_mac_X540:
3526 	case ixgbe_mac_X550:
3527 	case ixgbe_mac_X550EM_x:
3528 		if (type == -1) { /* MISC IVAR */
3529 			index = (entry & 1) * 8;
3530 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3531 			ivar &= ~(0xFF << index);
3532 			ivar |= (vector << index);
3533 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3534 		} else {	/* RX/TX IVARS */
3535 			index = (16 * (entry & 1)) + (8 * type);
3536 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3537 			ivar &= ~(0xFF << index);
3538 			ivar |= (vector << index);
3539 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - map every queue's RX and TX causes to its
 * MSI-X vector, seed each vector's EITR moderation value, and route
 * the link cause to the admin vector.
 */
3548 ixgbe_configure_ivars(struct adapter *adapter)
3550 	struct  ix_queue	*que = adapter->queues;
	/* EITR in 2us units; 0 (rate tunable <= 0) disables moderation. */
3553 	if (ixgbe_max_interrupt_rate > 0)
3554 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3557 	** Disable DMA coalescing if interrupt moderation is
3564 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3565 		struct rx_ring *rxr = &adapter->rx_rings[i];
3566 		struct tx_ring *txr = &adapter->tx_rings[i];
3567 		/* First the RX queue entry */
3568 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3569 		/* ... and the TX */
3570 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3571 		/* Set an Initial EITR value */
3572 		IXGBE_WRITE_REG(&adapter->hw,
3573 		    IXGBE_EITR(que->msix), newitr);
3576 	/* For the Link interrupt */
3577 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
/*
 * ixgbe_sfp_probe - poll (from the local timer) for newly inserted optics
 * on NL-PHY ports; on success records the optics type and stops probing.
 * NOTE(review): 'result' is initialized FALSE and never set TRUE in the
 * visible lines — the extraction dropped lines here; the return path is
 * incomplete in this view.
 */
3581 ** ixgbe_sfp_probe - called in the local timer to
3582 ** determine if a port had optics inserted.
3585 ixgbe_sfp_probe(struct adapter *adapter)
3587 struct ixgbe_hw *hw = &adapter->hw;
3588 device_t dev = adapter->dev;
3589 bool result = FALSE;
/* Only probe NL PHYs that currently report no SFP module present */
3591 if ((hw->phy.type == ixgbe_phy_nl) &&
3592 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3593 s32 ret = hw->phy.ops.identify_sfp(hw);
3596 ret = hw->phy.ops.reset(hw);
3597 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3598 device_printf(dev,"Unsupported SFP+ module detected!");
3599 printf(" Reload driver with supported module.\n");
/* Unsupported module: give up probing until driver reload */
3600 adapter->sfp_probe = FALSE;
3603 device_printf(dev,"SFP+ module detected!\n");
3604 /* We now have supported optics */
3605 adapter->sfp_probe = FALSE;
3606 /* Set the optics type so system reports correctly */
3607 ixgbe_setup_optics(adapter);
/*
 * ixgbe_handle_link - taskqueue handler for the MSIX link interrupt.
 * Runs outside interrupt context because ixgbe_check_link() may sleep;
 * refreshes cached link speed/state then propagates it to the stack.
 */
3615 ** Tasklet handler for MSIX Link interrupts
3616 ** - do outside interrupt since it might sleep
3619 ixgbe_handle_link(void *context, int pending)
3621 struct adapter *adapter = context;
3623 ixgbe_check_link(&adapter->hw,
3624 &adapter->link_speed, &adapter->link_up, 0);
3625 ixgbe_update_link_status(adapter);
/*
 * ixgbe_handle_mod - taskqueue handler for SFP module insertion events.
 * Identifies the new module, runs MAC-level SFP setup, then queues the
 * multispeed-fiber task to (re)negotiate link for the new optics.
 */
3629 ** Tasklet for handling SFP module interrupts
3632 ixgbe_handle_mod(void *context, int pending)
3634 struct adapter *adapter = context;
3635 struct ixgbe_hw *hw = &adapter->hw;
3636 device_t dev = adapter->dev;
3639 err = hw->phy.ops.identify_sfp(hw);
3640 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3642 "Unsupported SFP+ module type was detected.\n");
3646 err = hw->mac.ops.setup_sfp(hw);
3647 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3649 "Setup failure - unsupported SFP+ module type.\n");
/* Module accepted: hand off to the MSF task to set up the link */
3652 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
/*
 * ixgbe_handle_msf - taskqueue handler for MSF (multispeed fiber) events.
 * Re-identifies the optics, negotiates link capabilities when no speed
 * was explicitly advertised, brings the link up, and rebuilds the ifmedia
 * list so it reflects the (possibly changed) module.
 */
3658 ** Tasklet for handling MSF (multispeed fiber) interrupts
3661 ixgbe_handle_msf(void *context, int pending)
3663 struct adapter *adapter = context;
3664 struct ixgbe_hw *hw = &adapter->hw;
3669 err = hw->phy.ops.identify_sfp(hw);
3671 ixgbe_setup_optics(adapter);
3672 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3675 autoneg = hw->phy.autoneg_advertised;
/* No explicit advertisement: ask the MAC what it can do */
3676 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3677 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3678 if (hw->mac.ops.setup_link)
3679 hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Refresh the media list: the supported set may differ per module */
3681 ifmedia_removeall(&adapter->media);
3682 ixgbe_add_media_types(adapter);
/*
 * ixgbe_handle_phy - taskqueue handler for external-PHY (LASI) interrupts.
 * Delegates to the PHY's handle_lasi op; an overtemp result gets a loud
 * console warning, any other error is just logged.
 */
3687 ** Tasklet for handling interrupts from an external PHY
3690 ixgbe_handle_phy(void *context, int pending)
3692 struct adapter *adapter = context;
3693 struct ixgbe_hw *hw = &adapter->hw;
3696 error = hw->phy.ops.handle_lasi(hw);
3697 if (error == IXGBE_ERR_OVERTEMP)
3698 device_printf(adapter->dev,
3699 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3700 " PHY will downshift to lower power state!\n");
3702 device_printf(adapter->dev,
3703 "Error handling LASI interrupt: %d\n",
/*
 * ixgbe_reinit_fdir - taskqueue handler that rebuilds the 82599 Flow
 * Director filter tables after the hardware signalled they need reinit,
 * then re-enables FDIR interrupts and marks the interface running again.
 */
3710 ** Tasklet for reinitializing the Flow Director filter table
3713 ixgbe_reinit_fdir(void *context, int pending)
3715 struct adapter *adapter = context;
3716 struct ifnet *ifp = adapter->ifp;
/* Guard: only proceed when a reinit was actually requested */
3718 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3720 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3721 adapter->fdir_reinit = 0;
3722 /* re-enable flow director interrupts */
3723 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3724 /* Restart the interface */
3725 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3730 /*********************************************************************
3732 * Configure DMA Coalescing
3734 **********************************************************************/
/*
 * ixgbe_config_dmac - push the driver's DMA-coalescing settings (watchdog
 * timer and link speed) into the shared-code config and apply them.
 * X550-class MACs only; earlier MACs and MACs without a dmac_config op
 * are skipped.
 */
3736 ixgbe_config_dmac(struct adapter *adapter)
3738 struct ixgbe_hw *hw = &adapter->hw;
3739 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3741 if (hw->mac.type < ixgbe_mac_X550 ||
3742 !hw->mac.ops.dmac_config)
/* Only reprogram when the timer or link speed actually changed */
3745 if (dcfg->watchdog_timer ^ adapter->dmac ||
3746 dcfg->link_speed ^ adapter->link_speed) {
3747 dcfg->watchdog_timer = adapter->dmac;
3748 dcfg->fcoe_en = false;
3749 dcfg->link_speed = adapter->link_speed;
3752 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3753 dcfg->watchdog_timer, dcfg->link_speed);
3755 hw->mac.ops.dmac_config(hw);
/*
 * ixgbe_check_eee_support - enable the EEE flag iff the shared code
 * provides a setup_eee op (i.e. the MAC is EEE-capable).
 */
3760 * Checks whether the adapter supports Energy Efficient Ethernet
3761 * or not, based on device ID.
3764 ixgbe_check_eee_support(struct adapter *adapter)
3766 struct ixgbe_hw *hw = &adapter->hw;
/* Capability is keyed off the presence of the setup_eee function ptr */
3768 adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
/*
 * ixgbe_check_wol_support - read the NVM device capabilities to decide
 * whether this port can Wake-on-LAN, and snapshot the current wakeup
 * filter (WUFC) configuration for later suspend/resume use.
 */
3772 * Checks whether the adapter's ports are capable of
3773 * Wake On LAN by reading the adapter's NVM.
3775 * Sets each port's hw->wol_enabled value depending
3776 * on the value read here.
3779 ixgbe_check_wol_support(struct adapter *adapter)
3781 struct ixgbe_hw *hw = &adapter->hw;
3784 /* Find out WoL support for port */
3785 adapter->wol_support = hw->wol_enabled = 0;
3786 ixgbe_get_device_caps(hw, &dev_caps);
/* WoL is supported when the caps advertise it for both ports, or for
 * port 0 specifically (the condition's tail is outside this extract). */
3787 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3788 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3790 adapter->wol_support = hw->wol_enabled = 1;
3792 /* Save initial wake up filter configuration */
3793 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
/*
 * ixgbe_setup_low_power_mode - prepare the port for suspend: program the
 * wakeup registers and enter Low Power Link Up (LPLU) on X550EM baseT
 * parts; every other adapter is simply stopped.  Caller must hold the
 * core mutex (asserted below).
 */
3799 * Prepare the adapter/port for LPLU and/or WoL
3802 ixgbe_setup_low_power_mode(struct adapter *adapter)
3804 struct ixgbe_hw *hw = &adapter->hw;
3805 device_t dev = adapter->dev;
3808 mtx_assert(&adapter->core_mtx, MA_OWNED);
3810 /* Limit power management flow to X550EM baseT */
3811 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3812 && hw->phy.ops.enter_lplu) {
3813 /* Turn off support for APM wakeup. (Using ACPI instead) */
3814 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3815 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3818 * Clear Wake Up Status register to prevent any previous wakeup
3819 * events from waking us up immediately after we suspend.
3821 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3824 * Program the Wakeup Filter Control register with user filter
3827 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3829 /* Enable wakeups and power management in Wakeup Control */
3830 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3831 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3833 /* X550EM baseT adapters need a special LPLU flow */
/* Hold off PHY resets so the LPLU state survives the stop */
3834 hw->phy.reset_disable = true;
3835 ixgbe_stop(adapter);
3836 error = hw->phy.ops.enter_lplu(hw);
3839 "Error entering LPLU: %d\n", error);
3840 hw->phy.reset_disable = false;
3842 /* Just stop for other adapters */
3843 ixgbe_stop(adapter);
3849 /**********************************************************************
3851 * Update the board statistics counters.
3853 **********************************************************************/
/*
 * ixgbe_update_stats_counters - accumulate the hardware's clear-on-read
 * statistics registers into the PF soft counters and mirror the totals
 * into the ifnet statistics.
 * NOTE(review): in the visible lines missed_rx/total_missed_rx stay 0 —
 * the per-TC MPC read loop appears to have been dropped by extraction.
 */
3855 ixgbe_update_stats_counters(struct adapter *adapter)
3857 struct ixgbe_hw *hw = &adapter->hw;
3858 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3859 u64 total_missed_rx = 0;
3861 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3862 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3863 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3864 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue packet/drop counters (first 16 queues) */
3866 for (int i = 0; i < 16; i++) {
3867 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3868 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3869 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3871 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3872 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3873 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3875 /* Hardware workaround, gprc counts missed packets */
3876 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3877 adapter->stats.pf.gprc -= missed_rx;
/* Non-82598 MACs expose 64-bit byte counters split into L/H halves */
3879 if (hw->mac.type != ixgbe_mac_82598EB) {
3880 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3881 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3882 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3883 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3884 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3885 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3886 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3887 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3889 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3890 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3891 /* 82598 only has a counter in the high register */
3892 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3893 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3894 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3898 * Workaround: mprc hardware is incorrectly counting
3899 * broadcasts, so for now we subtract those.
3901 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3902 adapter->stats.pf.bprc += bprc;
3903 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3904 if (hw->mac.type == ixgbe_mac_82598EB)
3905 adapter->stats.pf.mprc -= bprc;
3907 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3908 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3909 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3910 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3911 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3912 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3914 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3915 adapter->stats.pf.lxontxc += lxon;
3916 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3917 adapter->stats.pf.lxofftxc += lxoff;
3918 total = lxon + lxoff;
/* Pause frames are counted by the TX counters; back them out so the
 * "good packets/octets" numbers reflect only data traffic. */
3920 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3921 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3922 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3923 adapter->stats.pf.gptc -= total;
3924 adapter->stats.pf.mptc -= total;
3925 adapter->stats.pf.ptc64 -= total;
3926 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3928 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3929 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3930 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3931 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3932 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3933 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3934 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3935 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3936 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3937 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3938 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3939 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3940 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3941 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3942 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3943 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3944 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3945 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3946 /* Only read FCOE on 82599 */
3947 if (hw->mac.type != ixgbe_mac_82598EB) {
3948 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3949 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3950 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3951 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3952 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3955 /* Fill out the OS statistics structure */
3956 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3957 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3958 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3959 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3960 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3961 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3962 IXGBE_SET_COLLISIONS(adapter, 0);
3963 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3964 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3965 + adapter->stats.pf.rlec);
3968 #if __FreeBSD_version >= 1100036
/*
 * ixgbe_get_counter - if_get_counter method: serve ifnet statistics from
 * the driver's soft counters; OQDROPS is summed live from the per-queue
 * buf_ring drop counts, anything unhandled falls through to the stack's
 * default accounting.
 */
3970 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3972 struct adapter *adapter;
3973 struct tx_ring *txr;
3976 adapter = if_getsoftc(ifp);
3979 case IFCOUNTER_IPACKETS:
3980 return (adapter->ipackets);
3981 case IFCOUNTER_OPACKETS:
3982 return (adapter->opackets);
3983 case IFCOUNTER_IBYTES:
3984 return (adapter->ibytes);
3985 case IFCOUNTER_OBYTES:
3986 return (adapter->obytes);
3987 case IFCOUNTER_IMCASTS:
3988 return (adapter->imcasts);
3989 case IFCOUNTER_OMCASTS:
3990 return (adapter->omcasts);
3991 case IFCOUNTER_COLLISIONS:
3993 case IFCOUNTER_IQDROPS:
3994 return (adapter->iqdrops);
3995 case IFCOUNTER_OQDROPS:
/* TX drops live in each ring's buf_ring; total them on demand */
3997 txr = adapter->tx_rings;
3998 for (int i = 0; i < adapter->num_queues; i++, txr++)
3999 rv += txr->br->br_drops;
4001 case IFCOUNTER_IERRORS:
4002 return (adapter->ierrors);
4004 return (if_get_counter_default(ifp, cnt));
/*
 * Sysctl handler: report the live TX Descriptor Head (TDH) register for
 * the ring passed via oid_arg1.  Read-only; the sysctl value is a
 * point-in-time hardware read.
 */
4009 /** ixgbe_sysctl_tdh_handler - Handler function
4010 * Retrieves the TDH value from the hardware
4013 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4017 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4020 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4021 error = sysctl_handle_int(oidp, &val, 0, req);
4022 if (error || !req->newptr)
/*
 * Sysctl handler: report the live TX Descriptor Tail (TDT) register for
 * the ring passed via oid_arg1.  Same pattern as the TDH handler.
 */
4027 /** ixgbe_sysctl_tdt_handler - Handler function
4028 * Retrieves the TDT value from the hardware
4031 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4035 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4038 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4039 error = sysctl_handle_int(oidp, &val, 0, req);
4040 if (error || !req->newptr)
/*
 * Sysctl handler: report the live RX Descriptor Head (RDH) register for
 * the ring passed via oid_arg1.
 */
4045 /** ixgbe_sysctl_rdh_handler - Handler function
4046 * Retrieves the RDH value from the hardware
4049 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4053 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4056 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4057 error = sysctl_handle_int(oidp, &val, 0, req);
4058 if (error || !req->newptr)
/*
 * Sysctl handler: report the live RX Descriptor Tail (RDT) register for
 * the ring passed via oid_arg1.
 */
4063 /** ixgbe_sysctl_rdt_handler - Handler function
4064 * Retrieves the RDT value from the hardware
4067 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4071 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4074 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4075 error = sysctl_handle_int(oidp, &val, 0, req);
4076 if (error || !req->newptr)
/*
 * Sysctl handler for per-queue interrupt rate: reads the queue's EITR
 * register, converts the interval to interrupts/sec for display, and on
 * write converts a requested rate back into an EITR interval (clamped to
 * 0..500000 ints/sec; 0 disables the limit).  The written rate also
 * updates the global ixgbe_max_interrupt_rate tunable.
 */
4082 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4085 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4086 unsigned int reg, usec, rate;
4088 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* EITR interval field: bits 3..11, in units that map to usec here */
4089 usec = ((reg & 0x0FF8) >> 3);
4091 rate = 500000 / usec;
4094 error = sysctl_handle_int(oidp, &rate, 0, req);
4095 if (error || !req->newptr)
4097 reg &= ~0xfff; /* default, no limitation */
4098 ixgbe_max_interrupt_rate = 0;
4099 if (rate > 0 && rate < 500000) {
4102 ixgbe_max_interrupt_rate = rate;
4103 reg |= ((4000000/rate) & 0xff8 );
4105 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * ixgbe_add_device_sysctls - register the device-level sysctl tree:
 * common knobs (flow control, AIM, advertised speed, thermal test), plus
 * conditional nodes for X550 DMA-coalescing, EEE (when setup_eee exists),
 * WoL (10GBaseT X550 variants) and external-PHY readouts (X550EM 10GBaseT).
 */
4110 ixgbe_add_device_sysctls(struct adapter *adapter)
4112 device_t dev = adapter->dev;
4113 struct ixgbe_hw *hw = &adapter->hw;
4114 struct sysctl_oid_list *child;
4115 struct sysctl_ctx_list *ctx;
4117 ctx = device_get_sysctl_ctx(dev);
4118 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4120 /* Sysctls for all devices */
4121 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4122 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4123 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4125 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4127 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4129 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4130 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4131 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4133 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4134 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4135 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4137 /* for X550 devices */
4138 if (hw->mac.type >= ixgbe_mac_X550)
4139 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4140 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4141 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4143 /* for X550T and X550EM backplane devices */
/* EEE capability is keyed off the presence of the setup_eee op */
4144 if (hw->mac.ops.setup_eee) {
4145 struct sysctl_oid *eee_node;
4146 struct sysctl_oid_list *eee_list;
4148 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4150 "Energy Efficient Ethernet sysctls");
4151 eee_list = SYSCTL_CHILDREN(eee_node);
4153 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4154 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4155 ixgbe_sysctl_eee_enable, "I",
4156 "Enable or Disable EEE");
4158 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4159 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4160 ixgbe_sysctl_eee_negotiated, "I",
4161 "EEE negotiated on link");
4163 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4164 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4165 ixgbe_sysctl_eee_tx_lpi_status, "I",
4166 "Whether or not TX link is in LPI state");
4168 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4169 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4170 ixgbe_sysctl_eee_rx_lpi_status, "I",
4171 "Whether or not RX link is in LPI state");
4174 /* for certain 10GBaseT devices */
4175 if (hw->device_id == IXGBE_DEV_ID_X550T ||
4176 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4177 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4178 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4179 ixgbe_sysctl_wol_enable, "I",
4180 "Enable/Disable Wake on LAN");
4182 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4183 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4184 ixgbe_sysctl_wufc, "I",
4185 "Enable/Disable Wake Up Filters");
4188 /* for X550EM 10GBaseT devices */
4189 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4190 struct sysctl_oid *phy_node;
4191 struct sysctl_oid_list *phy_list;
4193 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4195 "External PHY sysctls");
4196 phy_list = SYSCTL_CHILDREN(phy_node);
4198 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4199 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4200 ixgbe_sysctl_phy_temp, "I",
4201 "Current External PHY Temperature (Celsius)");
4203 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4204 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4205 ixgbe_sysctl_phy_overtemp_occurred, "I",
4206 "External PHY High Temperature Event Occurred");
/*
 * ixgbe_add_hw_stats - register sysctl nodes for driver-level counters,
 * one node per TX queue, one node per RX queue, and a "mac_stats" subtree
 * exporting the accumulated hardware statistics.
 */
4211 * Add sysctl variables, one per statistic, to the system.
4214 ixgbe_add_hw_stats(struct adapter *adapter)
4216 device_t dev = adapter->dev;
4218 struct tx_ring *txr = adapter->tx_rings;
4219 struct rx_ring *rxr = adapter->rx_rings;
4221 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4222 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4223 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4224 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4226 struct sysctl_oid *stat_node, *queue_node;
4227 struct sysctl_oid_list *stat_list, *queue_list;
4229 #define QUEUE_NAME_LEN 32
4230 char namebuf[QUEUE_NAME_LEN];
4232 /* Driver Statistics */
4233 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4234 CTLFLAG_RD, &adapter->dropped_pkts,
4235 "Driver dropped packets");
4236 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4237 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4238 "m_defrag() failed");
4239 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4240 CTLFLAG_RD, &adapter->watchdog_events,
4241 "Watchdog timeouts");
4242 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4243 CTLFLAG_RD, &adapter->link_irq,
4244 "Link MSIX IRQ Handled");
/* Per-TX-queue nodes */
4246 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4247 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4248 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4249 CTLFLAG_RD, NULL, "Queue Name");
4250 queue_list = SYSCTL_CHILDREN(queue_node);
4252 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4253 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4254 sizeof(&adapter->queues[i]),
4255 ixgbe_sysctl_interrupt_rate_handler, "IU",
4257 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4258 CTLFLAG_RD, &(adapter->queues[i].irqs),
4259 "irqs on this queue");
4260 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4261 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4262 ixgbe_sysctl_tdh_handler, "IU",
4263 "Transmit Descriptor Head");
4264 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4265 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4266 ixgbe_sysctl_tdt_handler, "IU",
4267 "Transmit Descriptor Tail");
4268 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4269 CTLFLAG_RD, &txr->tso_tx,
4271 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4272 CTLFLAG_RD, &txr->no_tx_dma_setup,
4273 "Driver tx dma failure in xmit");
4274 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4275 CTLFLAG_RD, &txr->no_desc_avail,
4276 "Queue No Descriptor Available");
4277 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4278 CTLFLAG_RD, &txr->total_packets,
4279 "Queue Packets Transmitted");
4280 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4281 CTLFLAG_RD, &txr->br->br_drops,
4282 "Packets dropped in buf_ring");
/* Per-RX-queue nodes */
4285 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4286 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4287 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4288 CTLFLAG_RD, NULL, "Queue Name");
4289 queue_list = SYSCTL_CHILDREN(queue_node);
4291 struct lro_ctrl *lro = &rxr->lro;
/* NOTE(review): the snprintf/SYSCTL_ADD_NODE just above is repeated
 * below with the same name — the node is created twice per RX queue.
 * Looks like a duplication defect; confirm against the upstream driver
 * before removing either copy. */
4293 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4294 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4295 CTLFLAG_RD, NULL, "Queue Name");
4296 queue_list = SYSCTL_CHILDREN(queue_node);
4298 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4299 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4300 ixgbe_sysctl_rdh_handler, "IU",
4301 "Receive Descriptor Head");
4302 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4303 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4304 ixgbe_sysctl_rdt_handler, "IU",
4305 "Receive Descriptor Tail");
4306 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4307 CTLFLAG_RD, &rxr->rx_packets,
4308 "Queue Packets Received");
4309 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4310 CTLFLAG_RD, &rxr->rx_bytes,
4311 "Queue Bytes Received");
4312 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4313 CTLFLAG_RD, &rxr->rx_copies,
4314 "Copied RX Frames");
4315 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4316 CTLFLAG_RD, &lro->lro_queued, 0,
4318 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4319 CTLFLAG_RD, &lro->lro_flushed, 0,
4323 /* MAC stats get the own sub node */
4325 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4326 CTLFLAG_RD, NULL, "MAC Statistics");
4327 stat_list = SYSCTL_CHILDREN(stat_node);
4329 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4330 CTLFLAG_RD, &stats->crcerrs,
4332 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4333 CTLFLAG_RD, &stats->illerrc,
4334 "Illegal Byte Errors");
4335 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4336 CTLFLAG_RD, &stats->errbc,
4338 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4339 CTLFLAG_RD, &stats->mspdc,
4340 "MAC Short Packets Discarded");
4341 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4342 CTLFLAG_RD, &stats->mlfc,
4343 "MAC Local Faults");
4344 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4345 CTLFLAG_RD, &stats->mrfc,
4346 "MAC Remote Faults");
4347 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4348 CTLFLAG_RD, &stats->rlec,
4349 "Receive Length Errors");
4351 /* Flow Control stats */
4352 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4353 CTLFLAG_RD, &stats->lxontxc,
4354 "Link XON Transmitted");
4355 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4356 CTLFLAG_RD, &stats->lxonrxc,
4357 "Link XON Received");
4358 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4359 CTLFLAG_RD, &stats->lxofftxc,
4360 "Link XOFF Transmitted");
4361 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4362 CTLFLAG_RD, &stats->lxoffrxc,
4363 "Link XOFF Received");
4365 /* Packet Reception Stats */
4366 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4367 CTLFLAG_RD, &stats->tor,
4368 "Total Octets Received");
4369 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4370 CTLFLAG_RD, &stats->gorc,
4371 "Good Octets Received");
4372 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4373 CTLFLAG_RD, &stats->tpr,
4374 "Total Packets Received");
4375 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4376 CTLFLAG_RD, &stats->gprc,
4377 "Good Packets Received");
4378 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4379 CTLFLAG_RD, &stats->mprc,
4380 "Multicast Packets Received");
4381 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4382 CTLFLAG_RD, &stats->bprc,
4383 "Broadcast Packets Received");
4384 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4385 CTLFLAG_RD, &stats->prc64,
4386 "64 byte frames received ");
4387 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4388 CTLFLAG_RD, &stats->prc127,
4389 "65-127 byte frames received");
4390 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4391 CTLFLAG_RD, &stats->prc255,
4392 "128-255 byte frames received");
4393 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4394 CTLFLAG_RD, &stats->prc511,
4395 "256-511 byte frames received");
4396 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4397 CTLFLAG_RD, &stats->prc1023,
4398 "512-1023 byte frames received");
4399 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4400 CTLFLAG_RD, &stats->prc1522,
4401 "1023-1522 byte frames received");
4402 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4403 CTLFLAG_RD, &stats->ruc,
4404 "Receive Undersized");
4405 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4406 CTLFLAG_RD, &stats->rfc,
4407 "Fragmented Packets Received ");
4408 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4409 CTLFLAG_RD, &stats->roc,
4410 "Oversized Packets Received");
4411 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4412 CTLFLAG_RD, &stats->rjc,
4414 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4415 CTLFLAG_RD, &stats->mngprc,
4416 "Management Packets Received");
/* NOTE(review): "management_pkts_drpd" exports mngptc (management pkts
 * TRANSMITTED) — mngpdc looks intended; verify against upstream. */
4417 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4418 CTLFLAG_RD, &stats->mngptc,
4419 "Management Packets Dropped");
4420 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4421 CTLFLAG_RD, &stats->xec,
4424 /* Packet Transmission Stats */
4425 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4426 CTLFLAG_RD, &stats->gotc,
4427 "Good Octets Transmitted");
4428 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4429 CTLFLAG_RD, &stats->tpt,
4430 "Total Packets Transmitted");
4431 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4432 CTLFLAG_RD, &stats->gptc,
4433 "Good Packets Transmitted");
4434 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4435 CTLFLAG_RD, &stats->bptc,
4436 "Broadcast Packets Transmitted");
4437 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4438 CTLFLAG_RD, &stats->mptc,
4439 "Multicast Packets Transmitted");
4440 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4441 CTLFLAG_RD, &stats->mngptc,
4442 "Management Packets Transmitted");
4443 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4444 CTLFLAG_RD, &stats->ptc64,
4445 "64 byte frames transmitted ");
4446 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4447 CTLFLAG_RD, &stats->ptc127,
4448 "65-127 byte frames transmitted");
4449 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4450 CTLFLAG_RD, &stats->ptc255,
4451 "128-255 byte frames transmitted");
4452 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4453 CTLFLAG_RD, &stats->ptc511,
4454 "256-511 byte frames transmitted");
4455 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4456 CTLFLAG_RD, &stats->ptc1023,
4457 "512-1023 byte frames transmitted");
4458 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4459 CTLFLAG_RD, &stats->ptc1522,
4460 "1024-1522 byte frames transmitted");
/*
 * ixgbe_set_flowcntl - sysctl handler for the "fc" knob: validate the
 * requested flow-control mode, toggle per-queue RX drop to match (drop
 * must be off when pause frames are in use and multiple queues exist),
 * then force the mode into hardware with autoneg disabled.
 */
4464 ** Set flow control using sysctl:
4465 ** Flow control values:
4472 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4475 struct adapter *adapter = (struct adapter *) arg1;
4478 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4479 if ((error) || (req->newptr == NULL))
4482 /* Don't bother if it's not changed */
4483 if (adapter->fc == last)
4486 switch (adapter->fc) {
4487 case ixgbe_fc_rx_pause:
4488 case ixgbe_fc_tx_pause:
4490 adapter->hw.fc.requested_mode = adapter->fc;
/* Pause in play: RX drop would defeat flow control, disable it */
4491 if (adapter->num_queues > 1)
4492 ixgbe_disable_rx_drop(adapter);
4495 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4496 if (adapter->num_queues > 1)
4497 ixgbe_enable_rx_drop(adapter);
4503 /* Don't autoneg if forcing a value */
4504 adapter->hw.fc.disable_fc_autoneg = TRUE;
4505 ixgbe_fc_enable(&adapter->hw);
/*
 * ixgbe_set_advertise - sysctl handler for "advertise_speed": accepts a
 * bitmask (0x1=100Mb, 0x2=1G, 0x4=10G), validates it against the media
 * type and MAC (100Mb only on X540/X550 copper), translates it into
 * IXGBE_LINK_SPEED_* flags and restarts link setup.
 */
4510 ** Control advertised link speed:
4512 ** 0x1 - advertise 100 Mb
4513 ** 0x2 - advertise 1G
4514 ** 0x4 - advertise 10G
4517 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4519 int error = 0, requested;
4520 struct adapter *adapter;
4522 struct ixgbe_hw *hw;
4523 ixgbe_link_speed speed = 0;
4525 adapter = (struct adapter *) arg1;
4529 requested = adapter->advertise;
4530 error = sysctl_handle_int(oidp, &requested, 0, req);
4531 if ((error) || (req->newptr == NULL))
4534 /* Checks to validate new value */
4535 if (adapter->advertise == requested) /* no change */
/* Advertisement control only makes sense on copper or MSF media */
4538 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4539 (hw->phy.multispeed_fiber))) {
4541 "Advertised speed can only be set on copper or "
4542 "multispeed fiber media types.\n");
4546 if (requested < 0x1 || requested > 0x7) {
4548 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4552 if ((requested & 0x1)
4553 && (hw->mac.type != ixgbe_mac_X540)
4554 && (hw->mac.type != ixgbe_mac_X550)) {
4555 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4559 /* Set new value and report new advertised mode */
4560 if (requested & 0x1)
4561 speed |= IXGBE_LINK_SPEED_100_FULL;
4562 if (requested & 0x2)
4563 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4564 if (requested & 0x4)
4565 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4567 hw->mac.autotry_restart = TRUE;
4568 hw->mac.ops.setup_link(hw, speed, TRUE);
4569 adapter->advertise = requested;
/*
 * ixgbe_sysctl_phy_temp - read-only sysctl: fetch the external PHY's
 * current temperature register over MDIO (X550EM 10GBaseT only) and
 * return it, shifted into displayable units, via sysctl_handle_int.
 */
4575 * The following two sysctls are for X550 BaseT devices;
4576 * they deal with the external PHY used in them.
4579 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4581 struct adapter *adapter = (struct adapter *) arg1;
4582 struct ixgbe_hw *hw = &adapter->hw;
/* Only the X550EM 10GBaseT variant carries this external sensor */
4585 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4586 device_printf(adapter->dev,
4587 "Device has no supported external thermal sensor.\n");
4591 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4592 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4594 device_printf(adapter->dev,
4595 "Error reading from PHY's current temperature register\n");
4599 /* Shift temp for output */
4602 return (sysctl_handle_int(oidp, NULL, reg, req));
/*
 * ixgbe_sysctl_phy_overtemp_occurred - read-only sysctl: report whether
 * the external PHY has latched an overtemp event (bit 0x4000 of its
 * status register), X550EM 10GBaseT only.
 */
4606 * Reports whether the current PHY temperature is over
4607 * the overtemp threshold.
4608 * - This is reported directly from the PHY
4611 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4613 struct adapter *adapter = (struct adapter *) arg1;
4614 struct ixgbe_hw *hw = &adapter->hw;
4617 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4618 device_printf(adapter->dev,
4619 "Device has no supported external thermal sensor.\n");
4623 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4624 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4626 device_printf(adapter->dev,
4627 "Error reading from PHY's temperature status register\n");
4631 /* Get occurrence bit */
/* Normalize the latched bit to 0/1 for the sysctl consumer */
4632 reg = !!(reg & 0x4000);
4633 return (sysctl_handle_int(oidp, 0, reg, req));
4637 ** Thermal Shutdown Trigger (internal MAC)
4638 ** - Set this to 1 to cause an overtemp event to occur
/*
 * Sysctl handler: thermal shutdown test trigger for the internal MAC.
 * Writing a non-zero value sets the thermal-sensor bit (IXGBE_EICR_TS)
 * in the EICS register, forcing an overtemp interrupt to fire.
 */
4641 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4643 struct adapter *adapter = (struct adapter *) arg1;
4644 struct ixgbe_hw *hw = &adapter->hw;
4645 int error, fire = 0;
4647 error = sysctl_handle_int(oidp, &fire, 0, req);
/* Read-only access (no new value supplied) returns without action. */
4648 if ((error) || (req->newptr == NULL))
4652 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4653 reg |= IXGBE_EICR_TS;
4654 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4661 ** Manage DMA Coalescing.
4663 ** 0/1 - off / on (use default value of 1000)
4665 ** Legal timer values are:
4666 ** 50,100,250,500,1000,2000,5000,10000
4668 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler: manage DMA Coalescing (adapter->dmac).
 * Only supported on X550-class MACs; other MAC types are rejected
 * and print an error.  A value of 1 selects the default timer (1000);
 * an out-of-range value restores the previous setting.  If the
 * interface is running, the hardware is re-initialized to apply
 * the change.
 */
4671 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4673 struct adapter *adapter = (struct adapter *) arg1;
4674 struct ixgbe_hw *hw = &adapter->hw;
4675 struct ifnet *ifp = adapter->ifp;
/* Remember the old value so an invalid request can be rolled back. */
4679 oldval = adapter->dmac;
4680 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4681 if ((error) || (req->newptr == NULL))
4684 switch (hw->mac.type) {
4685 case ixgbe_mac_X550:
4686 case ixgbe_mac_X550EM_x:
4689 device_printf(adapter->dev,
4690 "DMA Coalescing is only supported on X550 devices\n");
4694 switch (adapter->dmac) {
4698 case 1: /* Enable and use default */
4699 adapter->dmac = 1000;
4709 /* Legal values - allow */
4712 /* Do nothing, illegal value */
4713 adapter->dmac = oldval;
4717 /* Re-initialize hardware if it's already running */
4718 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4719 ixgbe_init(adapter);
4725 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Sysctl handler: enable/disable Wake-on-LAN (hw->wol_enabled).
 * Enabling is refused when the adapter lacks WoL support
 * (adapter->wol_support).  The stored value is normalized to 0/1.
 */
4731 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4733 struct adapter *adapter = (struct adapter *) arg1;
4734 struct ixgbe_hw *hw = &adapter->hw;
4735 int new_wol_enabled;
4738 new_wol_enabled = hw->wol_enabled;
4739 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4740 if ((error) || (req->newptr == NULL))
/* No-op if the requested state matches the current state. */
4742 if (new_wol_enabled == hw->wol_enabled)
4745 if (new_wol_enabled > 0 && !adapter->wol_support)
4748 hw->wol_enabled = !!(new_wol_enabled);
4754 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4755 * if supported by the adapter.
/*
 * Sysctl handler: enable/disable Energy Efficient Ethernet
 * (adapter->eee_enabled).  Enabling is refused when the MAC has no
 * setup_eee operation.  Re-initializes the hardware if the interface
 * is running so the new setting takes effect.
 */
4761 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4763 struct adapter *adapter = (struct adapter *) arg1;
4764 struct ixgbe_hw *hw = &adapter->hw;
4765 struct ifnet *ifp = adapter->ifp;
4766 int new_eee_enabled, error = 0;
4768 new_eee_enabled = adapter->eee_enabled;
4769 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4770 if ((error) || (req->newptr == NULL))
4772 if (new_eee_enabled == adapter->eee_enabled)
4775 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
4778 adapter->eee_enabled = !!(new_eee_enabled);
4780 /* Re-initialize hardware if it's already running */
4781 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4782 ixgbe_init(adapter);
4788 * Read-only sysctl indicating whether EEE support was negotiated
/*
 * Read-only sysctl: 1 if EEE support was negotiated on the link
 * (IXGBE_EEE_STAT_NEG bit of the EEE_STAT register), else 0.
 */
4792 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4794 struct adapter *adapter = (struct adapter *) arg1;
4795 struct ixgbe_hw *hw = &adapter->hw;
4798 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4800 return (sysctl_handle_int(oidp, 0, status, req));
4804 * Read-only sysctl indicating whether RX Link is in LPI state.
/*
 * Read-only sysctl: 1 if the RX link is currently in EEE
 * Low Power Idle state (IXGBE_EEE_RX_LPI_STATUS), else 0.
 */
4807 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4809 struct adapter *adapter = (struct adapter *) arg1;
4810 struct ixgbe_hw *hw = &adapter->hw;
4813 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4814 IXGBE_EEE_RX_LPI_STATUS);
4816 return (sysctl_handle_int(oidp, 0, status, req));
4820 * Read-only sysctl indicating whether TX Link is in LPI state.
/*
 * Read-only sysctl: 1 if the TX link is currently in EEE
 * Low Power Idle state (IXGBE_EEE_TX_LPI_STATUS), else 0.
 */
4823 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4825 struct adapter *adapter = (struct adapter *) arg1;
4826 struct ixgbe_hw *hw = &adapter->hw;
4829 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4830 IXGBE_EEE_TX_LPI_STATUS);
4832 return (sysctl_handle_int(oidp, 0, status, req));
4836 * Sysctl to enable/disable the types of packets that the
4837 * adapter will wake up on upon receipt.
4838 * WUFC - Wake Up Filter Control
4840 * 0x1 - Link Status Change
4841 * 0x2 - Magic Packet
4842 * 0x4 - Direct Exact
4843 * 0x8 - Directed Multicast
4845 * 0x20 - ARP/IPv4 Request Packet
4846 * 0x40 - Direct IPv4 Packet
4847 * 0x80 - Direct IPv6 Packet
4849 * Setting another flag will cause the sysctl to return an
/*
 * Sysctl handler: set the Wake Up Filter Control (WUFC) flags in
 * adapter->wufc.  Only the low byte of flags is user-settable;
 * requests with any bit above 0xff set are rejected.
 */
4853 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4855 struct adapter *adapter = (struct adapter *) arg1;
4859 new_wufc = adapter->wufc;
4861 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4862 if ((error) || (req->newptr == NULL))
4864 if (new_wufc == adapter->wufc)
/* Refuse any flag outside the supported low byte. */
4867 if (new_wufc & 0xffffff00)
/* Preserve the non-user-settable upper bits of the stored value. */
4871 new_wufc |= (0xffffff & adapter->wufc);
4872 adapter->wufc = new_wufc;
4879 ** Enable the hardware to drop packets when the buffer is
4880 ** full. This is useful when multiqueue, so that no single
4881 ** queue being full stalls the entire RX engine. We only
4882 ** enable this when Multiqueue AND when Flow Control is
/*
 * Set the drop-enable bit (SRRCTL.DROP_EN) on every RX queue, and
 * the per-VF queue-drop enable (QDE) for every VF, so a full receive
 * buffer is dropped instead of stalling the whole RX engine.
 */
4886 ixgbe_enable_rx_drop(struct adapter *adapter)
4888 struct ixgbe_hw *hw = &adapter->hw;
4890 for (int i = 0; i < adapter->num_queues; i++) {
4891 struct rx_ring *rxr = &adapter->rx_rings[i];
4892 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4893 srrctl |= IXGBE_SRRCTL_DROP_EN;
4894 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4897 /* enable drop for each vf */
4898 for (int i = 0; i < adapter->num_vfs; i++) {
4899 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4900 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Inverse of ixgbe_enable_rx_drop(): clear SRRCTL.DROP_EN on every
 * RX queue and clear the per-VF queue-drop enable via QDE writes.
 */
4907 ixgbe_disable_rx_drop(struct adapter *adapter)
4909 struct ixgbe_hw *hw = &adapter->hw;
4911 for (int i = 0; i < adapter->num_queues; i++) {
4912 struct rx_ring *rxr = &adapter->rx_rings[i];
4913 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4914 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4915 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4918 /* disable drop for each vf */
4919 for (int i = 0; i < adapter->num_vfs; i++) {
4920 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4921 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Re-arm interrupts for the queues in the 64-bit 'queues' bitmask by
 * writing the interrupt cause set registers.  82598 has a single EICS
 * register; newer MACs split the mask across EICS_EX(0)/EICS_EX(1).
 */
4927 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4931 switch (adapter->hw.mac.type) {
4932 case ixgbe_mac_82598EB:
4933 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4934 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4936 case ixgbe_mac_82599EB:
4937 case ixgbe_mac_X540:
4938 case ixgbe_mac_X550:
4939 case ixgbe_mac_X550EM_x:
/* Low 32 queue bits go to EICS_EX(0), high 32 to EICS_EX(1). */
4940 mask = (queues & 0xFFFFFFFF);
4941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4942 mask = (queues >> 32);
4943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4953 ** Support functions for SRIOV/VF management
/*
 * Send a PF control message to every active VF (used to notify VFs
 * of PF state changes such as link events or resets).
 */
4957 ixgbe_ping_all_vfs(struct adapter *adapter)
4959 struct ixgbe_vf *vf;
4961 for (int i = 0; i < adapter->num_vfs; i++) {
4962 vf = &adapter->vfs[i];
4963 if (vf->flags & IXGBE_VF_ACTIVE)
4964 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program the per-pool VMOLR/VMVIR registers for a VF's default VLAN.
 * With a non-zero tag, all of the VF's outgoing traffic is tagged
 * (VLANA_DEFAULT) and untagged receive is disabled (~AUPE).
 */
4970 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
4973 struct ixgbe_hw *hw;
4974 uint32_t vmolr, vmvir;
4980 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
4982 /* Do not receive packets that pass inexact filters. */
4983 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
4985 /* Disable Multicast Promiscuous Mode. */
4986 vmolr &= ~IXGBE_VMOLR_MPE;
4988 /* Accept broadcasts. */
4989 vmolr |= IXGBE_VMOLR_BAM;
4992 /* Accept non-vlan tagged traffic. */
4993 //vmolr |= IXGBE_VMOLR_AUPE;
4995 /* Allow VM to tag outgoing traffic; no default tag. */
4998 /* Require vlan-tagged traffic. */
4999 vmolr &= ~IXGBE_VMOLR_AUPE;
5001 /* Tag all traffic with provided vlan tag. */
5002 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5004 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5005 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the PF's and this VF's max frame sizes can coexist.
 * Only 82599 has restrictions; the rules depend on the VF's
 * negotiated mailbox API version.
 */
5010 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5014 * Frame size compatibility between PF and VF is only a problem on
5015 * 82599-based cards. X540 and later support any combination of jumbo
5016 * frames on PFs and VFs.
5018 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5021 switch (vf->api_ver) {
5022 case IXGBE_API_VER_1_0:
5023 case IXGBE_API_VER_UNKNOWN:
5025 * On legacy (1.0 and older) VF versions, we don't support jumbo
5026 * frames on either the PF or the VF.
5028 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5029 vf->max_frame_size > ETHER_MAX_LEN)
5035 case IXGBE_API_VER_1_1:
5038 * 1.1 or later VF versions always work if they aren't using
5041 if (vf->max_frame_size <= ETHER_MAX_LEN)
5045 * Jumbo frames only work with VFs if the PF is also using jumbo
5048 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Reset a VF's PF-side state: restore its default VLAN, clear its
 * receive-address (RAR) slot, and forget the negotiated API version.
 */
5058 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5060 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5062 // XXX clear multicast addresses
5064 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5066 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/*
 * Enable transmit for a VF by setting its pool bit in the
 * appropriate VFTE (VF Transmit Enable) register.
 */
5071 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5073 struct ixgbe_hw *hw;
5074 uint32_t vf_index, vfte;
5078 vf_index = IXGBE_VF_INDEX(vf->pool);
5079 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5080 vfte |= IXGBE_VF_BIT(vf->pool);
5081 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Enable (or disable) receive for a VF via its VFRE pool bit.
 * Receive is only enabled when the VF's frame size is compatible
 * with the PF's (see ixgbe_vf_frame_size_compatible).
 */
5086 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5088 struct ixgbe_hw *hw;
5089 uint32_t vf_index, vfre;
5093 vf_index = IXGBE_VF_INDEX(vf->pool);
5094 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5095 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5096 vfre |= IXGBE_VF_BIT(vf->pool);
5098 vfre &= ~IXGBE_VF_BIT(vf->pool);
5099 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Handle an IXGBE_VF_RESET mailbox message: reset PF-side VF state,
 * re-program the VF's MAC into its RAR slot if valid (ACK/NACK
 * accordingly), re-enable TX/RX, mark clear-to-send, and reply with
 * the permanent-address message (MAC in resp[1..], multicast filter
 * type in resp[3]).
 */
5104 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5106 struct ixgbe_hw *hw;
5108 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5112 ixgbe_process_vf_reset(adapter, vf);
5114 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5115 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5116 vf->ether_addr, vf->pool, TRUE);
5117 ack = IXGBE_VT_MSGTYPE_ACK;
5119 ack = IXGBE_VT_MSGTYPE_NACK;
5121 ixgbe_vf_enable_transmit(adapter, vf);
5122 ixgbe_vf_enable_receive(adapter, vf);
5124 vf->flags |= IXGBE_VF_CTS;
5126 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5127 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5128 resp[3] = hw->mac.mc_filter_type;
5129 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a VF request to change its MAC address.  NACK if the VF
 * lacks the CAP_MAC privilege and the address actually changed, or
 * if the address is invalid; otherwise store it, program the RAR,
 * and ACK.
 */
5134 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5138 mac = (uint8_t*)&msg[1];
5140 /* Check that the VF has permission to change the MAC address. */
5141 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5142 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5146 if (ixgbe_validate_mac_addr(mac) != 0) {
5147 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5151 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5153 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5156 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5161 ** VF multicast addresses are set by using the appropriate bit in
5162 ** 1 of 128 32 bit addresses (4096 possible).
/*
 * Handle a VF multicast-list update.  The message carries 12-bit
 * multicast hashes (count in the MSGINFO field, capped at
 * IXGBE_MAX_VF_MC); each hash selects one bit of the 128x32-bit MTA.
 * Enables ROMPE so the VF receives traffic matching the MTA, then ACKs.
 */
5165 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5167 u16 *list = (u16*)&msg[1];
5169 u32 vmolr, vec_bit, vec_reg, mta_reg;
5171 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5172 entries = min(entries, IXGBE_MAX_VF_MC);
5174 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5176 vf->num_mc_hashes = entries;
5178 /* Set the appropriate MTA bit */
5179 for (int i = 0; i < entries; i++) {
5180 vf->mc_hash[i] = list[i];
/* Upper 7 bits select the MTA register, low 5 bits the bit within it. */
5181 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5182 vec_bit = vf->mc_hash[i] & 0x1F;
5183 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5184 mta_reg |= (1 << vec_bit);
5185 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5188 vmolr |= IXGBE_VMOLR_ROMPE;
5189 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5190 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a VF request to add/remove a VLAN filter.  NACK if the VF
 * lacks the CAP_VLAN privilege or tries to enable VLAN 0; otherwise
 * update the VLAN filter table (VFTA) for the VF's pool and ACK.
 */
5196 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5198 struct ixgbe_hw *hw;
5203 enable = IXGBE_VT_MSGINFO(msg[0]);
5204 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5206 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5207 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5211 /* It is illegal to enable vlan tag 0. */
5212 if (tag == 0 && enable != 0){
5213 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5217 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5218 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a VF "set large packet enable" (max frame size) request.
 * Out-of-range requests are intentionally ACKed without effect.
 * Otherwise the VF's max frame size is recorded, receive is
 * re-evaluated for compatibility, and the PF's MHADD.MFS field is
 * raised if it is below the PF's own max frame size.
 */
5223 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5225 struct ixgbe_hw *hw;
5226 uint32_t vf_max_size, pf_max_size, mhadd;
5229 vf_max_size = msg[1];
5231 if (vf_max_size < ETHER_CRC_LEN) {
5232 /* We intentionally ACK invalid LPE requests. */
5233 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* The request includes the CRC; store the size without it. */
5237 vf_max_size -= ETHER_CRC_LEN;
5239 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5240 /* We intentionally ACK invalid LPE requests. */
5241 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5245 vf->max_frame_size = vf_max_size;
5246 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5249 * We might have to disable reception to this VF if the frame size is
5250 * not compatible with the config on the PF.
5252 ixgbe_vf_enable_receive(adapter, vf);
5254 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5255 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5257 if (pf_max_size < adapter->max_frame_size) {
5258 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5259 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5260 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5263 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * MAC/VLAN filter requests from VFs are not implemented yet;
 * unconditionally NACK them.
 */
5268 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5271 //XXX implement this
5272 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle mailbox API version negotiation.  Versions 1.0 and 1.1 are
 * accepted (stored in vf->api_ver, then ACKed); anything else resets
 * the version to UNKNOWN and NACKs.
 */
5277 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5282 case IXGBE_API_VER_1_0:
5283 case IXGBE_API_VER_1_1:
5284 vf->api_ver = msg[1];
5285 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5288 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5289 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle an IXGBE_VF_GET_QUEUES request: reply with the VF's TX/RX
 * queue counts for the current IOV mode, whether a transparent
 * (default) VLAN is in force, and the default queue (0).  NACKed for
 * pre-1.1 API versions, which do not support this message.
 */
5296 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5299 struct ixgbe_hw *hw;
5300 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5305 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5307 case IXGBE_API_VER_1_0:
5308 case IXGBE_API_VER_UNKNOWN:
5309 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5313 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5314 IXGBE_VT_MSGTYPE_CTS;
5316 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5317 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5318 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5319 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5320 resp[IXGBE_VF_DEF_QUEUE] = 0;
5322 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one mailbox message from a VF and dispatch it.  A RESET
 * message is always honored; every other message is NACKed unless
 * the VF has completed a reset handshake (IXGBE_VF_CTS set).
 * Unknown message types are NACKed.
 */
5327 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5329 struct ixgbe_hw *hw;
5330 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5335 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5340 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5341 adapter->ifp->if_xname, msg[0], vf->pool);
5342 if (msg[0] == IXGBE_VF_RESET) {
5343 ixgbe_vf_reset_msg(adapter, vf, msg);
5347 if (!(vf->flags & IXGBE_VF_CTS)) {
5348 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5352 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5353 case IXGBE_VF_SET_MAC_ADDR:
5354 ixgbe_vf_set_mac(adapter, vf, msg);
5356 case IXGBE_VF_SET_MULTICAST:
5357 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5359 case IXGBE_VF_SET_VLAN:
5360 ixgbe_vf_set_vlan(adapter, vf, msg);
5362 case IXGBE_VF_SET_LPE:
5363 ixgbe_vf_set_lpe(adapter, vf, msg);
5365 case IXGBE_VF_SET_MACVLAN:
5366 ixgbe_vf_set_macvlan(adapter, vf, msg);
5368 case IXGBE_VF_API_NEGOTIATE:
5369 ixgbe_vf_api_negotiate(adapter, vf, msg);
5371 case IXGBE_VF_GET_QUEUES:
5372 ixgbe_vf_get_queues(adapter, vf, msg);
5375 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5381 * Tasklet for handling VF -> PF mailbox messages.
/*
 * Mailbox taskqueue handler.  Under the core lock, poll every active
 * VF for a pending reset, message, or ack, and process each.
 */
5384 ixgbe_handle_mbx(void *context, int pending)
5386 struct adapter *adapter;
5387 struct ixgbe_hw *hw;
5388 struct ixgbe_vf *vf;
5394 IXGBE_CORE_LOCK(adapter);
5395 for (i = 0; i < adapter->num_vfs; i++) {
5396 vf = &adapter->vfs[i];
5398 if (vf->flags & IXGBE_VF_ACTIVE) {
/* The check_for_* helpers return 0 when the event is pending. */
5399 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5400 ixgbe_process_vf_reset(adapter, vf);
5402 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5403 ixgbe_process_vf_msg(adapter, vf);
5405 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5406 ixgbe_process_vf_ack(adapter, vf);
5409 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI SR-IOV init method: validate the requested VF count against the
 * current IOV mode, allocate the VF state array, and re-initialize
 * the adapter.  On failure, num_vfs is reset to 0.
 */
5414 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5416 struct adapter *adapter;
5417 enum ixgbe_iov_mode mode;
5419 adapter = device_get_softc(dev);
5420 adapter->num_vfs = num_vfs;
5421 mode = ixgbe_get_iov_mode(adapter);
5423 if (num_vfs > ixgbe_max_vfs(mode)) {
5424 adapter->num_vfs = 0;
5428 IXGBE_CORE_LOCK(adapter);
5430 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5433 if (adapter->vfs == NULL) {
5434 adapter->num_vfs = 0;
5435 IXGBE_CORE_UNLOCK(adapter);
5439 ixgbe_init_locked(adapter);
5441 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI SR-IOV uninit method: re-enable rx/tx for the PF pool only,
 * clear VFRE/VFTE for the VF pools, disable virtualization (VT_CTL),
 * and free the VF state array.
 */
5448 ixgbe_uninit_iov(device_t dev)
5450 struct ixgbe_hw *hw;
5451 struct adapter *adapter;
5452 uint32_t pf_reg, vf_reg;
5454 adapter = device_get_softc(dev);
5457 IXGBE_CORE_LOCK(adapter);
5459 /* Enable rx/tx for the PF and disable it for all VFs. */
5460 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5461 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5462 IXGBE_VF_BIT(adapter->pool));
5463 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5464 IXGBE_VF_BIT(adapter->pool));
5470 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5471 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5473 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5475 free(adapter->vfs, M_IXGBE);
5476 adapter->vfs = NULL;
5477 adapter->num_vfs = 0;
5479 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the hardware virtualization registers for the current IOV
 * mode: MRQC (RX queueing), MTQC (TX queueing), GCR_EXT and GPIE
 * (32- vs 64-VF mode), PF rx/tx enables, VM-to-VM loopback, VT_CTL
 * (default pool + replication), then per-VF init.  Must be called
 * with the core lock held; no-op when SR-IOV is not active.
 *
 * Fix: the GPIE VTMODE field was being cleared in gcr_ext (already
 * written back above) instead of gpie, so stale VTMODE bits in GPIE
 * were never cleared before OR-ing in the new mode.
 */
5484 ixgbe_initialize_iov(struct adapter *adapter)
5486 struct ixgbe_hw *hw = &adapter->hw;
5487 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5488 enum ixgbe_iov_mode mode;
5491 mode = ixgbe_get_iov_mode(adapter);
5492 if (mode == IXGBE_NO_VM)
5495 IXGBE_CORE_LOCK_ASSERT(adapter);
/* RX: select VMDq+RSS queueing for the active pool count. */
5497 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5498 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5502 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5505 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5508 panic("Unexpected SR-IOV mode %d", mode);
5510 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* TX: enable VT mode with the matching VF count. */
5512 mtqc = IXGBE_MTQC_VT_ENA;
5515 mtqc |= IXGBE_MTQC_64VF;
5518 mtqc |= IXGBE_MTQC_32VF;
5521 panic("Unexpected SR-IOV mode %d", mode);
5523 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5526 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5527 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5528 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5531 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5534 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5537 panic("Unexpected SR-IOV mode %d", mode);
5539 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5542 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Clear the VTMODE field of GPIE (was wrongly applied to gcr_ext). */
5543 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5546 gpie |= IXGBE_GPIE_VTMODE_64;
5549 gpie |= IXGBE_GPIE_VTMODE_32;
5552 panic("Unexpected SR-IOV mode %d", mode);
5554 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5556 /* Enable rx/tx for the PF. */
5557 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5558 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5559 IXGBE_VF_BIT(adapter->pool));
5560 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5561 IXGBE_VF_BIT(adapter->pool));
5563 /* Allow VM-to-VM communication. */
5564 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5566 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5567 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5568 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5570 for (i = 0; i < adapter->num_vfs; i++)
5571 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5576 ** Check the max frame setting of all active VF's
/*
 * Re-derive the adapter max frame setting from every active VF's
 * max frame size.  Core lock must be held.
 */
5579 ixgbe_recalculate_max_frame(struct adapter *adapter)
5581 struct ixgbe_vf *vf;
5583 IXGBE_CORE_LOCK_ASSERT(adapter);
5585 for (int i = 0; i < adapter->num_vfs; i++) {
5586 vf = &adapter->vfs[i];
5587 if (vf->flags & IXGBE_VF_ACTIVE)
5588 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * (Re)initialize one VF's hardware state: unmask its mailbox
 * interrupt (PFMBIMR), restore its VLAN/RAR settings, enable TX/RX,
 * and notify the VF with a PF control message.  Core lock must be
 * held; inactive VFs are skipped.
 */
5594 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5596 struct ixgbe_hw *hw;
5597 uint32_t vf_index, pfmbimr;
5599 IXGBE_CORE_LOCK_ASSERT(adapter);
5603 if (!(vf->flags & IXGBE_VF_ACTIVE))
5606 vf_index = IXGBE_VF_INDEX(vf->pool);
5607 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5608 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5609 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5611 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5613 // XXX multicast addresses
5615 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5616 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5617 vf->ether_addr, vf->pool, TRUE);
5620 ixgbe_vf_enable_transmit(adapter, vf);
5621 ixgbe_vf_enable_receive(adapter, vf);
5623 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * PCI SR-IOV add-VF method: populate the VF's software state from the
 * nvlist config (MAC address, allow-set-mac capability), mark it
 * active, and initialize its hardware state.  If no MAC is configured,
 * the VF is allowed to choose its own.
 */
5627 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5629 struct adapter *adapter;
5630 struct ixgbe_vf *vf;
5633 adapter = device_get_softc(dev);
5635 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5636 vfnum, adapter->num_vfs));
5638 IXGBE_CORE_LOCK(adapter);
5639 vf = &adapter->vfs[vfnum];
5642 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5643 vf->rar_index = vfnum + 1;
5644 vf->default_vlan = 0;
5645 vf->max_frame_size = ETHER_MAX_LEN;
5646 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5648 if (nvlist_exists_binary(config, "mac-addr")) {
5649 mac = nvlist_get_binary(config, "mac-addr", NULL);
5650 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5651 if (nvlist_get_bool(config, "allow-set-mac"))
5652 vf->flags |= IXGBE_VF_CAP_MAC;
5655 * If the administrator has not specified a MAC address then
5656 * we must allow the VF to choose one.
5658 vf->flags |= IXGBE_VF_CAP_MAC;
5660 vf->flags = IXGBE_VF_ACTIVE;
5662 ixgbe_init_vf(adapter, vf);
5663 IXGBE_CORE_UNLOCK(adapter);
5667 #endif /* PCI_IOV */