1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
49 /*********************************************************************
50 * Set this to one to display debug statistics
51 *********************************************************************/
52 int ixgbe_display_debug_stats = 0; /* runtime toggle; non-zero enables extra debug-statistics output */
54 /*********************************************************************
56 *********************************************************************/
/* Driver version string; appended to the device description in ixgbe_probe(). */
57 char ixgbe_driver_version[] = "3.1.0";
59 /*********************************************************************
62 * Used by probe to select devices to load on
63 * Last field stores an index into ixgbe_strings
64 * Last entry must be all 0s
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67 *********************************************************************/
/*
 * PCI match table walked by ixgbe_probe(): { vendor, device, subvendor,
 * subdevice, string index }.  A subvendor/subdevice of 0 acts as a wildcard.
 * NOTE(review): this extract is truncated -- the initializer's opening brace,
 * the required all-zeros terminator entry and the closing "};" are not
 * visible here; confirm against the complete file before editing.
 */
69 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
101 /* required last entry */
105 /*********************************************************************
106 * Table of branding strings
107 *********************************************************************/
/* Branding strings indexed by the last field of ixgbe_vendor_info_array. */
109 static char *ixgbe_strings[] = {
110 "Intel(R) PRO/10GbE PCI-Express Network Driver"
/* NOTE(review): the closing "};" is not visible in this extract -- confirm against the full file. */
113 /*********************************************************************
114 * Function prototypes
115 *********************************************************************/
/* Device interface entry points (wired into ix_methods below) */
116 static int ixgbe_probe(device_t);
117 static int ixgbe_attach(device_t);
118 static int ixgbe_detach(device_t);
119 static int ixgbe_shutdown(device_t);
120 static int ixgbe_suspend(device_t);
121 static int ixgbe_resume(device_t);
122 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
123 static void ixgbe_init(void *);
124 static void ixgbe_init_locked(struct adapter *);
125 static void ixgbe_stop(void *);
126 #if __FreeBSD_version >= 1100036
127 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* NOTE(review): the matching #endif is not visible in this extract. */
129 static void ixgbe_add_media_types(struct adapter *);
130 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
131 static int ixgbe_media_change(struct ifnet *);
132 static void ixgbe_identify_hardware(struct adapter *);
133 static int ixgbe_allocate_pci_resources(struct adapter *);
134 static void ixgbe_get_slot_info(struct ixgbe_hw *);
/* Interrupt-vector allocation and teardown */
135 static int ixgbe_allocate_msix(struct adapter *);
136 static int ixgbe_allocate_legacy(struct adapter *);
137 static int ixgbe_setup_msix(struct adapter *);
138 static void ixgbe_free_pci_resources(struct adapter *);
139 static void ixgbe_local_timer(void *);
140 static int ixgbe_setup_interface(device_t, struct adapter *);
141 static void ixgbe_config_gpie(struct adapter *);
142 static void ixgbe_config_dmac(struct adapter *);
143 static void ixgbe_config_delay_values(struct adapter *);
144 static void ixgbe_config_link(struct adapter *);
145 static void ixgbe_check_eee_support(struct adapter *);
146 static void ixgbe_check_wol_support(struct adapter *);
147 static int ixgbe_setup_low_power_mode(struct adapter *);
148 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring bring-up */
150 static void ixgbe_initialize_transmit_units(struct adapter *);
151 static void ixgbe_initialize_receive_units(struct adapter *);
152 static void ixgbe_enable_rx_drop(struct adapter *);
153 static void ixgbe_disable_rx_drop(struct adapter *);
155 static void ixgbe_enable_intr(struct adapter *);
156 static void ixgbe_disable_intr(struct adapter *);
157 static void ixgbe_update_stats_counters(struct adapter *);
158 static void ixgbe_set_promisc(struct adapter *);
159 static void ixgbe_set_multi(struct adapter *);
160 static void ixgbe_update_link_status(struct adapter *);
161 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
162 static void ixgbe_configure_ivars(struct adapter *);
163 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware-assist support */
165 static void ixgbe_setup_vlan_hw_support(struct adapter *);
166 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
167 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
169 static void ixgbe_add_device_sysctls(struct adapter *);
170 static void ixgbe_add_hw_stats(struct adapter *);
172 /* Sysctl handlers */
173 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
174 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
184 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
186 /* Support for pluggable optic modules */
187 static bool ixgbe_sfp_probe(struct adapter *);
188 static void ixgbe_setup_optics(struct adapter *);
190 /* Legacy (single vector) interrupt handler */
191 static void ixgbe_legacy_irq(void *);
193 /* The MSI/X Interrupt handlers */
194 static void ixgbe_msix_que(void *);
195 static void ixgbe_msix_link(void *);
197 /* Deferred interrupt tasklets */
198 static void ixgbe_handle_que(void *, int);
199 static void ixgbe_handle_link(void *, int);
200 static void ixgbe_handle_msf(void *, int);
201 static void ixgbe_handle_mod(void *, int);
202 static void ixgbe_handle_phy(void *, int);
205 static void ixgbe_reinit_fdir(void *, int);
/* SR-IOV (PF side) support -- presumably guarded by PCI_IOV in the full file; verify */
209 static void ixgbe_ping_all_vfs(struct adapter *);
210 static void ixgbe_handle_mbx(void *, int);
211 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
212 static void ixgbe_uninit_iov(device_t);
213 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
214 static void ixgbe_initialize_iov(struct adapter *);
215 static void ixgbe_recalculate_max_frame(struct adapter *);
216 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
220 /*********************************************************************
221 * FreeBSD Device Interface Entry Points
222 *********************************************************************/
/* newbus method table mapping generic device operations to this driver. */
224 static device_method_t ix_methods[] = {
225 /* Device interface */
226 DEVMETHOD(device_probe, ixgbe_probe),
227 DEVMETHOD(device_attach, ixgbe_attach),
228 DEVMETHOD(device_detach, ixgbe_detach),
229 DEVMETHOD(device_shutdown, ixgbe_shutdown),
230 DEVMETHOD(device_suspend, ixgbe_suspend),
231 DEVMETHOD(device_resume, ixgbe_resume),
/* SR-IOV interface */
233 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
234 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
235 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* NOTE(review): DEVMETHOD_END and the closing "};" are not visible in this extract. */
/* Driver descriptor: name "ix", softc is the per-port struct adapter. */
240 static driver_t ix_driver = {
241 "ix", ix_methods, sizeof(struct adapter),
/* NOTE(review): the closing "};" of ix_driver is not visible in this extract. */
244 devclass_t ix_devclass;
/* Register the driver on the pci bus and declare module dependencies. */
245 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
247 MODULE_DEPEND(ix, pci, 1, 1, 1);
248 MODULE_DEPEND(ix, ether, 1, 1, 1);
/* Loader tunables and sysctls under hw.ix (see SYSCTL_NODE below). */
251 ** TUNEABLE PARAMETERS:
254 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
255 "IXGBE driver parameters");
258 ** AIM: Adaptive Interrupt Moderation
259 ** which means that the interrupt rate
260 ** is varied over time based on the
261 ** traffic for that interrupt vector
263 static int ixgbe_enable_aim = TRUE;
264 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
265 "Enable adaptive interrupt moderation");
/* Interrupt-rate ceiling used when AIM is computing EITR values. */
267 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
268 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
269 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
271 /* How many packets rxeof tries to clean at a time */
272 static int ixgbe_rx_process_limit = 256;
273 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
274 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
275 &ixgbe_rx_process_limit, 0,
276 "Maximum number of received packets to process at a time,"
277 "-1 means unlimited");
279 /* How many packets txeof tries to clean at a time */
280 static int ixgbe_tx_process_limit = 256;
281 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
282 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
283 &ixgbe_tx_process_limit, 0,
284 "Maximum number of sent packets to process at a time,"
285 "-1 means unlimited");
288 ** Smart speed setting, default to on
289 ** this only works as a compile option
290 ** right now as its during attach, set
291 ** this to 'ixgbe_smart_speed_off' to
294 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
297 * MSIX should be the default for best performance,
298 * but this allows it to be forced off for testing.
300 static int ixgbe_enable_msix = 1;
301 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
302 "Enable MSI-X interrupts");
305 * Number of Queues, can be set to 0,
306 * it then autoconfigures based on the
307 * number of cpus with a max of 8. This
308 * can be overriden manually here.
310 static int ixgbe_num_queues = 0;
311 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
312 "Number of queues to configure, 0 indicates autoconfigure");
315 ** Number of TX descriptors per ring,
316 ** setting higher than RX as this seems
317 ** the better performing choice.
319 static int ixgbe_txd = PERFORM_TXD;
320 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
321 "Number of transmit descriptors per queue");
323 /* Number of RX descriptors per ring */
324 static int ixgbe_rxd = PERFORM_RXD;
325 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
326 "Number of receive descriptors per queue");
329 ** Defining this on will allow the use
330 ** of unsupported SFP+ modules, note that
331 ** doing so you are on your own :)
333 static int allow_unsupported_sfp = FALSE;
334 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
336 /* Keep running tab on them for sanity check */
337 static int ixgbe_total_ports;
341 ** Flow Director actually 'steals'
342 ** part of the packet buffer as its
343 ** filter pool, this variable controls
345 ** 0 = 64K, 1 = 128K, 2 = 256K
347 static int fdir_pballoc = 1;
352 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
353 * be a reference on how to implement netmap support in a driver.
354 * Additional comments are in ixgbe_netmap.h .
356 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
357 * that extend the standard driver.
/* NOTE(review): the opening "#ifdef DEV_NETMAP" for this #endif is not visible in this extract. */
359 #include <dev/netmap/ixgbe_netmap.h>
360 #endif /* DEV_NETMAP */
/* malloc(9) type tag used for this driver's allocations. */
362 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
364 /*********************************************************************
365 * Device identification routine
367 * ixgbe_probe determines if the driver should be loaded on
368 * adapter based on PCI vendor/device id of the adapter.
370 * return BUS_PROBE_DEFAULT on success, positive on failure
371 *********************************************************************/
/*
 * Match the PCI vendor/device/subsystem IDs against ixgbe_vendor_info_array;
 * on a hit, set the device description and return BUS_PROBE_DEFAULT.
 * NOTE(review): this extract is elided -- the "static int" line, braces, the
 * ent++ loop advance and the final "return (ENXIO);" are not visible here.
 */
374 ixgbe_probe(device_t dev)
376 ixgbe_vendor_info_t *ent;
378 u16 pci_vendor_id = 0;
379 u16 pci_device_id = 0;
380 u16 pci_subvendor_id = 0;
381 u16 pci_subdevice_id = 0;
382 char adapter_name[256];
384 INIT_DEBUGOUT("ixgbe_probe: begin");
386 pci_vendor_id = pci_get_vendor(dev);
/* Bail out early for non-Intel devices. */
387 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
390 pci_device_id = pci_get_device(dev);
391 pci_subvendor_id = pci_get_subvendor(dev);
392 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table until the all-zeros terminator entry. */
394 ent = ixgbe_vendor_info_array;
395 while (ent->vendor_id != 0) {
/* Subvendor/subdevice of 0 in the table acts as a wildcard. */
396 if ((pci_vendor_id == ent->vendor_id) &&
397 (pci_device_id == ent->device_id) &&
399 ((pci_subvendor_id == ent->subvendor_id) ||
400 (ent->subvendor_id == 0)) &&
402 ((pci_subdevice_id == ent->subdevice_id) ||
403 (ent->subdevice_id == 0))) {
404 sprintf(adapter_name, "%s, Version - %s",
405 ixgbe_strings[ent->index],
406 ixgbe_driver_version);
407 device_set_desc_copy(dev, adapter_name);
409 return (BUS_PROBE_DEFAULT);
416 /*********************************************************************
417 * Device initialization routine
419 * The attach entry point is called when the driver is being loaded.
420 * This routine identifies the type of hardware, allocates all resources
421 * and initializes the hardware.
423 * return 0 on success, positive on failure
424 *********************************************************************/
/*
 * Attach: identify hardware, map BAR0, size and allocate the TX/RX rings,
 * init the shared code and hardware, hook up interrupts, create the ifnet,
 * register VLAN/SR-IOV/netmap support and sysctls.
 * NOTE(review): this extract is heavily elided -- local declarations
 * (hw, error, csum, ctrl_ext), braces, "goto err_*" paths, "return (0)"
 * and the error labels themselves are not visible here.
 */
427 ixgbe_attach(device_t dev)
429 struct adapter *adapter;
435 INIT_DEBUGOUT("ixgbe_attach: begin");
437 /* Allocate, clear, and link in our adapter structure */
438 adapter = device_get_softc(dev);
439 adapter->dev = adapter->osdep.dev = dev;
443 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
445 /* Set up the timer callout */
446 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
448 /* Determine hardware revision */
449 ixgbe_identify_hardware(adapter);
451 /* Do base PCI setup - map BAR0 */
452 if (ixgbe_allocate_pci_resources(adapter)) {
453 device_printf(dev, "Allocation of PCI resources failed\n");
458 /* Do descriptor calc and sanity checks */
/* TX ring byte size must be DBA_ALIGN-aligned and within MIN/MAX_TXD. */
459 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
460 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
461 device_printf(dev, "TXD config issue, using default!\n");
462 adapter->num_tx_desc = DEFAULT_TXD;
464 adapter->num_tx_desc = ixgbe_txd;
467 ** With many RX rings it is easy to exceed the
468 ** system mbuf allocation. Tuning nmbclusters
469 ** can alleviate this.
471 if (nmbclusters > 0) {
473 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
474 if (s > nmbclusters) {
475 device_printf(dev, "RX Descriptors exceed "
476 "system mbuf max, using default instead!\n");
477 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range sanity check for the RX ring. */
481 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
482 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
483 device_printf(dev, "RXD config issue, using default!\n");
484 adapter->num_rx_desc = DEFAULT_RXD;
486 adapter->num_rx_desc = ixgbe_rxd;
488 /* Allocate our TX/RX Queues */
489 if (ixgbe_allocate_queues(adapter)) {
494 /* Allocate multicast array memory. */
495 adapter->mta = malloc(sizeof(*adapter->mta) *
496 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
497 if (adapter->mta == NULL) {
498 device_printf(dev, "Can not allocate multicast setup array\n");
503 /* Initialize the shared code */
504 hw->allow_unsupported_sfp = allow_unsupported_sfp;
505 error = ixgbe_init_shared_code(hw);
506 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
508 ** No optics in this port, set up
509 ** so the timer routine will probe
510 ** for later insertion.
512 adapter->sfp_probe = TRUE;
514 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
515 device_printf(dev,"Unsupported SFP+ module detected!\n");
519 device_printf(dev,"Unable to initialize the shared code\n");
524 /* Make sure we have a good EEPROM before we read from it */
525 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
526 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
/* Reset/init the MAC; a switch on the result follows (switch head elided). */
531 error = ixgbe_init_hw(hw);
533 case IXGBE_ERR_EEPROM_VERSION:
534 device_printf(dev, "This device is a pre-production adapter/"
535 "LOM. Please be aware there may be issues associated "
536 "with your hardware.\n If you are experiencing problems "
537 "please contact your Intel or hardware representative "
538 "who provided you with this hardware.\n");
540 case IXGBE_ERR_SFP_NOT_SUPPORTED:
541 device_printf(dev,"Unsupported SFP+ Module\n");
544 case IXGBE_ERR_SFP_NOT_PRESENT:
545 device_printf(dev,"No SFP+ Module found\n");
551 /* Detect and set physical type */
552 ixgbe_setup_optics(adapter);
/* Prefer MSI-X when more than one vector was granted and it is enabled. */
554 if ((adapter->msix > 1) && (ixgbe_enable_msix))
555 error = ixgbe_allocate_msix(adapter);
557 error = ixgbe_allocate_legacy(adapter);
561 /* Setup OS specific network interface */
562 if (ixgbe_setup_interface(dev, adapter) != 0)
565 /* Initialize statistics */
566 ixgbe_update_stats_counters(adapter);
568 /* Register for VLAN events */
569 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
570 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
571 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
572 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
574 /* Check PCIE slot type/speed/width */
575 ixgbe_get_slot_info(hw);
578 /* Set an initial default flow control value */
579 adapter->fc = ixgbe_fc_full;
/* SR-IOV schema setup: only for MACs newer than 82598 with MSI-X. */
582 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
583 nvlist_t *pf_schema, *vf_schema;
585 hw->mbx.ops.init_params(hw);
586 pf_schema = pci_iov_schema_alloc_node();
587 vf_schema = pci_iov_schema_alloc_node();
588 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
589 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
590 IOV_SCHEMA_HASDEFAULT, TRUE);
591 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
592 IOV_SCHEMA_HASDEFAULT, FALSE);
593 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
594 IOV_SCHEMA_HASDEFAULT, FALSE);
595 error = pci_iov_attach(dev, pf_schema, vf_schema);
598 "Error %d setting up SR-IOV\n", error);
603 /* Check for certain supported features */
604 ixgbe_check_wol_support(adapter);
605 ixgbe_check_eee_support(adapter);
608 ixgbe_add_device_sysctls(adapter);
609 ixgbe_add_hw_stats(adapter);
611 /* let hardware know driver is loaded */
612 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
613 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
614 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
617 ixgbe_netmap_attach(adapter);
618 #endif /* DEV_NETMAP */
619 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-unwind path (labels elided): release rings, ifnet, PCI, mta. */
623 ixgbe_free_transmit_structures(adapter);
624 ixgbe_free_receive_structures(adapter);
626 if (adapter->ifp != NULL)
627 if_free(adapter->ifp);
628 ixgbe_free_pci_resources(adapter);
629 free(adapter->mta, M_DEVBUF);
633 /*********************************************************************
634 * Device removal routine
636 * The detach entry point is called when the driver is being removed.
637 * This routine stops the adapter and deallocates all the resources
638 * that were allocated for driver operation.
640 * return 0 on success, positive on failure
641 *********************************************************************/
/*
 * Detach: refuse while VLANs or SR-IOV VFs are active, quiesce the adapter,
 * drain/free taskqueues, clear DRV_LOAD, unhook VLAN events, detach the
 * ifnet/netmap, and release rings, PCI resources and the core lock.
 * NOTE(review): elided in this extract -- "static int", braces, local
 * declarations (ctrl_ext), EBUSY returns and the final "return (0)".
 */
644 ixgbe_detach(device_t dev)
646 struct adapter *adapter = device_get_softc(dev);
647 struct ix_queue *que = adapter->queues;
648 struct tx_ring *txr = adapter->tx_rings;
651 INIT_DEBUGOUT("ixgbe_detach: begin");
653 /* Make sure VLANS are not using driver */
654 if (adapter->ifp->if_vlantrunk != NULL) {
655 device_printf(dev,"Vlan in use, detach first\n");
660 if (pci_iov_detach(dev) != 0) {
661 device_printf(dev, "SR-IOV in use; detach first.\n");
666 /* Stop the adapter */
667 IXGBE_CORE_LOCK(adapter);
668 ixgbe_setup_low_power_mode(adapter);
669 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues. */
671 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
673 #ifndef IXGBE_LEGACY_TX
674 taskqueue_drain(que->tq, &txr->txq_task);
676 taskqueue_drain(que->tq, &que->que_task);
677 taskqueue_free(que->tq);
681 /* Drain the Link queue */
683 taskqueue_drain(adapter->tq, &adapter->link_task);
684 taskqueue_drain(adapter->tq, &adapter->mod_task);
685 taskqueue_drain(adapter->tq, &adapter->msf_task);
687 taskqueue_drain(adapter->tq, &adapter->mbx_task);
689 taskqueue_drain(adapter->tq, &adapter->phy_task);
691 taskqueue_drain(adapter->tq, &adapter->fdir_task);
693 taskqueue_free(adapter->tq);
696 /* let hardware know driver is unloading */
697 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
698 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
699 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
701 /* Unregister VLAN events */
702 if (adapter->vlan_attach != NULL)
703 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
704 if (adapter->vlan_detach != NULL)
705 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
707 ether_ifdetach(adapter->ifp);
708 callout_drain(&adapter->timer);
710 netmap_detach(adapter->ifp);
711 #endif /* DEV_NETMAP */
712 ixgbe_free_pci_resources(adapter);
713 bus_generic_detach(dev);
714 if_free(adapter->ifp);
716 ixgbe_free_transmit_structures(adapter);
717 ixgbe_free_receive_structures(adapter);
718 free(adapter->mta, M_DEVBUF);
720 IXGBE_CORE_LOCK_DESTROY(adapter);
724 /*********************************************************************
726 * Shutdown entry point
728 **********************************************************************/
/*
 * System-shutdown hook: drop the adapter into its low-power state under the
 * core lock.  NOTE(review): "static int", braces, the "error" declaration
 * and the final "return (error);" are not visible in this extract.
 */
731 ixgbe_shutdown(device_t dev)
733 struct adapter *adapter = device_get_softc(dev);
736 INIT_DEBUGOUT("ixgbe_shutdown: begin");
738 IXGBE_CORE_LOCK(adapter);
739 error = ixgbe_setup_low_power_mode(adapter);
740 IXGBE_CORE_UNLOCK(adapter);
746 * Methods for going from:
747 * D0 -> D3: ixgbe_suspend
748 * D3 -> D0: ixgbe_resume
/*
 * D0 -> D3 transition: enter low-power mode, save PCI state and power down.
 * NOTE(review): "static int", braces, the "error" declaration, the
 * pci_save_state() call and the final return are not visible in this extract.
 */
751 ixgbe_suspend(device_t dev)
753 struct adapter *adapter = device_get_softc(dev);
756 INIT_DEBUGOUT("ixgbe_suspend: begin");
758 IXGBE_CORE_LOCK(adapter);
760 error = ixgbe_setup_low_power_mode(adapter);
762 /* Save state and power down */
764 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
766 IXGBE_CORE_UNLOCK(adapter);
/*
 * D3 -> D0 transition: restore PCI power state, report and clear wake-up
 * status (WUS/WUFC), then re-init the interface if it was up.
 * NOTE(review): "static int", braces, the "wus" declaration and the final
 * return are not visible in this extract.
 */
772 ixgbe_resume(device_t dev)
774 struct adapter *adapter = device_get_softc(dev);
775 struct ifnet *ifp = adapter->ifp;
776 struct ixgbe_hw *hw = &adapter->hw;
779 INIT_DEBUGOUT("ixgbe_resume: begin");
781 IXGBE_CORE_LOCK(adapter);
783 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
784 pci_restore_state(dev);
786 /* Read & clear WUS register */
787 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
789 device_printf(dev, "Woken up by (WUS): %#010x\n",
790 IXGBE_READ_REG(hw, IXGBE_WUS));
/* WUS is write-1-to-clear. */
791 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
792 /* And clear WUFC until next low-power transition */
793 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
796 * Required after D3->D0 transition;
797 * will re-advertise all previous advertised speeds
799 if (ifp->if_flags & IFF_UP)
800 ixgbe_init_locked(adapter);
802 IXGBE_CORE_UNLOCK(adapter);
804 INIT_DEBUGOUT("ixgbe_resume: end");
809 /*********************************************************************
812 * ixgbe_ioctl is called when the user wants to configure the
815 * return 0 on success, positive on failure
816 **********************************************************************/
/*
 * ifnet ioctl handler: MTU, interface flags, multicast, media, capabilities
 * and SFP I2C reads; everything else is passed to ether_ioctl().
 * NOTE(review): this extract elides the switch(command) skeleton -- "case"
 * labels, "break"s, braces, "else" arms, EINVAL returns and several local
 * declarations (error, i2c, i) are not visible here.
 */
819 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
821 struct adapter *adapter = ifp->if_softc;
822 struct ifreq *ifr = (struct ifreq *) data;
823 #if defined(INET) || defined(INET6)
824 struct ifaddr *ifa = (struct ifaddr *)data;
825 bool avoid_reset = FALSE;
/* SIOCSIFADDR handling (case label elided): note address family. */
833 if (ifa->ifa_addr->sa_family == AF_INET)
837 if (ifa->ifa_addr->sa_family == AF_INET6)
840 #if defined(INET) || defined(INET6)
842 ** Calling init results in link renegotiation,
843 ** so we avoid doing it when possible.
846 ifp->if_flags |= IFF_UP;
847 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
849 if (!(ifp->if_flags & IFF_NOARP))
850 arp_ifinit(ifp, ifa);
852 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate and apply new MTU, then re-init. */
856 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
857 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
860 IXGBE_CORE_LOCK(adapter);
861 ifp->if_mtu = ifr->ifr_mtu;
862 adapter->max_frame_size =
863 ifp->if_mtu + IXGBE_MTU_HDR;
864 ixgbe_init_locked(adapter);
866 ixgbe_recalculate_max_frame(adapter);
868 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: react to UP/DOWN and PROMISC/ALLMULTI changes. */
872 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
873 IXGBE_CORE_LOCK(adapter);
874 if (ifp->if_flags & IFF_UP) {
875 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
876 if ((ifp->if_flags ^ adapter->if_flags) &
877 (IFF_PROMISC | IFF_ALLMULTI)) {
878 ixgbe_set_promisc(adapter);
881 ixgbe_init_locked(adapter);
883 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
885 adapter->if_flags = ifp->if_flags;
886 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reload the multicast filter. */
890 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
891 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
892 IXGBE_CORE_LOCK(adapter);
893 ixgbe_disable_intr(adapter);
894 ixgbe_set_multi(adapter);
895 ixgbe_enable_intr(adapter);
896 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFMEDIA/SIOCGIFMEDIA: delegate to ifmedia. */
901 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
902 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle each changed capability bit, re-init if running. */
906 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
907 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
908 if (mask & IFCAP_HWCSUM)
909 ifp->if_capenable ^= IFCAP_HWCSUM;
910 if (mask & IFCAP_TSO4)
911 ifp->if_capenable ^= IFCAP_TSO4;
912 if (mask & IFCAP_TSO6)
913 ifp->if_capenable ^= IFCAP_TSO6;
914 if (mask & IFCAP_LRO)
915 ifp->if_capenable ^= IFCAP_LRO;
916 if (mask & IFCAP_VLAN_HWTAGGING)
917 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
918 if (mask & IFCAP_VLAN_HWFILTER)
919 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
920 if (mask & IFCAP_VLAN_HWTSO)
921 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
922 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
923 IXGBE_CORE_LOCK(adapter);
924 ixgbe_init_locked(adapter);
925 IXGBE_CORE_UNLOCK(adapter);
927 VLAN_CAPABILITIES(ifp);
/* SIOCGI2C: read SFP module EEPROM bytes over I2C (0xA0/0xA2 pages only). */
930 #if __FreeBSD_version >= 1100036
933 struct ixgbe_hw *hw = &adapter->hw;
936 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
937 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
940 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
944 if (i2c.len > sizeof(i2c.data)) {
949 for (i = 0; i < i2c.len; i++)
950 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
951 i2c.dev_addr, &i2c.data[i]);
952 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* default: hand everything else to the generic ethernet ioctl. */
957 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
958 error = ether_ioctl(ifp, command, data);
965 /*********************************************************************
968 * This routine is used in two ways. It is used by the stack as
969 * init entry point in network interface structure. It is also used
970 * by the driver as a hw/sw initialization routine to get to a
973 * return 0 on success, positive on failure
974 **********************************************************************/
975 #define IXGBE_MHADD_MFS_SHIFT 16
978 ixgbe_init_locked(struct adapter *adapter)
980 struct ifnet *ifp = adapter->ifp;
981 device_t dev = adapter->dev;
982 struct ixgbe_hw *hw = &adapter->hw;
988 enum ixgbe_iov_mode mode;
991 mtx_assert(&adapter->core_mtx, MA_OWNED);
992 INIT_DEBUGOUT("ixgbe_init_locked: begin");
994 hw->adapter_stopped = FALSE;
995 ixgbe_stop_adapter(hw);
996 callout_stop(&adapter->timer);
999 mode = ixgbe_get_iov_mode(adapter);
1000 adapter->pool = ixgbe_max_vfs(mode);
1001 /* Queue indices may change with IOV mode */
1002 for (int i = 0; i < adapter->num_queues; i++) {
1003 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1004 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1007 /* reprogram the RAR[0] in case user changed it. */
1008 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1010 /* Get the latest mac address, User can use a LAA */
1011 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1012 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1013 hw->addr_ctrl.rar_used_count = 1;
1015 /* Set the various hardware offload abilities */
1016 ifp->if_hwassist = 0;
1017 if (ifp->if_capenable & IFCAP_TSO)
1018 ifp->if_hwassist |= CSUM_TSO;
1019 if (ifp->if_capenable & IFCAP_TXCSUM) {
1020 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1021 #if __FreeBSD_version >= 800000
1022 if (hw->mac.type != ixgbe_mac_82598EB)
1023 ifp->if_hwassist |= CSUM_SCTP;
1027 /* Prepare transmit descriptors and buffers */
1028 if (ixgbe_setup_transmit_structures(adapter)) {
1029 device_printf(dev, "Could not setup transmit structures\n");
1030 ixgbe_stop(adapter);
1036 ixgbe_initialize_iov(adapter);
1038 ixgbe_initialize_transmit_units(adapter);
1040 /* Setup Multicast table */
1041 ixgbe_set_multi(adapter);
1044 ** Determine the correct mbuf pool
1045 ** for doing jumbo frames
1047 if (adapter->max_frame_size <= MCLBYTES)
1048 adapter->rx_mbuf_sz = MCLBYTES;
1050 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1052 /* Prepare receive descriptors and buffers */
1053 if (ixgbe_setup_receive_structures(adapter)) {
1054 device_printf(dev, "Could not setup receive structures\n");
1055 ixgbe_stop(adapter);
1059 /* Configure RX settings */
1060 ixgbe_initialize_receive_units(adapter);
1062 /* Enable SDP & MSIX interrupts based on adapter */
1063 ixgbe_config_gpie(adapter);
1066 if (ifp->if_mtu > ETHERMTU) {
1067 /* aka IXGBE_MAXFRS on 82599 and newer */
1068 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1069 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1070 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1071 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1074 /* Now enable all the queues */
1075 for (int i = 0; i < adapter->num_queues; i++) {
1076 txr = &adapter->tx_rings[i];
1077 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1078 txdctl |= IXGBE_TXDCTL_ENABLE;
1079 /* Set WTHRESH to 8, burst writeback */
1080 txdctl |= (8 << 16);
1082 * When the internal queue falls below PTHRESH (32),
1083 * start prefetching as long as there are at least
1084 * HTHRESH (1) buffers ready. The values are taken
1085 * from the Intel linux driver 3.8.21.
1086 * Prefetching enables tx line rate even with 1 queue.
1088 txdctl |= (32 << 0) | (1 << 8);
1089 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
1092 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1093 rxr = &adapter->rx_rings[i];
1094 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1095 if (hw->mac.type == ixgbe_mac_82598EB) {
1101 rxdctl &= ~0x3FFFFF;
1104 rxdctl |= IXGBE_RXDCTL_ENABLE;
1105 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1106 for (; j < 10; j++) {
1107 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1108 IXGBE_RXDCTL_ENABLE)
1116 * In netmap mode, we must preserve the buffers made
1117 * available to userspace before the if_init()
1118 * (this is true by default on the TX side, because
1119 * init makes all buffers available to userspace).
1121 * netmap_reset() and the device specific routines
1122 * (e.g. ixgbe_setup_receive_rings()) map these
1123 * buffers at the end of the NIC ring, so here we
1124 * must set the RDT (tail) register to make sure
1125 * they are not overwritten.
1127 * In this driver the NIC ring starts at RDH = 0,
1128 * RDT points to the last slot available for reception (?),
1129 * so RDT = num_rx_desc - 1 means the whole ring is available.
1131 if (ifp->if_capenable & IFCAP_NETMAP) {
1132 struct netmap_adapter *na = NA(adapter->ifp);
1133 struct netmap_kring *kring = &na->rx_rings[i];
1134 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1136 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1138 #endif /* DEV_NETMAP */
1139 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1142 /* Enable Receive engine */
1143 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1144 if (hw->mac.type == ixgbe_mac_82598EB)
1145 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1146 rxctrl |= IXGBE_RXCTRL_RXEN;
1147 ixgbe_enable_rx_dma(hw, rxctrl);
1149 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1151 /* Set up MSI/X routing */
1152 if (ixgbe_enable_msix) {
1153 ixgbe_configure_ivars(adapter);
1154 /* Set up auto-mask */
1155 if (hw->mac.type == ixgbe_mac_82598EB)
1156 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1158 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1159 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1161 } else { /* Simple settings for Legacy/MSI */
1162 ixgbe_set_ivar(adapter, 0, 0, 0);
1163 ixgbe_set_ivar(adapter, 0, 0, 1);
1164 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1168 /* Init Flow director */
1169 if (hw->mac.type != ixgbe_mac_82598EB) {
1170 u32 hdrm = 32 << fdir_pballoc;
1172 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1173 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1178 * Check on any SFP devices that
1179 * need to be kick-started
1181 if (hw->phy.type == ixgbe_phy_none) {
1182 int err = hw->phy.ops.identify(hw);
1183 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1185 "Unsupported SFP+ module type was detected.\n");
1190 /* Set moderation on the Link interrupt */
1191 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1193 /* Configure Energy Efficient Ethernet for supported devices */
1194 ixgbe_setup_eee(hw, adapter->eee_enabled);
1196 /* Config/Enable Link */
1197 ixgbe_config_link(adapter);
1199 /* Hardware Packet Buffer & Flow Control setup */
1200 ixgbe_config_delay_values(adapter);
1202 /* Initialize the FC settings */
1205 /* Set up VLAN support and filter */
1206 ixgbe_setup_vlan_hw_support(adapter);
1208 /* Setup DMA Coalescing */
1209 ixgbe_config_dmac(adapter);
1211 /* And now turn on interrupts */
1212 ixgbe_enable_intr(adapter);
1215 /* Enable the use of the MBX by the VF's */
1217 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1218 reg |= IXGBE_CTRL_EXT_PFRSTD;
1219 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1223 /* Now inform the stack we're ready */
1224 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * ixgbe_init - if_init entry point for the stack.
 * Wraps ixgbe_init_locked() with the adapter's core mutex.
 */
1230 ixgbe_init(void *arg)
1232 	struct adapter *adapter = arg;
1234 	IXGBE_CORE_LOCK(adapter);
1235 	ixgbe_init_locked(adapter);
1236 	IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_config_gpie - program the GPIE register: which SDP pins raise
 * interrupts for this MAC/device, plus enhanced-MSIX mode when more
 * than one MSI-X vector is in use.
 */
1241 ixgbe_config_gpie(struct adapter *adapter)
1243 	struct ixgbe_hw *hw = &adapter->hw;
1246 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1248 	/* Fan Failure Interrupt */
1249 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
1250 		gpie |= IXGBE_SDP1_GPIEN;
1253 	 * Module detection (SDP2)
1254 	 * Media ready (SDP1)
1256 	if (hw->mac.type == ixgbe_mac_82599EB) {
1257 		gpie |= IXGBE_SDP2_GPIEN;
1258 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1259 			gpie |= IXGBE_SDP1_GPIEN;
1263 	 * Thermal Failure Detection (X540)
1264 	 * Link Detection (X557)
1266 	if (hw->mac.type == ixgbe_mac_X540 ||
1267 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1268 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1269 		gpie |= IXGBE_SDP0_GPIEN_X540;
1271 	if (adapter->msix > 1) {
1272 		/* Enable Enhanced MSIX mode */
1273 		gpie |= IXGBE_GPIE_MSIX_MODE;
1274 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1278 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1283 * Requires adapter->max_frame_size to be set.
/*
 * ixgbe_config_delay_values - compute flow-control high/low water marks
 * from the max frame size and the RX packet-buffer size, then fill in
 * the fc settings (mode, pause time, xon) for a later ixgbe_fc_enable().
 * Requires adapter->max_frame_size to be set beforehand.
 */
1286 ixgbe_config_delay_values(struct adapter *adapter)
1288 	struct ixgbe_hw *hw = &adapter->hw;
1289 	u32 rxpb, frame, size, tmp;
1291 	frame = adapter->max_frame_size;
1293 	/* Calculate High Water */
1294 	switch (hw->mac.type) {
1295 	case ixgbe_mac_X540:
1296 	case ixgbe_mac_X550:
1297 	case ixgbe_mac_X550EM_x:
1298 		tmp = IXGBE_DV_X540(frame, frame);
1301 		tmp = IXGBE_DV(frame, frame);
1304 	size = IXGBE_BT2KB(tmp);
	/* RXPBSIZE is in bytes; >> 10 converts to KB to match BT2KB units */
1305 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1306 	hw->fc.high_water[0] = rxpb - size;
1308 	/* Now calculate Low Water */
1309 	switch (hw->mac.type) {
1310 	case ixgbe_mac_X540:
1311 	case ixgbe_mac_X550:
1312 	case ixgbe_mac_X550EM_x:
1313 		tmp = IXGBE_LOW_DV_X540(frame);
1316 		tmp = IXGBE_LOW_DV(frame);
1319 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1321 	hw->fc.requested_mode = adapter->fc;
1322 	hw->fc.pause_time = IXGBE_FC_PAUSE;
1323 	hw->fc.send_xon = TRUE;
1328 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixgbe_enable_queue - unmask the interrupt for one MSI-X vector.
 * 82598 uses the single EIMS register; newer MACs split the 64-bit
 * queue mask across EIMS_EX(0)/EIMS_EX(1).
 *
 * NOTE(review): "(u64)(1 << vector)" shifts a 32-bit int BEFORE
 * widening — undefined/wrong for vector >= 31. Should be
 * "(u64)1 << vector" so the upper EIMS_EX(1) half can ever be set.
 */
1333 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1335 	struct ixgbe_hw *hw = &adapter->hw;
1336 	u64 queue = (u64)(1 << vector);
1339 	if (hw->mac.type == ixgbe_mac_82598EB) {
1340 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1341 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1343 		mask = (queue & 0xFFFFFFFF);
1345 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1346 		mask = (queue >> 32);
1348 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * ixgbe_disable_queue - mask the interrupt for one MSI-X vector;
 * mirror of ixgbe_enable_queue() writing EIMC / EIMC_EX instead.
 *
 * NOTE(review): same shift-width hazard as ixgbe_enable_queue():
 * "(u64)(1 << vector)" should be "(u64)1 << vector".
 */
1353 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1355 	struct ixgbe_hw *hw = &adapter->hw;
1356 	u64 queue = (u64)(1 << vector);
1359 	if (hw->mac.type == ixgbe_mac_82598EB) {
1360 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1361 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1363 		mask = (queue & 0xFFFFFFFF);
1365 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1366 		mask = (queue >> 32);
1368 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * ixgbe_handle_que - deferred (taskqueue) per-queue service routine.
 * Drains RX/TX work while the interface is running, then re-enables
 * the queue's interrupt (or, lacking a per-queue resource, the whole
 * adapter's interrupts — presumably the legacy-IRQ path).
 */
1373 ixgbe_handle_que(void *context, int pending)
1375 	struct ix_queue *que = context;
1376 	struct adapter *adapter = que->adapter;
1377 	struct tx_ring *txr = que->txr;
1378 	struct ifnet *ifp = adapter->ifp;
1380 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1384 #ifndef IXGBE_LEGACY_TX
1385 		if (!drbr_empty(ifp, txr->br))
1386 			ixgbe_mq_start_locked(ifp, txr);
1388 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1389 			ixgbe_start_locked(txr, ifp);
1391 		IXGBE_TX_UNLOCK(txr);
1394 	/* Reenable this interrupt */
1395 	if (que->res != NULL)
1396 		ixgbe_enable_queue(adapter, que->msix);
1398 		ixgbe_enable_intr(adapter);
1403 /*********************************************************************
1405 * Legacy Interrupt Service routine
1407 **********************************************************************/
/*
 * ixgbe_legacy_irq - legacy/MSI interrupt service routine.
 * Reads and classifies EICR: services RX/TX on queue 0, detects fan
 * failure (82598AT), and defers link / external-PHY events to the
 * adapter taskqueue. Remaining queue work is pushed to que->tq.
 */
1410 ixgbe_legacy_irq(void *arg)
1412 	struct ix_queue *que = arg;
1413 	struct adapter *adapter = que->adapter;
1414 	struct ixgbe_hw *hw = &adapter->hw;
1415 	struct ifnet *ifp = adapter->ifp;
1416 	struct tx_ring *txr = adapter->tx_rings;
1421 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	/* Nothing pending: spurious/shared interrupt — just re-enable */
1424 	if (reg_eicr == 0) {
1425 		ixgbe_enable_intr(adapter);
1429 	more = ixgbe_rxeof(que);
1433 #ifdef IXGBE_LEGACY_TX
1434 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1435 		ixgbe_start_locked(txr, ifp);
1437 	if (!drbr_empty(ifp, txr->br))
1438 		ixgbe_mq_start_locked(ifp, txr);
1440 	IXGBE_TX_UNLOCK(txr);
1442 	/* Check for fan failure */
1443 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1444 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1445 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1446 		    "REPLACE IMMEDIATELY!!\n");
1447 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1450 	/* Link status change */
1451 	if (reg_eicr & IXGBE_EICR_LSC)
1452 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1454 	/* External PHY interrupt */
1455 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1456 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1457 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1460 		taskqueue_enqueue(que->tq, &que->que_task);
1462 		ixgbe_enable_intr(adapter);
1467 /*********************************************************************
1469 * MSIX Queue Interrupt Service routine
1471 **********************************************************************/
/*
 * ixgbe_msix_que - MSI-X per-queue interrupt handler.
 * Masks this vector, drains RX/TX, then (unless AIM is disabled)
 * computes an adaptive EITR value from the average packet size of
 * the last interval before re-enabling the vector.
 */
1473 ixgbe_msix_que(void *arg)
1475 	struct ix_queue *que = arg;
1476 	struct adapter *adapter = que->adapter;
1477 	struct ifnet *ifp = adapter->ifp;
1478 	struct tx_ring *txr = que->txr;
1479 	struct rx_ring *rxr = que->rxr;
1484 	/* Protect against spurious interrupts */
1485 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1488 	ixgbe_disable_queue(adapter, que->msix);
1491 	more = ixgbe_rxeof(que);
1495 #ifdef IXGBE_LEGACY_TX
	/* NOTE(review): missing '&' — should be IFQ_DRV_IS_EMPTY(&ifp->if_snd)
	 * as in ixgbe_handle_que()/ixgbe_legacy_irq(); this would not compile
	 * with IXGBE_LEGACY_TX defined. */
1496 	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1497 		ixgbe_start_locked(txr, ifp);
1499 	if (!drbr_empty(ifp, txr->br))
1500 		ixgbe_mq_start_locked(ifp, txr);
1502 	IXGBE_TX_UNLOCK(txr);
1506 	if (ixgbe_enable_aim == FALSE)
1509 	** Do Adaptive Interrupt Moderation:
1510 	**  - Write out last calculated setting
1511 	**  - Calculate based on average size over
1512 	**    the last interval.
1514 	if (que->eitr_setting)
1515 		IXGBE_WRITE_REG(&adapter->hw,
1516 		    IXGBE_EITR(que->msix), que->eitr_setting);
1518 	que->eitr_setting = 0;
1520 	/* Idle, do nothing */
1521 	if ((txr->bytes == 0) && (rxr->bytes == 0))
	/* Average bytes/packet over the interval drives the new ITR */
1524 	if ((txr->bytes) && (txr->packets))
1525 		newitr = txr->bytes/txr->packets;
1526 	if ((rxr->bytes) && (rxr->packets))
1527 		newitr = max(newitr,
1528 		    (rxr->bytes / rxr->packets));
1529 	newitr += 24; /* account for hardware frame, crc */
1531 	/* set an upper boundary */
1532 	newitr = min(newitr, 3000);
1534 	/* Be nice to the mid range */
1535 	if ((newitr > 300) && (newitr < 1200))
1536 		newitr = (newitr / 3);
1538 		newitr = (newitr / 2);
	/* 82598 wants the interval mirrored into the upper 16 bits;
	 * newer MACs take a write-disable bit instead */
1540 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1541 		newitr |= newitr << 16;
1543 		newitr |= IXGBE_EITR_CNT_WDIS;
1545 	/* save for next interrupt */
1546 	que->eitr_setting = newitr;
1556 		taskqueue_enqueue(que->tq, &que->que_task);
1558 		ixgbe_enable_queue(adapter, que->msix);
/*
 * ixgbe_msix_link - MSI-X "other"/link interrupt handler.
 * Decodes the non-queue causes in EICR (link change, flow director,
 * ECC, over-temp, mailbox, SFP module events, fan failure, external
 * PHY) and hands each to the appropriate taskqueue task, then
 * re-arms EIMS_OTHER.
 */
1564 ixgbe_msix_link(void *arg)
1566 	struct adapter *adapter = arg;
1567 	struct ixgbe_hw *hw = &adapter->hw;
1568 	u32 reg_eicr, mod_mask;
1570 	++adapter->link_irq;
1572 	/* First get the cause */
1573 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1574 	/* Be sure the queue bits are not cleared */
1575 	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1576 	/* Clear interrupt with write */
1577 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1579 	/* Link status change */
1580 	if (reg_eicr & IXGBE_EICR_LSC)
1581 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1583 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1585 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1586 			/* This is probably overkill :) */
1587 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1589 			/* Disable the interrupt */
1590 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1591 			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1594 		if (reg_eicr & IXGBE_EICR_ECC) {
1595 			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1596 			    "Please Reboot!!\n");
1597 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1600 		/* Check for over temp condition */
1601 		if (reg_eicr & IXGBE_EICR_TS) {
1602 			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1603 			    "PHY IS SHUT DOWN!!\n");
1604 			device_printf(adapter->dev, "System shutdown required!\n");
1605 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1608 		if (reg_eicr & IXGBE_EICR_MAILBOX)
1609 			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1613 	/* Pluggable optics-related interrupt */
1614 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1615 		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1617 		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1619 	if (ixgbe_is_sfp(hw)) {
1620 		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1621 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1622 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1623 		} else if (reg_eicr & mod_mask) {
1624 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1625 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1629 	/* Check for fan failure */
1630 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1631 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1632 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1633 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1634 		    "REPLACE IMMEDIATELY!!\n");
1637 	/* External PHY interrupt */
1638 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1639 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1640 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1641 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	/* Re-arm the "other" cause so the next event interrupts */
1644 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1648 /*********************************************************************
1650 * Media Ioctl callback
1652 * This routine is called whenever the user queries the status of
1653 * the interface using ifconfig.
1655 **********************************************************************/
/*
 * ixgbe_media_status - ifmedia status callback (SIOCGIFMEDIA).
 * Refreshes link state under the core lock, then translates the
 * PHY layer bitmap + negotiated link speed into an IFM_* active
 * media word, including flow-control pause flags on newer stacks.
 */
1657 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1659 	struct adapter *adapter = ifp->if_softc;
1660 	struct ixgbe_hw *hw = &adapter->hw;
1663 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1664 	IXGBE_CORE_LOCK(adapter);
1665 	ixgbe_update_link_status(adapter);
1667 	ifmr->ifm_status = IFM_AVALID;
1668 	ifmr->ifm_active = IFM_ETHER;
	/* No link: report media invalid/inactive and bail early */
1670 	if (!adapter->link_active) {
1671 		IXGBE_CORE_UNLOCK(adapter);
1675 	ifmr->ifm_status |= IFM_ACTIVE;
1676 	layer = adapter->phy_layer;
1678 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1679 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1680 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1681 		switch (adapter->link_speed) {
1682 		case IXGBE_LINK_SPEED_10GB_FULL:
1683 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1685 		case IXGBE_LINK_SPEED_1GB_FULL:
1686 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1688 		case IXGBE_LINK_SPEED_100_FULL:
1689 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1692 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1693 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1694 		switch (adapter->link_speed) {
1695 		case IXGBE_LINK_SPEED_10GB_FULL:
1696 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1699 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1700 		switch (adapter->link_speed) {
1701 		case IXGBE_LINK_SPEED_10GB_FULL:
1702 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1704 		case IXGBE_LINK_SPEED_1GB_FULL:
1705 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1708 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1709 		switch (adapter->link_speed) {
1710 		case IXGBE_LINK_SPEED_10GB_FULL:
1711 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1713 		case IXGBE_LINK_SPEED_1GB_FULL:
1714 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1717 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1718 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1719 		switch (adapter->link_speed) {
1720 		case IXGBE_LINK_SPEED_10GB_FULL:
1721 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1723 		case IXGBE_LINK_SPEED_1GB_FULL:
1724 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1727 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1728 		switch (adapter->link_speed) {
1729 		case IXGBE_LINK_SPEED_10GB_FULL:
1730 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1734 	** XXX: These need to use the proper media types once
	/* Backplane (KR/KX4/KX) reported with stand-in media types */
1737 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1738 		switch (adapter->link_speed) {
1739 		case IXGBE_LINK_SPEED_10GB_FULL:
1740 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1742 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1743 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1745 		case IXGBE_LINK_SPEED_1GB_FULL:
1746 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1749 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1750 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1751 		switch (adapter->link_speed) {
1752 		case IXGBE_LINK_SPEED_10GB_FULL:
1753 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1755 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1756 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1758 		case IXGBE_LINK_SPEED_1GB_FULL:
1759 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1763 	/* If nothing is recognized... */
1764 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1765 		ifmr->ifm_active |= IFM_UNKNOWN;
1767 #if __FreeBSD_version >= 900025
1768 	/* Display current flow control setting used on link */
1769 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1770 	    hw->fc.current_mode == ixgbe_fc_full)
1771 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1772 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1773 	    hw->fc.current_mode == ixgbe_fc_full)
1774 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1777 	IXGBE_CORE_UNLOCK(adapter);
1782 /*********************************************************************
1784 * Media Ioctl callback
1786 * This routine is called when the user changes speed/duplex using
1787 * media/mediopt option with ifconfig.
1789 **********************************************************************/
/*
 * ixgbe_media_change - ifmedia change callback (SIOCSIFMEDIA).
 * Maps the requested IFM_* subtype to an ixgbe_link_speed mask and
 * calls setup_link(); backplane media cannot be changed. The switch
 * cases deliberately fall through so higher-speed media accumulate
 * the lower advertised speeds as well.
 */
1791 ixgbe_media_change(struct ifnet * ifp)
1793 	struct adapter *adapter = ifp->if_softc;
1794 	struct ifmedia *ifm = &adapter->media;
1795 	struct ixgbe_hw *hw = &adapter->hw;
1796 	ixgbe_link_speed speed = 0;
1798 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1800 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1803 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1807 	** We don't actually need to check against the supported
1808 	** media types of the adapter; ifmedia will take care of
1811 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1814 		speed |= IXGBE_LINK_SPEED_100_FULL;
1816 	case IFM_10G_SR: /* KR, too */
1818 	case IFM_10G_CX4: /* KX4 */
1819 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
1820 	case IFM_10G_TWINAX:
1821 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
1824 		speed |= IXGBE_LINK_SPEED_100_FULL;
1827 	case IFM_1000_CX: /* KX */
1828 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
1831 		speed |= IXGBE_LINK_SPEED_100_FULL;
1837 	hw->mac.autotry_restart = TRUE;
1838 	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record advertised speeds as the 3-bit sysctl encoding:
	 * bit2=10G, bit1=1G, bit0=100M */
1839 	adapter->advertise =
1840 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1841 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1842 	    ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1847 	device_printf(adapter->dev, "Invalid media type!\n");
/*
 * ixgbe_set_promisc - program FCTRL unicast/multicast promiscuous
 * bits from the interface flags. Counts the multicast list first so
 * MPE can be dropped again when the list fits in the HW filter.
 */
1852 ixgbe_set_promisc(struct adapter *adapter)
1855 	struct ifnet *ifp = adapter->ifp;
1858 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1859 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1860 	if (ifp->if_flags & IFF_ALLMULTI)
1861 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1863 		struct ifmultiaddr *ifma;
1864 #if __FreeBSD_version < 800000
1867 		if_maddr_rlock(ifp);
1869 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1870 			if (ifma->ifma_addr->sa_family != AF_LINK)
1872 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1876 #if __FreeBSD_version < 800000
1877 		IF_ADDR_UNLOCK(ifp);
1879 		if_maddr_runlock(ifp);
	/* List fits in hardware filters: multicast-promisc not needed */
1882 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1883 		reg_rctl &= (~IXGBE_FCTRL_MPE);
1884 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1886 	if (ifp->if_flags & IFF_PROMISC) {
1887 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1888 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1889 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1890 		reg_rctl |= IXGBE_FCTRL_MPE;
1891 		reg_rctl &= ~IXGBE_FCTRL_UPE;
1892 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1898 /*********************************************************************
1901 * This routine is called whenever multicast address list is updated.
1903 **********************************************************************/
1904 #define IXGBE_RAR_ENTRIES 16
/*
 * ixgbe_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast address list. Falls back to multicast
 * promiscuous (MPE) when the list overflows the filter table or
 * IFF_ALLMULTI is set.
 */
1907 ixgbe_set_multi(struct adapter *adapter)
1911 	struct ifmultiaddr *ifma;
1912 	struct ixgbe_mc_addr *mta;
1914 	struct ifnet *ifp = adapter->ifp;
1916 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1919 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
1921 #if __FreeBSD_version < 800000
1924 	if_maddr_rlock(ifp);
1926 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1927 		if (ifma->ifma_addr->sa_family != AF_LINK)
	/* Stop copying once the HW filter table is full */
1929 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1931 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1932 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1933 		mta[mcnt].vmdq = adapter->pool;
1936 #if __FreeBSD_version < 800000
1937 	IF_ADDR_UNLOCK(ifp);
1939 	if_maddr_runlock(ifp);
1942 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1943 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1944 	if (ifp->if_flags & IFF_PROMISC)
1945 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1946 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1947 	    ifp->if_flags & IFF_ALLMULTI) {
1948 		fctrl |= IXGBE_FCTRL_MPE;
1949 		fctrl &= ~IXGBE_FCTRL_UPE;
1951 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1953 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1955 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1956 		update_ptr = (u8 *)mta;
1957 		ixgbe_update_mc_addr_list(&adapter->hw,
1958 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1965 * This is an iterator function now needed by the multicast
1966 * shared code. It simply feeds the shared code routine the
1967 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * ixgbe_mc_array_itr - iterator callback for the shared code's
 * multicast update: yields one ixgbe_mc_addr entry per call and
 * advances *update_ptr to the next array element.
 *
 * NOTE(review): stray double semicolon on the pointer-advance line.
 */
1970 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1972 	struct ixgbe_mc_addr *mta;
1974 	mta = (struct ixgbe_mc_addr *)*update_ptr;
1977 	*update_ptr = (u8*)(mta + 1);;
1982 /*********************************************************************
1985 * This routine checks for link status,updates statistics,
1986 * and runs the watchdog check.
1988 **********************************************************************/
/*
 * ixgbe_local_timer - 1 Hz callout (core lock held by caller's
 * callout setup). Probes unresolved SFPs, refreshes link state and
 * stats, tracks hung TX queues, rearms queues with pending work, and
 * performs a full watchdog reset only when ALL queues appear hung.
 */
1991 ixgbe_local_timer(void *arg)
1993 	struct adapter *adapter = arg;
1994 	device_t dev = adapter->dev;
1995 	struct ix_queue *que = adapter->queues;
1999 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2001 	/* Check for pluggable optics */
2002 	if (adapter->sfp_probe)
2003 		if (!ixgbe_sfp_probe(adapter))
2004 			goto out; /* Nothing to do */
2006 	ixgbe_update_link_status(adapter);
2007 	ixgbe_update_stats_counters(adapter);
2010 	** Check the TX queues status
2011 	**      - mark hung queues so we don't schedule on them
2012 	**      - watchdog only if all queues show hung
2014 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2015 		/* Keep track of queues with work for soft irq */
2017 			queues |= ((u64)1 << que->me);
2019 		** Each time txeof runs without cleaning, but there
2020 		** are uncleaned descriptors it increments busy. If
2021 		** we get to the MAX we declare it hung.
2023 		if (que->busy == IXGBE_QUEUE_HUNG) {
2025 			/* Mark the queue as inactive */
2026 			adapter->active_queues &= ~((u64)1 << que->me);
2029 			/* Check if we've come back from hung */
2030 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2031 				adapter->active_queues |= ((u64)1 << que->me);
2033 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
2034 			device_printf(dev,"Warning queue %d "
2035 			    "appears to be hung!\n", i);
2036 			que->txr->busy = IXGBE_QUEUE_HUNG;
2042 	/* Only truly watchdog if all queues show hung */
2043 	if (hung == adapter->num_queues)
2045 	else if (queues != 0) { /* Force an IRQ on queues with work */
2046 		ixgbe_rearm_queues(adapter, queues);
2050 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	/* Watchdog path: mark not-running, count the event, reinit */
2054 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2055 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2056 	adapter->watchdog_events++;
2057 	ixgbe_init_locked(adapter);
2062 ** Note: this routine updates the OS on the link state
2063 ** the real check of the hardware only happens with
2064 ** a link interrupt.
/*
 * ixgbe_update_link_status - push the cached link state to the OS.
 * Only reports edges (up->down, down->up); the actual hardware link
 * check happens in the link interrupt path. On link-up it also
 * re-applies flow control and DMA-coalescing settings and pings VFs.
 */
2067 ixgbe_update_link_status(struct adapter *adapter)
2069 	struct ifnet *ifp = adapter->ifp;
2070 	device_t dev = adapter->dev;
2072 	if (adapter->link_up){
2073 		if (adapter->link_active == FALSE) {
	/* link_speed 128 is the 10G encoding; anything else printed as 1G */
2075 			device_printf(dev,"Link is up %d Gbps %s \n",
2076 			    ((adapter->link_speed == 128)? 10:1),
2078 			adapter->link_active = TRUE;
2079 			/* Update any Flow Control changes */
2080 			ixgbe_fc_enable(&adapter->hw);
2081 			/* Update DMA coalescing config */
2082 			ixgbe_config_dmac(adapter);
2083 			if_link_state_change(ifp, LINK_STATE_UP);
2085 			ixgbe_ping_all_vfs(adapter);
2088 	} else { /* Link down */
2089 		if (adapter->link_active == TRUE) {
2091 			device_printf(dev,"Link is Down\n");
2092 			if_link_state_change(ifp, LINK_STATE_DOWN);
2093 			adapter->link_active = FALSE;
2095 			ixgbe_ping_all_vfs(adapter);
2104 /*********************************************************************
2106 * This routine disables all traffic on the adapter by issuing a
2107 * global reset on the MAC and deallocates TX/RX buffers.
2109 **********************************************************************/
/*
 * ixgbe_stop - quiesce the adapter (core lock must be held):
 * disable interrupts, stop the timer, reset the MAC, turn off the
 * TX laser, and tell the stack the link is down. RAR[0] is
 * reprogrammed in case the user set a locally-administered address.
 */
2112 ixgbe_stop(void *arg)
2115 	struct adapter *adapter = arg;
2116 	struct ixgbe_hw *hw = &adapter->hw;
2119 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2121 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2122 	ixgbe_disable_intr(adapter);
2123 	callout_stop(&adapter->timer);
2125 	/* Let the stack know...*/
2126 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	/* Clear the stopped flag first so ixgbe_stop_adapter really runs */
2129 	hw->adapter_stopped = FALSE;
2130 	ixgbe_stop_adapter(hw);
2131 	if (hw->mac.type == ixgbe_mac_82599EB)
2132 		ixgbe_stop_mac_link_on_d3_82599(hw);
2133 	/* Turn off the laser - noop with no optics */
2134 	ixgbe_disable_tx_laser(hw);
2136 	/* Update the stack */
2137 	adapter->link_up = FALSE;
2138 	ixgbe_update_link_status(adapter);
2140 	/* reprogram the RAR[0] in case user changed it. */
2141 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2147 /*********************************************************************
2149 * Determine hardware revision.
2151 **********************************************************************/
/*
 * ixgbe_identify_hardware - read PCI IDs into the shared-code hw
 * struct, enable bus mastering, resolve the MAC type, and pick the
 * per-MAC scatter-gather segment limit.
 */
2153 ixgbe_identify_hardware(struct adapter *adapter)
2155 	device_t dev = adapter->dev;
2156 	struct ixgbe_hw *hw = &adapter->hw;
2158 	/* Save off the information about this board */
2159 	hw->vendor_id = pci_get_vendor(dev);
2160 	hw->device_id = pci_get_device(dev);
2161 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2162 	hw->subsystem_vendor_id =
2163 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2164 	hw->subsystem_device_id =
2165 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2168 	** Make sure BUSMASTER is set
2170 	pci_enable_busmaster(dev);
2172 	/* We need this here to set the num_segs below */
2173 	ixgbe_set_mac_type(hw);
2175 	/* Pick up the 82599 settings */
2176 	if (hw->mac.type != ixgbe_mac_82598EB) {
2177 		hw->phy.smart_speed = ixgbe_smart_speed;
2178 		adapter->num_segs = IXGBE_82599_SCATTER;
2180 		adapter->num_segs = IXGBE_82598_SCATTER;
2185 /*********************************************************************
2187 * Determine optic type
2189 **********************************************************************/
/*
 * ixgbe_setup_optics - derive the default IFM_* optic type from the
 * supported physical-layer bitmap; first match wins, falling back to
 * IFM_ETHER | IFM_AUTO when nothing is recognized.
 */
2191 ixgbe_setup_optics(struct adapter *adapter)
2193 	struct ixgbe_hw *hw = &adapter->hw;
2196 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2198 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2199 		adapter->optics = IFM_10G_T;
2203 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2204 		adapter->optics = IFM_1000_T;
2208 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2209 		adapter->optics = IFM_1000_SX;
2213 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2214 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2215 		adapter->optics = IFM_10G_LR;
2219 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2220 		adapter->optics = IFM_10G_SR;
2224 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2225 		adapter->optics = IFM_10G_TWINAX;
2229 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2230 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2231 		adapter->optics = IFM_10G_CX4;
2235 	/* If we get here just set the default */
2236 	adapter->optics = IFM_ETHER | IFM_AUTO;
2240 /*********************************************************************
2242 * Setup the Legacy or MSI Interrupt handler
2244 **********************************************************************/
/*
 * ixgbe_allocate_legacy - set up the Legacy or single-vector MSI
 * interrupt: allocate the IRQ resource, create the per-queue and
 * link taskqueues, and register ixgbe_legacy_irq as the fast
 * handler. On handler-registration failure the taskqueues are freed.
 */
2246 ixgbe_allocate_legacy(struct adapter *adapter)
2248 	device_t dev = adapter->dev;
2249 	struct ix_queue *que = adapter->queues;
2250 #ifndef IXGBE_LEGACY_TX
2251 	struct tx_ring *txr = adapter->tx_rings;
	/* MSI uses resource id 1; legacy INTx uses 0 */
2256 	if (adapter->msix == 1)
2259 	/* We allocate a single interrupt resource */
2260 	adapter->res = bus_alloc_resource_any(dev,
2261 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2262 	if (adapter->res == NULL) {
2263 		device_printf(dev, "Unable to allocate bus resource: "
2269 	 * Try allocating a fast interrupt and the associated deferred
2270 	 * processing contexts.
2272 #ifndef IXGBE_LEGACY_TX
2273 	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2275 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2276 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2277 	    taskqueue_thread_enqueue, &que->tq);
2278 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2279 	    device_get_nameunit(adapter->dev));
2281 	/* Tasklets for Link, SFP and Multispeed Fiber */
2282 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2283 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2284 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2285 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2287 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2289 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2290 	    taskqueue_thread_enqueue, &adapter->tq);
2291 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2292 	    device_get_nameunit(adapter->dev));
2294 	if ((error = bus_setup_intr(dev, adapter->res,
2295 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2296 	    que, &adapter->tag)) != 0) {
2297 		device_printf(dev, "Failed to register fast interrupt "
2298 		    "handler: %d\n", error);
2299 		taskqueue_free(que->tq);
2300 		taskqueue_free(adapter->tq);
2305 	/* For simplicity in the handlers */
2306 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2312 /*********************************************************************
2314  * Setup MSIX Interrupt resources and handlers
2316  **********************************************************************/
/*
 * ixgbe_allocate_msix - allocate one IRQ resource and fast-interrupt handler
 * per RX/TX queue pair, bind each vector (and, with RSS, its taskqueue
 * thread) to a CPU, then allocate and wire up the separate Link vector and
 * its admin tasklets.
 *
 * NOTE(review): the embedded source line numbers are non-contiguous — this
 * excerpt omits lines (local declarations, RSS/#ifdef branches, error
 * unwinding, returns). Comments below describe only the visible code.
 */
2318 ixgbe_allocate_msix(struct adapter *adapter)
2320 	device_t dev = adapter->dev;
2321 	struct ix_queue *que = adapter->queues;
2322 	struct tx_ring *txr = adapter->tx_rings;
2323 	int error, rid, vector = 0;
/*
 * With RSS enabled, queue count should equal the RSS bucket count;
 * a mismatch is tolerated but hurts traffic distribution.
 */
2331 	 * If we're doing RSS, the number of queues needs to
2332 	 * match the number of RSS buckets that are configured.
2334 	 * + If there's more queues than RSS buckets, we'll end
2335 	 *   up with queues that get no traffic.
2337 	 * + If there's more RSS buckets than queues, we'll end
2338 	 *   up having multiple RSS buckets map to the same queue,
2339 	 *   so there'll be some contention.
2341 	if (adapter->num_queues != rss_getnumbuckets()) {
2343 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2344 		    "; performance will be impacted.\n",
2346 		    adapter->num_queues,
2347 		    rss_getnumbuckets());
/* Per-queue setup: IRQ resource, handler, CPU binding, deferred taskqueue. */
2351 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2353 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2354 		    RF_SHAREABLE | RF_ACTIVE);
2355 		if (que->res == NULL) {
2356 			device_printf(dev,"Unable to allocate"
2357 			    " bus resource: que interrupt [%d]\n", vector);
2360 		/* Set the handler function */
2361 		error = bus_setup_intr(dev, que->res,
2362 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2363 		    ixgbe_msix_que, que, &que->tag);
2366 			device_printf(dev, "Failed to register QUE handler");
2369 #if __FreeBSD_version >= 800504
2370 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Record this queue's vector in the active-queue mask used by handlers. */
2373 		adapter->active_queues |= (u64)(1 << que->msix);
2376 		 * The queue ID is used as the RSS layer bucket ID.
2377 		 * We look up the queue ID -> RSS CPU ID and select
2380 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2383 		 * Bind the msix vector, and thus the
2384 		 * rings to the corresponding cpu.
2386 		 * This just happens to match the default RSS round-robin
2387 		 * bucket -> queue -> CPU allocation.
/* Only pin vectors to CPUs when running multiqueue. */
2389 		if (adapter->num_queues > 1)
2392 		if (adapter->num_queues > 1)
2393 			bus_bind_intr(dev, que->res, cpu_id);
2397 			    "Bound RSS bucket %d to CPU %d\n",
2401 			    "Bound queue %d to cpu %d\n",
2404 #endif /* IXGBE_DEBUG */
2407 #ifndef IXGBE_LEGACY_TX
2408 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2410 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2411 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2412 		    taskqueue_thread_enqueue, &que->tq);
/* RSS build: pin the taskqueue thread to the same CPU as the vector. */
2414 		CPU_SETOF(cpu_id, &cpu_mask);
2415 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2418 		    device_get_nameunit(adapter->dev),
2421 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2422 		    device_get_nameunit(adapter->dev));
/* The last vector is dedicated to Link/admin events. */
2428 	adapter->res = bus_alloc_resource_any(dev,
2429     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2430 	if (!adapter->res) {
2431 		device_printf(dev,"Unable to allocate"
2432 	    " bus resource: Link interrupt [%d]\n", rid);
2435 	/* Set the link handler function */
2436 	error = bus_setup_intr(dev, adapter->res,
2437 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2438 	    ixgbe_msix_link, adapter, &adapter->tag);
2440 		adapter->res = NULL;
2441 		device_printf(dev, "Failed to register LINK handler");
2444 #if __FreeBSD_version >= 800504
2445 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2447 	adapter->vector = vector;
2448 	/* Tasklets for Link, SFP and Multispeed Fiber */
2449 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2450 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2451 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2453 	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2455 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2457 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2459 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2460 	    taskqueue_thread_enqueue, &adapter->tq);
2461 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2462 	    device_get_nameunit(adapter->dev));
2468  * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix - probe and allocate the interrupt scheme: try MSI-X
 * first (one vector per RX/TX queue pair plus one for Link), otherwise
 * fall back to MSI, otherwise legacy INTx. Also decides the queue count.
 *
 * NOTE(review): excerpt omits lines (returns, some branches) — embedded
 * line numbers are non-contiguous; comments cover only visible code.
 */
2471 ixgbe_setup_msix(struct adapter *adapter)
2473 	device_t dev = adapter->dev;
2474 	int rid, want, queues, msgs;
2476 	/* Override by tuneable */
2477 	if (ixgbe_enable_msix == 0)
2480 	/* First try MSI/X */
2481 	msgs = pci_msix_count(dev);
/* Map the MSI-X table BAR; 82598 and 82599 place it differently. */
2484 	rid = PCIR_BAR(MSIX_82598_BAR);
2485 	adapter->msix_mem = bus_alloc_resource_any(dev,
2486 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2487        	if (adapter->msix_mem == NULL) {
2488 		rid += 4;	/* 82599 maps in higher BAR */
2489 		adapter->msix_mem = bus_alloc_resource_any(dev,
2490 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2492 	if (adapter->msix_mem == NULL) {
2493 		/* May not be enabled */
2494 		device_printf(adapter->dev,
2495 		    "Unable to map MSIX table \n");
2499 	/* Figure out a reasonable auto config value */
2500 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2503 	/* If we're doing RSS, clamp at the number of RSS buckets */
2504 	if (queues > rss_getnumbuckets())
2505 		queues = rss_getnumbuckets();
/* Explicit tunable overrides the auto-computed queue count. */
2508 	if (ixgbe_num_queues != 0)
2509 		queues = ixgbe_num_queues;
2511 	/* reflect correct sysctl value */
2512 	ixgbe_num_queues = queues;
2515 	** Want one vector (RX/TX pair) per queue
2516 	** plus an additional for Link.
2522 		device_printf(adapter->dev,
2523 		    "MSIX Configuration Problem, "
2524 		    "%d vectors but %d queues wanted!\n",
/* Accept MSI-X only if we got exactly the vector count we asked for. */
2528 	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2529                	device_printf(adapter->dev,
2530 		    "Using MSIX interrupts with %d vectors\n", msgs);
2531 		adapter->num_queues = queues;
2535 	** If MSIX alloc failed or provided us with
2536 	** less than needed, free and fall through to MSI
2538 	pci_release_msi(dev);
2541 	if (adapter->msix_mem != NULL) {
2542 		bus_release_resource(dev, SYS_RES_MEMORY,
2543 		    rid, adapter->msix_mem);
2544 		adapter->msix_mem = NULL;
2547 	if (pci_alloc_msi(dev, &msgs) == 0) {
2548        		device_printf(adapter->dev,"Using an MSI interrupt\n");
2551        	device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources - map BAR(0) register space, seed the osdep
 * bus-space tag/handle used by the register access macros, and select the
 * interrupt scheme via ixgbe_setup_msix().
 *
 * NOTE(review): excerpt omits lines (the BAR rid setup and returns) —
 * embedded line numbers are non-contiguous.
 */
2557 ixgbe_allocate_pci_resources(struct adapter *adapter)
2560 	device_t dev = adapter->dev;
2563 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2566 	if (!(adapter->pci_mem)) {
2567 		device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save bus-space cookies; shared code reaches them through hw.back. */
2571 	adapter->osdep.mem_bus_space_tag =
2572 		rman_get_bustag(adapter->pci_mem);
2573 	adapter->osdep.mem_bus_space_handle =
2574 		rman_get_bushandle(adapter->pci_mem);
2575 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2577 	/* Legacy defaults */
2578 	adapter->num_queues = 1;
2579 	adapter->hw.back = &adapter->osdep;
2582 	** Now setup MSI or MSI/X, should
2583 	** return us the number of supported
2584 	** vectors. (Will be 1 for MSI)
2586 	adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - tear down everything allocated by
 * ixgbe_allocate_pci_resources()/ixgbe_allocate_msix(): per-queue IRQs,
 * the Link/legacy IRQ, MSI vectors, the MSI-X table BAR and BAR(0).
 * Safe to call from a partially-failed attach (guarded by adapter->res).
 *
 * NOTE(review): excerpt omits lines (e.g. local declarations of rid/memrid,
 * que->tag = NULL) — embedded line numbers are non-contiguous.
 */
2591 ixgbe_free_pci_resources(struct adapter * adapter)
2593 	struct ix_queue	*que = adapter->queues;
2594 	device_t	dev = adapter->dev;
/* MSI-X table lives in a MAC-dependent BAR. */
2597 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2598 		memrid = PCIR_BAR(MSIX_82598_BAR);
2600 		memrid = PCIR_BAR(MSIX_82599_BAR);
2603 	** There is a slight possibility of a failure mode
2604 	** in attach that will result in entering this function
2605 	** before interrupt resources have been initialized, and
2606 	** in that case we do not want to execute the loops below
2607 	** We can detect this reliably by the state of the adapter
2610 	if (adapter->res == NULL)
2614 	**  Release all msix queue resources:
2616 	for (int i = 0; i < adapter->num_queues; i++, que++) {
/* IRQ rid for a queue is its MSI-X vector number plus one. */
2617 		rid = que->msix + 1;
2618 		if (que->tag != NULL) {
2619 			bus_teardown_intr(dev, que->res, que->tag);
2622 		if (que->res != NULL)
2623 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2627 	/* Clean the Legacy or Link interrupt last */
2628 	if (adapter->vector) /* we are doing MSIX */
2629 		rid = adapter->vector + 1;
2631 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2633 	if (adapter->tag != NULL) {
2634 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2635 		adapter->tag = NULL;
2637 	if (adapter->res != NULL)
2638 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2642 	pci_release_msi(dev);
2644 	if (adapter->msix_mem != NULL)
2645 		bus_release_resource(dev, SYS_RES_MEMORY,
2646 		    memrid, adapter->msix_mem);
2648 	if (adapter->pci_mem != NULL)
2649 		bus_release_resource(dev, SYS_RES_MEMORY,
2650 		    PCIR_BAR(0), adapter->pci_mem);
2655 /*********************************************************************
2657  *  Setup networking device structure and register an interface.
2659  **********************************************************************/
/*
 * ixgbe_setup_interface - allocate the ifnet, fill in driver entry points
 * and capabilities, attach the Ethernet layer, and register media types.
 *
 * NOTE(review): excerpt omits lines (ifp NULL-check branch body, returns) —
 * embedded line numbers are non-contiguous.
 */
2661 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2665 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2667 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2669 		device_printf(dev, "can not allocate ifnet structure\n");
2672 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2673 	ifp->if_baudrate = IF_Gbps(10);
2674 	ifp->if_init = ixgbe_init;
2675 	ifp->if_softc = adapter;
2676 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2677 	ifp->if_ioctl = ixgbe_ioctl;
2678 #if __FreeBSD_version >= 1100036
2679 	if_setgetcounterfn(ifp, ixgbe_get_counter);
2681 #if __FreeBSD_version >= 1100045
2682 	/* TSO parameters */
2683 	ifp->if_hw_tsomax = 65518;
2684 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2685 	ifp->if_hw_tsomaxsegsize = 2048;
/* Multiqueue transmit path vs. legacy if_start queueing. */
2687 #ifndef IXGBE_LEGACY_TX
2688 	ifp->if_transmit = ixgbe_mq_start;
2689 	ifp->if_qflush = ixgbe_qflush;
2691 	ifp->if_start = ixgbe_start;
2692 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2693 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2694 	IFQ_SET_READY(&ifp->if_snd);
2697 	ether_ifattach(ifp, adapter->hw.mac.addr);
2699 	adapter->max_frame_size =
2700 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2703 	 * Tell the upper layer(s) we support long frames.
2705 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2707 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2708 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2709 	ifp->if_capabilities |= IFCAP_LRO;
2710 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2714 	ifp->if_capenable = ifp->if_capabilities;
2717 	** Don't turn this on by default, if vlans are
2718 	** created on another pseudo device (eg. lagg)
2719 	** then vlan events are not passed thru, breaking
2720 	** operation, but with HW FILTER off it works. If
2721 	** using vlans directly on the ixgbe driver you can
2722 	** enable this and get full hardware tag filtering.
2724 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2727 	 * Specify the media types supported by this adapter and register
2728 	 * callbacks to update media and link information
2730 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2731 		     ixgbe_media_status);
2733 	ixgbe_add_media_types(adapter);
2735 	/* Autoselect media by default */
2736 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types - translate the PHY's supported physical-layer
 * bitmap into ifmedia entries; backplane (KR/KX4/KX) types that have no
 * FreeBSD media define are mapped onto the closest fiber/copper type.
 *
 * NOTE(review): excerpt omits lines — embedded line numbers are
 * non-contiguous.
 */
2742 ixgbe_add_media_types(struct adapter *adapter)
2744 	struct ixgbe_hw *hw = &adapter->hw;
2745 	device_t dev = adapter->dev;
2748 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2750 	/* Media types with matching FreeBSD media defines */
2751 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2752 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2753 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2754 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2755 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2756 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2758 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2759 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2760 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2762 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2763 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2764 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2765 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2766 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2767 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2768 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2769 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2772 	** Other (no matching FreeBSD media type):
2773 	** To workaround this, we'll assign these completely
2774 	** inappropriate media types.
2776 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2777 		device_printf(dev, "Media supported: 10GbaseKR\n");
2778 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2779 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2781 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2782 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2783 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2784 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2786 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2787 		device_printf(dev, "Media supported: 1000baseKX\n");
2788 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2789 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2791 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2792 		/* Someday, someone will care about you... */
2793 		device_printf(dev, "Media supported: 1000baseBX\n");
/* 82598AT additionally advertises 1000baseT full-duplex entries. */
2796 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2797 		ifmedia_add(&adapter->media,
2798 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2799 		ifmedia_add(&adapter->media,
2800 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2803 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link - bring the link up: for SFP ports kick the module/MSF
 * tasklets (setup may sleep, so it runs from a taskqueue); for non-SFP
 * ports query capabilities and call the shared-code setup_link.
 *
 * NOTE(review): excerpt omits lines (the sfp/else branch structure is
 * partially hidden) — embedded line numbers are non-contiguous.
 */
2807 ixgbe_config_link(struct adapter *adapter)
2809 	struct ixgbe_hw *hw = &adapter->hw;
2810 	u32	autoneg, err = 0;
2811 	bool	sfp, negotiate;
2813 	sfp = ixgbe_is_sfp(hw);
2816 		if (hw->phy.multispeed_fiber) {
2817 			hw->mac.ops.setup_sfp(hw);
2818 			ixgbe_enable_tx_laser(hw);
2819 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2821 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2823 		if (hw->mac.ops.check_link)
2824 			err = ixgbe_check_link(hw, &adapter->link_speed,
2825 			    &adapter->link_up, FALSE);
/* Fall back to the device's advertised capabilities when none configured. */
2828 		autoneg = hw->phy.autoneg_advertised;
2829 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2830                 	err  = hw->mac.ops.get_link_capabilities(hw,
2831 			    &autoneg, &negotiate);
2834 		if (hw->mac.ops.setup_link)
2835                 	err = hw->mac.ops.setup_link(hw,
2836 			    autoneg, adapter->link_up);
2843 /*********************************************************************
2845  *  Enable transmit units.
2847  **********************************************************************/
/*
 * ixgbe_initialize_transmit_units - program per-ring TX descriptor base,
 * length, head/tail registers, disable head writeback, then (non-82598)
 * enable DMA TX and set MTQC with the TX arbiter temporarily disabled.
 *
 * NOTE(review): excerpt omits lines (e.g. the declaration of j/txctrl,
 * break statements, #ifdef PCI_IOV branches) — embedded line numbers are
 * non-contiguous.
 */
2849 ixgbe_initialize_transmit_units(struct adapter *adapter)
2851 	struct tx_ring	*txr = adapter->tx_rings;
2852 	struct ixgbe_hw	*hw = &adapter->hw;
2854 	/* Setup the Base and Length of the Tx Descriptor Ring */
2856 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2857 		u64	tdba = txr->txdma.dma_paddr;
2861 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2862 		       (tdba & 0x00000000ffffffffULL));
2863 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2864 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
2865 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2867 		/* Setup the HW Tx Head and Tail descriptor pointers */
2868 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
2869 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2871 		/* Cache the tail address */
2872 		txr->tail = IXGBE_TDT(j);
2874 		/* Set the processing limit */
2875 		txr->process_limit = ixgbe_tx_process_limit;
2877 		/* Disable Head Writeback */
/* DCA_TXCTRL register layout differs between 82598 and 82599/X540+. */
2878 		switch (hw->mac.type) {
2879 		case ixgbe_mac_82598EB:
2880 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
2882 		case ixgbe_mac_82599EB:
2883 		case ixgbe_mac_X540:
2885 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
2888 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2889 		switch (hw->mac.type) {
2890 		case ixgbe_mac_82598EB:
2891 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2893 		case ixgbe_mac_82599EB:
2894 		case ixgbe_mac_X540:
2896 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2902 	if (hw->mac.type != ixgbe_mac_82598EB) {
2903 		u32 dmatxctl, rttdcs;
2905 		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
2907 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2908 		dmatxctl |= IXGBE_DMATXCTL_TE;
2909 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2910 		/* Disable arbiter to set MTQC */
2911 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2912 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2913 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2915 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
2917 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
/* Re-enable the TX arbiter once MTQC is programmed. */
2919 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2920 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialise_rss_mapping - program the RSS redirection table (RETA /
 * ERETA), the RSS hash key (RSSRK) and the hash-field selection (MRQC).
 * With the RSS kernel option the key and bucket->queue mapping come from
 * the stack; otherwise a random key and round-robin mapping are used.
 *
 * NOTE(review): excerpt omits lines (#ifdef RSS boundaries, switch cases
 * setting index_mult/table_size, the i % 4 == 3 flush condition) —
 * embedded line numbers are non-contiguous; comments cover visible code.
 */
2927 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2929 	struct ixgbe_hw	*hw = &adapter->hw;
2930 	u32 reta = 0, mrqc, rss_key[10];
2931 	int queue_id, table_size, index_mult;
2933 	u32 rss_hash_config;
2936 	enum ixgbe_iov_mode mode;
2940 	/* Fetch the configured RSS key */
2941 	rss_getkey((uint8_t *) &rss_key);
2943 	/* set up random bits */
2944 	arc4rand(&rss_key, sizeof(rss_key), 0);
2947 	/* Set multiplier for RETA setup and table size based on MAC */
2950 	switch (adapter->hw.mac.type) {
2951 	case ixgbe_mac_82598EB:
2954 	case ixgbe_mac_X550:
2955 	case ixgbe_mac_X550EM_x:
2962 	/* Set up the redirection table */
2963 	for (int i = 0, j = 0; i < table_size; i++, j++) {
2964 		if (j == adapter->num_queues) j = 0;
2967 		 * Fetch the RSS bucket id for the given indirection entry.
2968 		 * Cap it at the number of configured buckets (which is
2971 		queue_id = rss_get_indirection_to_bucket(i);
2972 		queue_id = queue_id % adapter->num_queues;
2974 		queue_id = (j * index_mult);
2977 		 * The low 8 bits are for hash value (n+0);
2978 		 * The next 8 bits are for hash value (n+1), etc.
/* Pack four 8-bit queue ids per 32-bit RETA register before writing. */
2981 		reta = reta | ( ((uint32_t) queue_id) << 24);
2984 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
/* X550 extended entries live in ERETA beyond the first 128 slots. */
2986 			IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2991 	/* Now fill our hash function seeds */
2992 	for (int i = 0; i < 10; i++)
2993 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2995 	/* Perform hash on these packet types */
2997 	mrqc = IXGBE_MRQC_RSSEN;
2998 	rss_hash_config = rss_gethashconfig();
2999 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3000 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3001 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3002 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3003 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3004 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3005 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3006 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3007 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3008 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3009 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3010 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3011 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3012 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3013 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3014 		device_printf(adapter->dev,
3015 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3016 		    "but not supported\n", __func__);
3017 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3018 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3019 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3020 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3023 	 * Disable UDP - IP fragments aren't currently being handled
3024 	 * and so we end up with a mix of 2-tuple and 4-tuple
3027 	mrqc = IXGBE_MRQC_RSSEN
3028 	     | IXGBE_MRQC_RSS_FIELD_IPV4
3029 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3030 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3031 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3032 	     | IXGBE_MRQC_RSS_FIELD_IPV6
3033 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3037 	mode = ixgbe_get_iov_mode(adapter);
3038 	mrqc |= ixgbe_get_mrqc(mode);
3040 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3044 /*********************************************************************
3046  *  Setup receive registers and features.
3048  **********************************************************************/
3049 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3051 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units - program per-ring RX descriptor base,
 * length, SRRCTL buffer sizing and head/tail pointers, set frame-control
 * and jumbo/CRC-strip options, then configure RSS and RX checksum.
 *
 * NOTE(review): excerpt omits lines (declarations of hlreg/psrtype/j,
 * #ifdef DEV_NETMAP boundaries, final RX enable) — embedded line numbers
 * are non-contiguous; comments describe only visible code.
 */
3054 ixgbe_initialize_receive_units(struct adapter *adapter)
3056 	struct	rx_ring	*rxr = adapter->rx_rings;
3057 	struct ixgbe_hw	*hw = &adapter->hw;
3058 	struct ifnet   *ifp = adapter->ifp;
3059 	u32		bufsz, fctrl, srrctl, rxcsum;
3064 	 * Make sure receives are disabled while
3065 	 * setting up the descriptor ring
3067 	ixgbe_disable_rx(hw);
3069 	/* Enable broadcasts */
3070 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3071 	fctrl |= IXGBE_FCTRL_BAM;
3072 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3073 		fctrl |= IXGBE_FCTRL_DPF;
3074 		fctrl |= IXGBE_FCTRL_PMCF;
3076 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3078 	/* Set for Jumbo Frames? */
3079 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3080 	if (ifp->if_mtu > ETHERMTU)
3081 		hlreg |= IXGBE_HLREG0_JUMBOEN;
3083 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3085 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3086 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3087 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3089 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3090 #endif /* DEV_NETMAP */
3091 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf cluster size to SRRCTL's 1KB-granular packet size. */
3093 	bufsz = (adapter->rx_mbuf_sz +
3094 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3096 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3097 		u64 rdba = rxr->rxdma.dma_paddr;
3100 		/* Setup the Base and Length of the Rx Descriptor Ring */
3101 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3102 			       (rdba & 0x00000000ffffffffULL));
3103 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3104 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3105 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3107 		/* Set up the SRRCTL register */
3108 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3109 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3110 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3112 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3115 		 * Set DROP_EN iff we have no flow control and >1 queue.
3116 		 * Note that srrctl was cleared shortly before during reset,
3117 		 * so we do not need to clear the bit, but do it just in case
3118 		 * this code is moved elsewhere.
3120 		if (adapter->num_queues > 1 &&
3121 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3122 			srrctl |= IXGBE_SRRCTL_DROP_EN;
3124 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3127 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3129 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3130 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3131 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3133 		/* Set the processing limit */
3134 		rxr->process_limit = ixgbe_rx_process_limit;
3136 		/* Set the driver rx tail address */
3137 		rxr->tail =  IXGBE_RDT(rxr->me);
/* Packet-split header types only exist on 82599 and later MACs. */
3140 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3141 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3142 			      IXGBE_PSRTYPE_UDPHDR |
3143 			      IXGBE_PSRTYPE_IPV4HDR |
3144 			      IXGBE_PSRTYPE_IPV6HDR;
3145 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3148 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3150 	ixgbe_initialise_rss_mapping(adapter);
3152 	if (adapter->num_queues > 1) {
3153 		/* RSS and RX IPP Checksum are mutually exclusive */
3154 		rxcsum |= IXGBE_RXCSUM_PCSD;
3157 	if (ifp->if_capenable & IFCAP_RXCSUM)
3158 		rxcsum |= IXGBE_RXCSUM_PCSD;
3160 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3161 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3163 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3170 ** This routine is run via an vlan config EVENT,
3171 ** it enables us to use the HW Filter table since
3172 ** we can get the vlan id. This just creates the
3173 ** entry in the soft version of the VFTA, init will
3174 ** repopulate the real table.
/*
 * ixgbe_register_vlan - VLAN-attach event handler: record the tag in the
 * soft VFTA shadow and reprogram the hardware VLAN support.
 */
3177 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3179 	struct adapter	*adapter = ifp->if_softc;
3182 	if (ifp->if_softc !=  arg)   /* Not our event */
3185 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3188 	IXGBE_CORE_LOCK(adapter);
/* VFTA is an array of 32-bit words; 7-bit word index + 5-bit bit index. */
3189 	index = (vtag >> 5) & 0x7F;
3191 	adapter->shadow_vfta[index] |= (1 << bit);
3192 	++adapter->num_vlans;
3193 	ixgbe_setup_vlan_hw_support(adapter);
3194 	IXGBE_CORE_UNLOCK(adapter);
3198 ** This routine is run via an vlan
3199 ** unconfig EVENT, remove our entry
3200 ** in the soft vfta.
/*
 * ixgbe_unregister_vlan - VLAN-detach event handler: clear the tag from the
 * soft VFTA shadow and re-init the hardware VLAN support.
 */
3203 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3205 	struct adapter	*adapter = ifp->if_softc;
3208 	if (ifp->if_softc !=  arg)
3211 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3214 	IXGBE_CORE_LOCK(adapter);
/* Mirror of the index/bit math in ixgbe_register_vlan(). */
3215 	index = (vtag >> 5) & 0x7F;
3217 	adapter->shadow_vfta[index] &= ~(1 << bit);
3218 	--adapter->num_vlans;
3219 	/* Re-init to load the changes */
3220 	ixgbe_setup_vlan_hw_support(adapter);
3221 	IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support - push the soft VFTA shadow and VLAN
 * stripping/filtering configuration into the hardware. Called after a
 * (soft) reset and from the VLAN register/unregister event handlers.
 *
 * NOTE(review): excerpt omits lines (returns, else branches) — embedded
 * line numbers are non-contiguous.
 */
3225 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3227 	struct ifnet 	*ifp = adapter->ifp;
3228 	struct ixgbe_hw *hw = &adapter->hw;
3229 	struct rx_ring	*rxr;
3234 	** We get here thru init_locked, meaning
3235 	** a soft reset, this has already cleared
3236 	** the VFTA and other state, so if there
3237 	** have been no vlan's registered do nothing.
3239 	if (adapter->num_vlans == 0)
3242 	/* Setup the queues for vlans */
3243 	for (int i = 0; i < adapter->num_queues; i++) {
3244 		rxr = &adapter->rx_rings[i];
3245 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3246 		if (hw->mac.type != ixgbe_mac_82598EB) {
3247 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3248 			ctrl |= IXGBE_RXDCTL_VME;
3249 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3251 		rxr->vtag_strip = TRUE;
3254 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3257 	** A soft reset zero's out the VFTA, so
3258 	** we need to repopulate it now.
3260 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3261 		if (adapter->shadow_vfta[i] != 0)
3262 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3263 			    adapter->shadow_vfta[i]);
3265 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3266 	/* Enable the Filter Table if enabled */
3267 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3268 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3269 		ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL), not per-queue. */
3271 	if (hw->mac.type == ixgbe_mac_82598EB)
3272 		ctrl |= IXGBE_VLNCTRL_VME;
3273 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr - build the EIMS cause mask appropriate for the MAC
 * type (ECC, SFP GPIOs, thermal sensor, flow director, mailbox...), enable
 * it, set EIAC auto-clear for MSI-X queue vectors, and finally enable each
 * queue's vector individually.
 *
 * NOTE(review): excerpt omits lines (declarations of mask/fwsm, break
 * statements, #ifdef IXGBE_FDIR / PCI_IOV boundaries) — embedded line
 * numbers are non-contiguous.
 */
3277 ixgbe_enable_intr(struct adapter *adapter)
3279 	struct ixgbe_hw	*hw = &adapter->hw;
3280 	struct ix_queue	*que = adapter->queues;
/* Start from the full enable mask minus the per-queue RTX bits. */
3283 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3284 	/* Enable Fan Failure detection */
3285 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3286 		    mask |= IXGBE_EIMS_GPI_SDP1;
3288 	switch (adapter->hw.mac.type) {
3289 		case ixgbe_mac_82599EB:
3290 			mask |= IXGBE_EIMS_ECC;
3291 			/* Temperature sensor on some adapters */
3292 			mask |= IXGBE_EIMS_GPI_SDP0;
3293 			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3294 			mask |= IXGBE_EIMS_GPI_SDP1;
3295 			mask |= IXGBE_EIMS_GPI_SDP2;
3297 			mask |= IXGBE_EIMS_FLOW_DIR;
3300 			mask |= IXGBE_EIMS_MAILBOX;
3303 		case ixgbe_mac_X540:
3304 			/* Detect if Thermal Sensor is enabled */
3305 			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3306 			if (fwsm & IXGBE_FWSM_TS_ENABLED)
3307 				mask |= IXGBE_EIMS_TS;
3308 			mask |= IXGBE_EIMS_ECC;
3310 			mask |= IXGBE_EIMS_FLOW_DIR;
3313 		case ixgbe_mac_X550:
3314 		case ixgbe_mac_X550EM_x:
3315 			/* MAC thermal sensor is automatically enabled */
3316 			mask |= IXGBE_EIMS_TS;
3317 			/* Some devices use SDP0 for important information */
3318 			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3319 			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3320 				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3321 			mask |= IXGBE_EIMS_ECC;
3323 			mask |= IXGBE_EIMS_FLOW_DIR;
3326 			mask |= IXGBE_EIMS_MAILBOX;
3333 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3335 	/* With MSI-X we use auto clear */
3336 	if (adapter->msix_mem) {
3337 		mask = IXGBE_EIMS_ENABLE_MASK;
3338 		/* Don't autoclear Link */
3339 		mask &= ~IXGBE_EIMS_OTHER;
3340 		mask &= ~IXGBE_EIMS_LSC;
3342 		mask &= ~IXGBE_EIMS_MAILBOX;
3344 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3348 	** Now enable all queues, this is done separately to
3349 	** allow for handling the extended (beyond 32) MSIX
3350 	** vectors that can be used by 82599
3352 	for (int i = 0; i < adapter->num_queues; i++, que++)
3353 		ixgbe_enable_queue(adapter, que->msix);
3355 	IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr - mask all interrupt causes: clear EIAC auto-clear
 * when using MSI-X, then write the EIMC (and extended EIMC_EX on non-82598
 * MACs) clear registers, flushing the writes at the end.
 */
3361 ixgbe_disable_intr(struct adapter *adapter)
3363 	if (adapter->msix_mem)
3364 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3365 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3366 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
/* 82599+: upper EIMC bits reserved; extended vectors masked via EIMC_EX. */
3368 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3369 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3370 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3372 	IXGBE_WRITE_FLUSH(&adapter->hw);
3377 ** Get the width and transaction speed of
3378 ** the slot this adapter is plugged into.
/*
 * ixgbe_get_slot_info - determine PCIe slot width/speed. Most devices use
 * the shared-code ixgbe_get_bus_info(); the 82599 quad-port (SFP_SF_QP)
 * must instead walk up the PCI tree and read the parent bridge's Link
 * Status register. Warns when the slot bandwidth is insufficient.
 *
 * NOTE(review): excerpt omits lines (break/goto statements, display label)
 * — embedded line numbers are non-contiguous.
 */
3381 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3383 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3384 	struct ixgbe_mac_info	*mac = &hw->mac;
3388 	/* For most devices simply call the shared code routine */
3389 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3390 		ixgbe_get_bus_info(hw);
3391 		/* These devices don't use PCI-E */
3392 		switch (hw->mac.type) {
3393 		case ixgbe_mac_X550EM_x:
3401 	** For the Quad port adapter we need to parse back
3402 	** up the PCI tree to find the speed of the expansion
3403 	** slot into which this adapter is plugged. A bit more work.
3405 	dev = device_get_parent(device_get_parent(dev));
3407 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3408 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3410 	dev = device_get_parent(device_get_parent(dev));
3412 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3413 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3415 	/* Now get the PCI Express Capabilities offset */
3416 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3417 	/* ...and read the Link Status Register */
3418 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status register. */
3419 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3420 	case IXGBE_PCI_LINK_WIDTH_1:
3421 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3423 	case IXGBE_PCI_LINK_WIDTH_2:
3424 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3426 	case IXGBE_PCI_LINK_WIDTH_4:
3427 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3429 	case IXGBE_PCI_LINK_WIDTH_8:
3430 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3433 		hw->bus.width = ixgbe_bus_width_unknown;
/* Decode negotiated link speed (Gen1/Gen2/Gen3). */
3437 	switch (link & IXGBE_PCI_LINK_SPEED) {
3438 	case IXGBE_PCI_LINK_SPEED_2500:
3439 		hw->bus.speed = ixgbe_bus_speed_2500;
3441 	case IXGBE_PCI_LINK_SPEED_5000:
3442 		hw->bus.speed = ixgbe_bus_speed_5000;
3444 	case IXGBE_PCI_LINK_SPEED_8000:
3445 		hw->bus.speed = ixgbe_bus_speed_8000;
3448 		hw->bus.speed = ixgbe_bus_speed_unknown;
3452 	mac->ops.set_lan_id(hw);
3455 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3456 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3457 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3458 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3459 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3460 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3461 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3464 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3465 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3466 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3467 		device_printf(dev, "PCI-Express bandwidth available"
3468 		    " for this card\n     is not sufficient for"
3469 		    " optimal performance.\n");
3470 		device_printf(dev, "For optimal performance a x8 "
3471 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3473 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3474 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3475 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3476 		device_printf(dev, "PCI-Express bandwidth available"
3477 		    " for this card\n     is not sufficient for"
3478 		    " optimal performance.\n");
3479 		device_printf(dev, "For optimal performance a x8 "
3480 		    "PCIE Gen3 slot is required.\n");
3488 ** Setup the correct IVAR register for a particular MSIX interrupt
3489 **   (yes this is all very magic and confusing :)
3490 **  - entry is the register array entry
3491 **  - vector is the MSIX vector for this queue
3492 **  - type is RX/TX/MISC
/*
 * ixgbe_set_ivar - map an interrupt cause (queue RX/TX or MISC, type==-1)
 * to an MSI-X vector by editing the proper 8-bit field of the IVAR (or
 * IVAR_MISC) register for this MAC family.
 */
3495 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3497 	struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the ALLOC_VAL bit set in each valid IVAR field. */
3500 	vector |= IXGBE_IVAR_ALLOC_VAL;
3502 	switch (hw->mac.type) {
3504 	case ixgbe_mac_82598EB:
3506 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3508 			entry += (type * 64);
/* 82598: four 8-bit fields per IVAR register, indexed by entry/4. */
3509 		index = (entry >> 2) & 0x1F;
3510 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3511 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3512 		ivar |= (vector << (8 * (entry & 0x3)));
3513 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3516 	case ixgbe_mac_82599EB:
3517 	case ixgbe_mac_X540:
3518 	case ixgbe_mac_X550:
3519 	case ixgbe_mac_X550EM_x:
3520 		if (type == -1) { /* MISC IVAR */
3521 			index = (entry & 1) * 8;
3522 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3523 			ivar &= ~(0xFF << index);
3524 			ivar |= (vector << index);
3525 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3526 		} else {	/* RX/TX IVARS */
/* 82599+: two queues per IVAR register, RX/TX fields 16 bits apart. */
3527 			index = (16 * (entry & 1)) + (8 * type);
3528 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3529 			ivar &= ~(0xFF << index);
3530 			ivar |= (vector << index);
3531 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - assign every queue's RX and TX causes to its
 * MSI-X vector via ixgbe_set_ivar(), seed the per-vector EITR moderation
 * value from the ixgbe_max_interrupt_rate tunable, and map the Link cause
 * to the admin vector.
 *
 * NOTE(review): excerpt omits lines (the newitr declaration/else branch) —
 * embedded line numbers are non-contiguous.
 */
3540 ixgbe_configure_ivars(struct adapter *adapter)
3542 	struct  ix_queue *que = adapter->queues;
/* EITR is programmed in 2us units; mask keeps the valid interval bits. */
3545 	if (ixgbe_max_interrupt_rate > 0)
3546 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3549 	** Disable DMA coalescing if interrupt moderation is
3556         for (int i = 0; i < adapter->num_queues; i++, que++) {
3557 		struct rx_ring *rxr = &adapter->rx_rings[i];
3558 		struct tx_ring *txr = &adapter->tx_rings[i];
3559 		/* First the RX queue entry */
3560                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3561 		/* ... and the TX */
3562 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3563 		/* Set an Initial EITR value */
3564                 IXGBE_WRITE_REG(&adapter->hw,
3565                     IXGBE_EITR(que->msix), newitr);
3568 	/* For the Link interrupt */
3569         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3573 ** ixgbe_sfp_probe - called in the local timer to
3574 ** determine if a port had optics inserted.
/*
 * ixgbe_sfp_probe - poll (from the local timer) for optics insertion on an
 * 82598 NL-PHY port that previously had no SFP. Returns TRUE only once a
 * supported module has been identified; clears sfp_probe either way so
 * polling stops after a definitive answer.
 *
 * NOTE(review): excerpt omits lines (goto/return paths) — embedded line
 * numbers are non-contiguous.
 */
3577 ixgbe_sfp_probe(struct adapter *adapter)
3579 	struct ixgbe_hw	*hw = &adapter->hw;
3580 	device_t	dev = adapter->dev;
3581 	bool		result = FALSE;
3583 	if ((hw->phy.type == ixgbe_phy_nl) &&
3584 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3585 		s32 ret = hw->phy.ops.identify_sfp(hw);
3588 		ret = hw->phy.ops.reset(hw);
3589 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3590 			device_printf(dev,"Unsupported SFP+ module detected!");
3591 			printf(" Reload driver with supported module.\n");
3592 			adapter->sfp_probe = FALSE;
3595 			device_printf(dev,"SFP+ module detected!\n");
3596 			/* We now have supported optics */
3597 			adapter->sfp_probe = FALSE;
3598 			/* Set the optics type so system reports correctly */
3599 			ixgbe_setup_optics(adapter);
3607 ** Tasklet handler for MSIX Link interrupts
3608 ** - do outside interrupt since it might sleep
/*
 * ixgbe_handle_link - taskqueue handler for the MSI-X link interrupt.
 * Runs outside interrupt context because ixgbe_check_link() may sleep;
 * re-reads link speed/state from hardware and propagates it to the stack.
 */
3611 ixgbe_handle_link(void *context, int pending)
3613 struct adapter *adapter = context;
3615 ixgbe_check_link(&adapter->hw,
3616 &adapter->link_speed, &adapter->link_up, 0);
3617 ixgbe_update_link_status(adapter);
3621 ** Tasklet for handling SFP module interrupts
/*
 * ixgbe_handle_mod - taskqueue handler for SFP module-insertion
 * interrupts.  Identifies the new module, runs the MAC's SFP setup,
 * and on success chains to the multispeed-fiber (msf) task to bring
 * the link up.  Unsupported modules are logged and abandoned.
 */
3624 ixgbe_handle_mod(void *context, int pending)
3626 struct adapter *adapter = context;
3627 struct ixgbe_hw *hw = &adapter->hw;
3628 device_t dev = adapter->dev;
3631 err = hw->phy.ops.identify_sfp(hw);
3632 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3634 "Unsupported SFP+ module type was detected.\n");
3638 err = hw->mac.ops.setup_sfp(hw);
3639 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3641 "Setup failure - unsupported SFP+ module type.\n");
/* Kick the MSF task to (re)negotiate link on the new module */
3644 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3650 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
 * ixgbe_handle_msf - taskqueue handler for multispeed-fiber (MSF)
 * interrupts.  Re-identifies the optics, determines the speeds to
 * advertise (falling back to the MAC's full capabilities when the PHY
 * advertises nothing), restarts link setup, and rebuilds the ifmedia
 * list so userland sees the current media options.
 */
3653 ixgbe_handle_msf(void *context, int pending)
3655 struct adapter *adapter = context;
3656 struct ixgbe_hw *hw = &adapter->hw;
3661 err = hw->phy.ops.identify_sfp(hw);
3663 ixgbe_setup_optics(adapter);
3664 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3667 autoneg = hw->phy.autoneg_advertised;
3668 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3669 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3670 if (hw->mac.ops.setup_link)
3671 hw->mac.ops.setup_link(hw, autoneg, TRUE);
/* Media list may have changed with the new module; rebuild it */
3673 ifmedia_removeall(&adapter->media);
3674 ixgbe_add_media_types(adapter);
3679 ** Tasklet for handling interrupts from an external PHY
/*
 * ixgbe_handle_phy - taskqueue handler for external-PHY interrupts
 * (X550EM baseT).  Delegates to the PHY's LASI handler; an overtemp
 * result gets a loud warning, any other error is reported with its code.
 */
3682 ixgbe_handle_phy(void *context, int pending)
3684 struct adapter *adapter = context;
3685 struct ixgbe_hw *hw = &adapter->hw;
3688 error = hw->phy.ops.handle_lasi(hw);
3689 if (error == IXGBE_ERR_OVERTEMP)
3690 device_printf(adapter->dev,
3691 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3692 " PHY will downshift to lower power state!\n");
3694 device_printf(adapter->dev,
3695 "Error handling LASI interrupt: %d\n",
3702 ** Tasklet for reinitializing the Flow Director filter table
/*
 * ixgbe_reinit_fdir - taskqueue handler that rebuilds the Flow
 * Director filter table after the hardware signalled it needs a
 * reinit, then re-enables FDIR interrupts and marks the interface
 * running again.
 */
3705 ixgbe_reinit_fdir(void *context, int pending)
3707 struct adapter *adapter = context;
3708 struct ifnet *ifp = adapter->ifp;
/* Guard: only proceed when a reinit was actually requested */
3710 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3712 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3713 adapter->fdir_reinit = 0;
3714 /* re-enable flow director interrupts */
3715 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3716 /* Restart the interface */
3717 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3722 /*********************************************************************
3724 * Configure DMA Coalescing
3726 **********************************************************************/
/*
 * ixgbe_config_dmac - push the driver's DMA-coalescing settings
 * (watchdog timer from adapter->dmac, current link speed) into the
 * shared-code config and reprogram the hardware, but only when one of
 * them changed.  No-op on pre-X550 MACs or when the MAC has no
 * dmac_config op.
 */
3728 ixgbe_config_dmac(struct adapter *adapter)
3730 struct ixgbe_hw *hw = &adapter->hw;
3731 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3733 if (hw->mac.type < ixgbe_mac_X550 ||
3734 !hw->mac.ops.dmac_config)
/* XOR detects a change in either the timer or the link speed */
3737 if (dcfg->watchdog_timer ^ adapter->dmac ||
3738 dcfg->link_speed ^ adapter->link_speed) {
3739 dcfg->watchdog_timer = adapter->dmac;
3740 dcfg->fcoe_en = false;
3741 dcfg->link_speed = adapter->link_speed;
3744 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3745 dcfg->watchdog_timer, dcfg->link_speed);
3747 hw->mac.ops.dmac_config(hw);
3752 * Checks whether the adapter supports Energy Efficient Ethernet
3753 * or not, based on device ID.
/*
 * ixgbe_check_eee_support - Energy Efficient Ethernet is supported
 * exactly when the MAC provides a setup_eee op; record that as the
 * initial enabled state.
 */
3756 ixgbe_check_eee_support(struct adapter *adapter)
3758 struct ixgbe_hw *hw = &adapter->hw;
3760 adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
3764 * Checks whether the adapter's ports are capable of
3765 * Wake On LAN by reading the adapter's NVM.
3767 * Sets each port's hw->wol_enabled value depending
3768 * on the value read here.
/*
 * ixgbe_check_wol_support - read the NVM device capabilities to decide
 * whether this port can do Wake-on-LAN, mirroring the answer into both
 * adapter->wol_support and hw->wol_enabled, and snapshot the current
 * wake-up filter configuration (WUFC) for later restore.
 */
3771 ixgbe_check_wol_support(struct adapter *adapter)
3773 struct ixgbe_hw *hw = &adapter->hw;
3776 /* Find out WoL support for port */
3777 adapter->wol_support = hw->wol_enabled = 0;
3778 ixgbe_get_device_caps(hw, &dev_caps);
/* WoL on either both ports, or port 0 only when this is port 0
 * (the port check is in a line not visible here) */
3779 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3780 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3782 adapter->wol_support = hw->wol_enabled = 1;
3784 /* Save initial wake up filter configuration */
3785 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3791 * Prepare the adapter/port for LPLU and/or WoL
/*
 * ixgbe_setup_low_power_mode - prepare the port for suspend: on
 * X550EM baseT (with LPLU support) arm WoL (clear WUS, program WUFC,
 * enable WUC) and enter Low Power Link Up through the PHY; all other
 * adapters are simply stopped.  Caller must hold the core lock.
 */
3794 ixgbe_setup_low_power_mode(struct adapter *adapter)
3796 struct ixgbe_hw *hw = &adapter->hw;
3797 device_t dev = adapter->dev;
3800 mtx_assert(&adapter->core_mtx, MA_OWNED);
3802 /* Limit power management flow to X550EM baseT */
3803 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3804 && hw->phy.ops.enter_lplu) {
3805 /* Turn off support for APM wakeup. (Using ACPI instead) */
3806 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3807 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3810 * Clear Wake Up Status register to prevent any previous wakeup
3811 * events from waking us up immediately after we suspend.
3813 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3816 * Program the Wakeup Filter Control register with user filter
3819 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3821 /* Enable wakeups and power management in Wakeup Control */
3822 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3823 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3825 /* X550EM baseT adapters need a special LPLU flow */
/* Disable PHY reset so ixgbe_stop() doesn't undo the LPLU state */
3826 hw->phy.reset_disable = true;
3827 ixgbe_stop(adapter);
3828 error = hw->phy.ops.enter_lplu(hw);
3831 "Error entering LPLU: %d\n", error);
3832 hw->phy.reset_disable = false;
3834 /* Just stop for other adapters */
3835 ixgbe_stop(adapter);
3841 /**********************************************************************
3843 * Update the board statistics counters.
3845 **********************************************************************/
/*
 * ixgbe_update_stats_counters - accumulate the hardware statistics
 * registers (clear-on-read) into adapter->stats.pf, applying the
 * documented 82598-vs-later register differences and hardware
 * workarounds, then fill the OS ifnet counters from the totals.
 *
 * NOTE(review): in the visible lines missed_rx and total_missed_rx are
 * initialized to 0 and never updated — presumably the MPC per-queue
 * loop lives in lines elided from this listing; verify against the
 * full source.
 */
3847 ixgbe_update_stats_counters(struct adapter *adapter)
3849 struct ixgbe_hw *hw = &adapter->hw;
3850 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3851 u64 total_missed_rx = 0;
3853 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3854 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3855 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3856 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue RX/TX packet and RX-drop counters, 16 queue registers */
3858 for (int i = 0; i < 16; i++) {
3859 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3860 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3861 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3863 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3864 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3865 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3867 /* Hardware workaround, gprc counts missed packets */
3868 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3869 adapter->stats.pf.gprc -= missed_rx;
/* 82599/X540/X550 have 36-bit low/high byte counters; 82598 does not */
3871 if (hw->mac.type != ixgbe_mac_82598EB) {
3872 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3873 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3874 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3875 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3876 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3877 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3878 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3879 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3881 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3882 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3883 /* 82598 only has a counter in the high register */
3884 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3885 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3886 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3890 * Workaround: mprc hardware is incorrectly counting
3891 * broadcasts, so for now we subtract those.
3893 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3894 adapter->stats.pf.bprc += bprc;
3895 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3896 if (hw->mac.type == ixgbe_mac_82598EB)
3897 adapter->stats.pf.mprc -= bprc;
/* RX size-bucket histograms */
3899 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3900 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3901 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3902 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3903 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3904 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3906 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3907 adapter->stats.pf.lxontxc += lxon;
3908 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3909 adapter->stats.pf.lxofftxc += lxoff;
3910 total = lxon + lxoff;
/* Pause frames are counted by the TX stats; back them out so the
 * good-TX counters reflect only data frames */
3912 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3913 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3914 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3915 adapter->stats.pf.gptc -= total;
3916 adapter->stats.pf.mptc -= total;
3917 adapter->stats.pf.ptc64 -= total;
3918 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3920 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3921 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3922 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3923 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3924 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3925 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3926 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3927 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3928 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3929 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3930 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3931 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3932 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3933 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3934 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3935 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3936 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3937 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3938 /* Only read FCOE on 82599 */
3939 if (hw->mac.type != ixgbe_mac_82598EB) {
3940 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3941 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3942 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3943 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3944 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3947 /* Fill out the OS statistics structure */
3948 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3949 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3950 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3951 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3952 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3953 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3954 IXGBE_SET_COLLISIONS(adapter, 0);
3955 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3956 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3957 + adapter->stats.pf.rlec);
3960 #if __FreeBSD_version >= 1100036
/*
 * ixgbe_get_counter - if_get_counter method (FreeBSD >= 1100036):
 * serve ifnet statistics from the adapter's cached counters; OQDROPS
 * is computed on demand by summing each TX ring's buf_ring drop count.
 * Unknown counters fall through to if_get_counter_default().
 */
3962 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3964 struct adapter *adapter;
3965 struct tx_ring *txr;
3968 adapter = if_getsoftc(ifp);
3971 case IFCOUNTER_IPACKETS:
3972 return (adapter->ipackets);
3973 case IFCOUNTER_OPACKETS:
3974 return (adapter->opackets);
3975 case IFCOUNTER_IBYTES:
3976 return (adapter->ibytes);
3977 case IFCOUNTER_OBYTES:
3978 return (adapter->obytes);
3979 case IFCOUNTER_IMCASTS:
3980 return (adapter->imcasts);
3981 case IFCOUNTER_OMCASTS:
3982 return (adapter->omcasts);
3983 case IFCOUNTER_COLLISIONS:
3985 case IFCOUNTER_IQDROPS:
3986 return (adapter->iqdrops);
3987 case IFCOUNTER_OQDROPS:
/* Sum per-ring buf_ring drops; rv is returned in an elided line */
3989 txr = adapter->tx_rings;
3990 for (int i = 0; i < adapter->num_queues; i++, txr++)
3991 rv += txr->br->br_drops;
3993 case IFCOUNTER_IERRORS:
3994 return (adapter->ierrors);
3996 return (if_get_counter_default(ifp, cnt));
4001 /** ixgbe_sysctl_tdh_handler - Handler function
4002 * Retrieves the TDH value from the hardware
/*
 * ixgbe_sysctl_tdh_handler - read-only sysctl returning the hardware
 * Transmit Descriptor Head (TDH) for the ring passed via oid_arg1.
 */
4005 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4009 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4012 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4013 error = sysctl_handle_int(oidp, &val, 0, req);
4014 if (error || !req->newptr)
4019 /** ixgbe_sysctl_tdt_handler - Handler function
4020 * Retrieves the TDT value from the hardware
/*
 * ixgbe_sysctl_tdt_handler - read-only sysctl returning the hardware
 * Transmit Descriptor Tail (TDT) for the ring passed via oid_arg1.
 */
4023 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4027 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4030 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4031 error = sysctl_handle_int(oidp, &val, 0, req);
4032 if (error || !req->newptr)
4037 /** ixgbe_sysctl_rdh_handler - Handler function
4038 * Retrieves the RDH value from the hardware
/*
 * ixgbe_sysctl_rdh_handler - read-only sysctl returning the hardware
 * Receive Descriptor Head (RDH) for the ring passed via oid_arg1.
 */
4041 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4045 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4048 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4049 error = sysctl_handle_int(oidp, &val, 0, req);
4050 if (error || !req->newptr)
4055 /** ixgbe_sysctl_rdt_handler - Handler function
4056 * Retrieves the RDT value from the hardware
/*
 * ixgbe_sysctl_rdt_handler - read-only sysctl returning the hardware
 * Receive Descriptor Tail (RDT) for the ring passed via oid_arg1.
 */
4059 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4063 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4066 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4067 error = sysctl_handle_int(oidp, &val, 0, req);
4068 if (error || !req->newptr)
/*
 * ixgbe_sysctl_interrupt_rate_handler - read/write sysctl converting
 * between a queue's EITR register interval and an interrupts-per-
 * second rate (rate = 500000 / usec; interval = 4000000 / rate).
 * Writing 0 removes the limit.
 *
 * NOTE(review): the clear mask is ~0xfff but the set mask is 0xff8 and
 * the read mask 0x0FF8 — confirm the intended EITR field width against
 * the datasheet; the inconsistency looks accidental.
 */
4074 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4077 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4078 unsigned int reg, usec, rate;
4080 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4081 usec = ((reg & 0x0FF8) >> 3);
4083 rate = 500000 / usec;
4086 error = sysctl_handle_int(oidp, &rate, 0, req);
4087 if (error || !req->newptr)
4089 reg &= ~0xfff; /* default, no limitation */
4090 ixgbe_max_interrupt_rate = 0;
4091 if (rate > 0 && rate < 500000) {
4094 ixgbe_max_interrupt_rate = rate;
4095 reg |= ((4000000/rate) & 0xff8 );
4097 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * ixgbe_add_device_sysctls - register the per-device sysctl tree:
 * common knobs (flow control, AIM, advertised speed, thermal test),
 * then conditional nodes for X550 DMA coalescing, EEE (when the MAC
 * has setup_eee), WoL (selected 10GBaseT device IDs), and the external
 * PHY on X550EM baseT.
 */
4102 ixgbe_add_device_sysctls(struct adapter *adapter)
4104 device_t dev = adapter->dev;
4105 struct ixgbe_hw *hw = &adapter->hw;
4106 struct sysctl_oid_list *child;
4107 struct sysctl_ctx_list *ctx;
4109 ctx = device_get_sysctl_ctx(dev);
4110 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4112 /* Sysctls for all devices */
4113 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4114 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4115 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4117 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4119 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4121 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4122 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4123 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4125 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4126 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4127 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4129 /* for X550 devices */
4130 if (hw->mac.type >= ixgbe_mac_X550)
4131 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4132 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4133 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4135 /* for X550T and X550EM backplane devices */
4136 if (hw->mac.ops.setup_eee) {
4137 struct sysctl_oid *eee_node;
4138 struct sysctl_oid_list *eee_list;
4140 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4142 "Energy Efficient Ethernet sysctls");
4143 eee_list = SYSCTL_CHILDREN(eee_node);
4145 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4146 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4147 ixgbe_sysctl_eee_enable, "I",
4148 "Enable or Disable EEE");
4150 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4151 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4152 ixgbe_sysctl_eee_negotiated, "I",
4153 "EEE negotiated on link");
4155 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4156 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4157 ixgbe_sysctl_eee_tx_lpi_status, "I",
4158 "Whether or not TX link is in LPI state");
4160 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4161 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4162 ixgbe_sysctl_eee_rx_lpi_status, "I",
4163 "Whether or not RX link is in LPI state");
4166 /* for certain 10GBaseT devices */
4167 if (hw->device_id == IXGBE_DEV_ID_X550T ||
4168 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4169 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4170 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4171 ixgbe_sysctl_wol_enable, "I",
4172 "Enable/Disable Wake on LAN");
4174 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4175 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4176 ixgbe_sysctl_wufc, "I",
4177 "Enable/Disable Wake Up Filters");
4180 /* for X550EM 10GBaseT devices */
4181 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4182 struct sysctl_oid *phy_node;
4183 struct sysctl_oid_list *phy_list;
4185 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4187 "External PHY sysctls");
4188 phy_list = SYSCTL_CHILDREN(phy_node);
4190 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4191 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4192 ixgbe_sysctl_phy_temp, "I",
4193 "Current External PHY Temperature (Celsius)");
4195 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4196 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4197 ixgbe_sysctl_phy_overtemp_occurred, "I",
4198 "External PHY High Temperature Event Occurred");
4203 * Add sysctl variables, one per statistic, to the system.
/*
 * ixgbe_add_hw_stats - register one sysctl per statistic: driver-level
 * counters, one node per TX queue, one node per RX queue (with LRO
 * stats), and a "mac_stats" node mirroring struct ixgbe_hw_stats.
 */
4206 ixgbe_add_hw_stats(struct adapter *adapter)
4208 device_t dev = adapter->dev;
4210 struct tx_ring *txr = adapter->tx_rings;
4211 struct rx_ring *rxr = adapter->rx_rings;
4213 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4214 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4215 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4216 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4218 struct sysctl_oid *stat_node, *queue_node;
4219 struct sysctl_oid_list *stat_list, *queue_list;
4221 #define QUEUE_NAME_LEN 32
4222 char namebuf[QUEUE_NAME_LEN];
4224 /* Driver Statistics */
4225 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4226 CTLFLAG_RD, &adapter->dropped_pkts,
4227 "Driver dropped packets");
4228 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4229 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4230 "m_defrag() failed");
4231 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4232 CTLFLAG_RD, &adapter->watchdog_events,
4233 "Watchdog timeouts");
4234 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4235 CTLFLAG_RD, &adapter->link_irq,
4236 "Link MSIX IRQ Handled");
/* Per-TX-queue nodes: queue0, queue1, ... */
4238 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4239 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4240 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4241 CTLFLAG_RD, NULL, "Queue Name");
4242 queue_list = SYSCTL_CHILDREN(queue_node);
4244 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4245 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4246 sizeof(&adapter->queues[i]),
4247 ixgbe_sysctl_interrupt_rate_handler, "IU",
4249 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4250 CTLFLAG_RD, &(adapter->queues[i].irqs),
4251 "irqs on this queue");
4252 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4253 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4254 ixgbe_sysctl_tdh_handler, "IU",
4255 "Transmit Descriptor Head");
4256 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4257 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4258 ixgbe_sysctl_tdt_handler, "IU",
4259 "Transmit Descriptor Tail");
4260 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4261 CTLFLAG_RD, &txr->tso_tx,
4263 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4264 CTLFLAG_RD, &txr->no_tx_dma_setup,
4265 "Driver tx dma failure in xmit");
4266 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4267 CTLFLAG_RD, &txr->no_desc_avail,
4268 "Queue No Descriptor Available");
4269 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4270 CTLFLAG_RD, &txr->total_packets,
4271 "Queue Packets Transmitted");
4272 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4273 CTLFLAG_RD, &txr->br->br_drops,
4274 "Packets dropped in buf_ring");
/* Per-RX-queue nodes */
4277 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4278 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4279 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4280 CTLFLAG_RD, NULL, "Queue Name");
4281 queue_list = SYSCTL_CHILDREN(queue_node);
4283 struct lro_ctrl *lro = &rxr->lro;
/* NOTE(review): the snprintf/SYSCTL_ADD_NODE/SYSCTL_CHILDREN
 * sequence above (4278-4281) is repeated verbatim below
 * (4285-4288), creating the same-named node twice per loop
 * iteration — this duplication looks like a merge artifact and
 * should be removed once confirmed against the full source. */
4285 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4286 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4287 CTLFLAG_RD, NULL, "Queue Name");
4288 queue_list = SYSCTL_CHILDREN(queue_node);
4290 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4291 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4292 ixgbe_sysctl_rdh_handler, "IU",
4293 "Receive Descriptor Head");
4294 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4295 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4296 ixgbe_sysctl_rdt_handler, "IU",
4297 "Receive Descriptor Tail");
4298 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4299 CTLFLAG_RD, &rxr->rx_packets,
4300 "Queue Packets Received");
4301 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4302 CTLFLAG_RD, &rxr->rx_bytes,
4303 "Queue Bytes Received");
4304 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4305 CTLFLAG_RD, &rxr->rx_copies,
4306 "Copied RX Frames");
4307 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4308 CTLFLAG_RD, &lro->lro_queued, 0,
4310 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4311 CTLFLAG_RD, &lro->lro_flushed, 0,
4315 /* MAC stats get the own sub node */
4317 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4318 CTLFLAG_RD, NULL, "MAC Statistics");
4319 stat_list = SYSCTL_CHILDREN(stat_node);
4321 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4322 CTLFLAG_RD, &stats->crcerrs,
4324 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4325 CTLFLAG_RD, &stats->illerrc,
4326 "Illegal Byte Errors");
4327 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4328 CTLFLAG_RD, &stats->errbc,
4330 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4331 CTLFLAG_RD, &stats->mspdc,
4332 "MAC Short Packets Discarded");
4333 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4334 CTLFLAG_RD, &stats->mlfc,
4335 "MAC Local Faults");
4336 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4337 CTLFLAG_RD, &stats->mrfc,
4338 "MAC Remote Faults");
4339 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4340 CTLFLAG_RD, &stats->rlec,
4341 "Receive Length Errors");
4343 /* Flow Control stats */
4344 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4345 CTLFLAG_RD, &stats->lxontxc,
4346 "Link XON Transmitted");
4347 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4348 CTLFLAG_RD, &stats->lxonrxc,
4349 "Link XON Received");
4350 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4351 CTLFLAG_RD, &stats->lxofftxc,
4352 "Link XOFF Transmitted");
4353 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4354 CTLFLAG_RD, &stats->lxoffrxc,
4355 "Link XOFF Received");
4357 /* Packet Reception Stats */
4358 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4359 CTLFLAG_RD, &stats->tor,
4360 "Total Octets Received");
4361 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4362 CTLFLAG_RD, &stats->gorc,
4363 "Good Octets Received");
4364 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4365 CTLFLAG_RD, &stats->tpr,
4366 "Total Packets Received");
4367 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4368 CTLFLAG_RD, &stats->gprc,
4369 "Good Packets Received");
4370 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4371 CTLFLAG_RD, &stats->mprc,
4372 "Multicast Packets Received");
4373 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4374 CTLFLAG_RD, &stats->bprc,
4375 "Broadcast Packets Received");
4376 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4377 CTLFLAG_RD, &stats->prc64,
4378 "64 byte frames received ");
4379 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4380 CTLFLAG_RD, &stats->prc127,
4381 "65-127 byte frames received");
4382 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4383 CTLFLAG_RD, &stats->prc255,
4384 "128-255 byte frames received");
4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4386 CTLFLAG_RD, &stats->prc511,
4387 "256-511 byte frames received");
4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4389 CTLFLAG_RD, &stats->prc1023,
4390 "512-1023 byte frames received");
4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4392 CTLFLAG_RD, &stats->prc1522,
4393 "1023-1522 byte frames received");
4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4395 CTLFLAG_RD, &stats->ruc,
4396 "Receive Undersized");
4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4398 CTLFLAG_RD, &stats->rfc,
4399 "Fragmented Packets Received ");
4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4401 CTLFLAG_RD, &stats->roc,
4402 "Oversized Packets Received");
4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4404 CTLFLAG_RD, &stats->rjc,
4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4407 CTLFLAG_RD, &stats->mngprc,
4408 "Management Packets Received");
/* NOTE(review): "management_pkts_drpd" is bound to &stats->mngptc
 * (management packets TRANSMITTED); the dropped counter read in
 * ixgbe_update_stats_counters is mngpdc — this binding is almost
 * certainly wrong; verify and switch to &stats->mngpdc. */
4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4410 CTLFLAG_RD, &stats->mngptc,
4411 "Management Packets Dropped");
4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4413 CTLFLAG_RD, &stats->xec,
4416 /* Packet Transmission Stats */
4417 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4418 CTLFLAG_RD, &stats->gotc,
4419 "Good Octets Transmitted");
4420 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4421 CTLFLAG_RD, &stats->tpt,
4422 "Total Packets Transmitted");
4423 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4424 CTLFLAG_RD, &stats->gptc,
4425 "Good Packets Transmitted");
4426 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4427 CTLFLAG_RD, &stats->bptc,
4428 "Broadcast Packets Transmitted");
4429 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4430 CTLFLAG_RD, &stats->mptc,
4431 "Multicast Packets Transmitted");
4432 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4433 CTLFLAG_RD, &stats->mngptc,
4434 "Management Packets Transmitted");
4435 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4436 CTLFLAG_RD, &stats->ptc64,
4437 "64 byte frames transmitted ");
4438 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4439 CTLFLAG_RD, &stats->ptc127,
4440 "65-127 byte frames transmitted");
4441 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4442 CTLFLAG_RD, &stats->ptc255,
4443 "128-255 byte frames transmitted");
4444 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4445 CTLFLAG_RD, &stats->ptc511,
4446 "256-511 byte frames transmitted");
4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4448 CTLFLAG_RD, &stats->ptc1023,
4449 "512-1023 byte frames transmitted");
4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4451 CTLFLAG_RD, &stats->ptc1522,
4452 "1024-1522 byte frames transmitted");
4456 ** Set flow control using sysctl:
4457 ** Flow control values:
/*
 * ixgbe_set_flowcntl - sysctl handler for the "fc" knob.  Accepts an
 * ixgbe_fc_* mode; RX/TX pause modes disable per-queue RX drop when
 * multiqueue, other values fall back to ixgbe_fc_none and re-enable
 * it.  Autoneg of flow control is disabled because the user is
 * forcing a mode, then the hardware is reprogrammed.
 */
4464 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4467 struct adapter *adapter = (struct adapter *) arg1;
4470 error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4471 if ((error) || (req->newptr == NULL))
4474 /* Don't bother if it's not changed */
4475 if (adapter->fc == last)
4478 switch (adapter->fc) {
4479 case ixgbe_fc_rx_pause:
4480 case ixgbe_fc_tx_pause:
4482 adapter->hw.fc.requested_mode = adapter->fc;
4483 if (adapter->num_queues > 1)
4484 ixgbe_disable_rx_drop(adapter);
4487 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4488 if (adapter->num_queues > 1)
4489 ixgbe_enable_rx_drop(adapter);
4495 /* Don't autoneg if forcing a value */
4496 adapter->hw.fc.disable_fc_autoneg = TRUE;
4497 ixgbe_fc_enable(&adapter->hw);
4502 ** Control advertised link speed:
4504 ** 0x1 - advertise 100 Mb
4505 ** 0x2 - advertise 1G
4506 ** 0x4 - advertise 10G
/*
 * ixgbe_set_advertise - sysctl handler for "advertise_speed".
 * Accepts a bitmask 0x1..0x7 (0x1 = 100Mb, 0x2 = 1G, 0x4 = 10G);
 * rejected on non-copper/non-multispeed-fiber media, and 100Mb is
 * only allowed on X540/X550.  On success the link is renegotiated
 * with the requested speed set.
 */
4509 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4511 int error = 0, requested;
4512 struct adapter *adapter;
4514 struct ixgbe_hw *hw;
4515 ixgbe_link_speed speed = 0;
4517 adapter = (struct adapter *) arg1;
4521 requested = adapter->advertise;
4522 error = sysctl_handle_int(oidp, &requested, 0, req);
4523 if ((error) || (req->newptr == NULL))
4526 /* Checks to validate new value */
4527 if (adapter->advertise == requested) /* no change */
4530 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4531 (hw->phy.multispeed_fiber))) {
4533 "Advertised speed can only be set on copper or "
4534 "multispeed fiber media types.\n");
4538 if (requested < 0x1 || requested > 0x7) {
4540 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4544 if ((requested & 0x1)
4545 && (hw->mac.type != ixgbe_mac_X540)
4546 && (hw->mac.type != ixgbe_mac_X550)) {
4547 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4551 /* Set new value and report new advertised mode */
4552 if (requested & 0x1)
4553 speed |= IXGBE_LINK_SPEED_100_FULL;
4554 if (requested & 0x2)
4555 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4556 if (requested & 0x4)
4557 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4559 hw->mac.autotry_restart = TRUE;
4560 hw->mac.ops.setup_link(hw, speed, TRUE);
4561 adapter->advertise = requested;
4567 * The following two sysctls are for X550 BaseT devices;
4568 * they deal with the external PHY used in them.
/*
 * ixgbe_sysctl_phy_temp - read-only sysctl reporting the external
 * PHY's current temperature (X550EM baseT only).  Reads the vendor-
 * specific temperature register over MDIO and returns the shifted
 * value; errors on other devices or on a failed MDIO read.
 */
4571 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4573 struct adapter *adapter = (struct adapter *) arg1;
4574 struct ixgbe_hw *hw = &adapter->hw;
4577 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4578 device_printf(adapter->dev,
4579 "Device has no supported external thermal sensor.\n");
4583 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4584 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4586 device_printf(adapter->dev,
4587 "Error reading from PHY's current temperature register\n");
4591 /* Shift temp for output */
4594 return (sysctl_handle_int(oidp, NULL, reg, req));
4598 * Reports whether the current PHY temperature is over
4599 * the overtemp threshold.
4600 * - This is reported directly from the PHY
/*
 * ixgbe_sysctl_phy_overtemp_occurred - read-only sysctl reporting
 * whether the external PHY has latched an overtemp event (X550EM
 * baseT only); reads the PHY's overtemp status register over MDIO
 * and returns bit 0x4000 normalized to 0/1.
 */
4603 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4605 struct adapter *adapter = (struct adapter *) arg1;
4606 struct ixgbe_hw *hw = &adapter->hw;
4609 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4610 device_printf(adapter->dev,
4611 "Device has no supported external thermal sensor.\n");
4615 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4616 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4618 device_printf(adapter->dev,
4619 "Error reading from PHY's temperature status register\n");
4623 /* Get occurrence bit */
4624 reg = !!(reg & 0x4000);
4625 return (sysctl_handle_int(oidp, 0, reg, req));
4629 ** Thermal Shutdown Trigger (internal MAC)
4630 ** - Set this to 1 to cause an overtemp event to occur
/*
 * ixgbe_sysctl_thermal_test - write-1-to-trigger sysctl: sets the
 * thermal-sensor bit in EICS to force a software-initiated overtemp
 * interrupt on the internal MAC, for testing the overtemp path.
 */
4633 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4635 struct adapter *adapter = (struct adapter *) arg1;
4636 struct ixgbe_hw *hw = &adapter->hw;
4637 int error, fire = 0;
4639 error = sysctl_handle_int(oidp, &fire, 0, req);
4640 if ((error) || (req->newptr == NULL))
4644 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4645 reg |= IXGBE_EICR_TS;
4646 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4653 ** Manage DMA Coalescing.
4655 ** 0/1 - off / on (use default value of 1000)
4657 ** Legal timer values are:
4658 ** 50,100,250,500,1000,2000,5000,10000
4660 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler for adapter->dmac.  Only X550-class MACs support DMA
 * coalescing; an illegal value restores the previous setting, and a
 * running interface is re-initialized so the new value takes effect.
 */
4663 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4665 struct adapter *adapter = (struct adapter *) arg1;
4666 struct ixgbe_hw *hw = &adapter->hw;
4667 struct ifnet *ifp = adapter->ifp;
4671 oldval = adapter->dmac;
4672 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4673 if ((error) || (req->newptr == NULL))
/* DMA coalescing is only present on the X550 family. */
4676 switch (hw->mac.type) {
4677 case ixgbe_mac_X550:
4678 case ixgbe_mac_X550EM_x:
4681 device_printf(adapter->dev,
4682 "DMA Coalescing is only supported on X550 devices\n");
/* Validate the requested timer value. */
4686 switch (adapter->dmac) {
4690 case 1: /* Enable and use default */
4691 adapter->dmac = 1000;
4701 /* Legal values - allow */
4704 /* Do nothing, illegal value */
4705 adapter->dmac = oldval;
4709 /* Re-initialize hardware if it's already running */
4710 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4711 ixgbe_init(adapter);
4717 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Toggles hw->wol_enabled; enabling is refused when the adapter does
 * not advertise WoL support (adapter->wol_support).
 */
4723 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4725 struct adapter *adapter = (struct adapter *) arg1;
4726 struct ixgbe_hw *hw = &adapter->hw;
4727 int new_wol_enabled;
4730 new_wol_enabled = hw->wol_enabled;
4731 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4732 if ((error) || (req->newptr == NULL))
4734 if (new_wol_enabled == hw->wol_enabled)
4737 if (new_wol_enabled > 0 && !adapter->wol_support)
/* Normalize any non-zero input to 1 before storing. */
4740 hw->wol_enabled = !!(new_wol_enabled);
4746 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4747 * if supported by the adapter.
/*
 * Toggles adapter->eee_enabled; enabling requires the MAC to provide a
 * setup_eee op.  Re-inits a running interface to apply the change.
 */
4753 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4755 struct adapter *adapter = (struct adapter *) arg1;
4756 struct ixgbe_hw *hw = &adapter->hw;
4757 struct ifnet *ifp = adapter->ifp;
4758 int new_eee_enabled, error = 0;
4760 new_eee_enabled = adapter->eee_enabled;
4761 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4762 if ((error) || (req->newptr == NULL))
4764 if (new_eee_enabled == adapter->eee_enabled)
4767 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
/* Normalize to 0/1. */
4770 adapter->eee_enabled = !!(new_eee_enabled);
4772 /* Re-initialize hardware if it's already running */
4773 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4774 ixgbe_init(adapter);
4780 * Read-only sysctl indicating whether EEE support was negotiated
/* Returns 1 if IXGBE_EEE_STAT reports a negotiated EEE link, else 0. */
4784 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4786 struct adapter *adapter = (struct adapter *) arg1;
4787 struct ixgbe_hw *hw = &adapter->hw;
4790 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4792 return (sysctl_handle_int(oidp, 0, status, req));
4796 * Read-only sysctl indicating whether RX Link is in LPI state.
/* Returns 1 if the RX-LPI status bit is set in IXGBE_EEE_STAT. */
4799 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4801 struct adapter *adapter = (struct adapter *) arg1;
4802 struct ixgbe_hw *hw = &adapter->hw;
4805 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4806 IXGBE_EEE_RX_LPI_STATUS);
4808 return (sysctl_handle_int(oidp, 0, status, req));
4812 * Read-only sysctl indicating whether TX Link is in LPI state.
/* Returns 1 if the TX-LPI status bit is set in IXGBE_EEE_STAT. */
4815 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4817 struct adapter *adapter = (struct adapter *) arg1;
4818 struct ixgbe_hw *hw = &adapter->hw;
4821 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4822 IXGBE_EEE_TX_LPI_STATUS);
4824 return (sysctl_handle_int(oidp, 0, status, req));
4828 * Sysctl to enable/disable the types of packets that the
4829 * adapter will wake up on upon receipt.
4830 * WUFC - Wake Up Filter Control
4832 * 0x1 - Link Status Change
4833 * 0x2 - Magic Packet
4834 * 0x4 - Direct Exact
4835 * 0x8 - Directed Multicast
4837 * 0x20 - ARP/IPv4 Request Packet
4838 * 0x40 - Direct IPv4 Packet
4839 * 0x80 - Direct IPv6 Packet
4841 * Setting another flag will cause the sysctl to return an
 * error (only the low byte of the filter flags is user-settable).
4845 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4847 struct adapter *adapter = (struct adapter *) arg1;
4851 new_wufc = adapter->wufc;
4853 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4854 if ((error) || (req->newptr == NULL))
4856 if (new_wufc == adapter->wufc)
/* Bits above the low byte are not user-settable: reject them. */
4859 if (new_wufc & 0xffffff00)
/* Merge new low byte with the preserved upper bits of the old value. */
4863 new_wufc |= (0xffffff & adapter->wufc);
4864 adapter->wufc = new_wufc;
4871 ** Enable the hardware to drop packets when the buffer is
4872 ** full. This is useful when multiqueue, so that no single
4873 ** queue being full stalls the entire RX engine. We only
4874 ** enable this when Multiqueue AND when Flow Control is
 ** off — flow control would otherwise provide the backpressure.
4878 ixgbe_enable_rx_drop(struct adapter *adapter)
4880 struct ixgbe_hw *hw = &adapter->hw;
/* Set DROP_EN on every PF receive ring. */
4882 for (int i = 0; i < adapter->num_queues; i++) {
4883 struct rx_ring *rxr = &adapter->rx_rings[i];
4884 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4885 srrctl |= IXGBE_SRRCTL_DROP_EN;
4886 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4889 /* enable drop for each vf */
4890 for (int i = 0; i < adapter->num_vfs; i++) {
4891 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4892 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Inverse of ixgbe_enable_rx_drop(): clear DROP_EN on every PF ring
 * and clear the per-VF queue-drop-enable (QDE) bits.
 */
4899 ixgbe_disable_rx_drop(struct adapter *adapter)
4901 struct ixgbe_hw *hw = &adapter->hw;
4903 for (int i = 0; i < adapter->num_queues; i++) {
4904 struct rx_ring *rxr = &adapter->rx_rings[i];
4905 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4906 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4907 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4910 /* disable drop for each vf */
4911 for (int i = 0; i < adapter->num_vfs; i++) {
4912 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4913 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Trigger a software interrupt for each queue whose bit is set in
 * `queues`.  82598 uses the single EICS register; 82599/X540/X550 use
 * the two EICS_EX registers, covering up to 64 queue bits.
 */
4919 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4923 switch (adapter->hw.mac.type) {
4924 case ixgbe_mac_82598EB:
4925 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4926 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4928 case ixgbe_mac_82599EB:
4929 case ixgbe_mac_X540:
4930 case ixgbe_mac_X550:
4931 case ixgbe_mac_X550EM_x:
/* Low 32 queue bits, then high 32 bits. */
4932 mask = (queues & 0xFFFFFFFF);
4933 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4934 mask = (queues >> 32);
4935 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4945 ** Support functions for SRIOV/VF management
/* Send a PF control message to every VF marked active. */
4949 ixgbe_ping_all_vfs(struct adapter *adapter)
4951 struct ixgbe_vf *vf;
4953 for (int i = 0; i < adapter->num_vfs; i++) {
4954 vf = &adapter->vfs[i];
4955 if (vf->flags & IXGBE_VF_ACTIVE)
4956 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program the VF's pool offload (VMOLR) and VLAN insertion (VMVIR)
 * registers for the given default VLAN tag.  With a tag, all VF
 * traffic is tagged and untagged RX is rejected; without one, the VF
 * may tag its own traffic.
 */
4962 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
4965 struct ixgbe_hw *hw;
4966 uint32_t vmolr, vmvir;
4972 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
4974 /* Do not receive packets that pass inexact filters. */
4975 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
4977 /* Disable Multicast Promicuous Mode. */
4978 vmolr &= ~IXGBE_VMOLR_MPE;
4980 /* Accept broadcasts. */
4981 vmolr |= IXGBE_VMOLR_BAM;
4984 /* Accept non-vlan tagged traffic. */
4985 //vmolr |= IXGBE_VMOLR_AUPE;
4987 /* Allow VM to tag outgoing traffic; no default tag. */
4990 /* Require vlan-tagged traffic. */
4991 vmolr &= ~IXGBE_VMOLR_AUPE;
4993 /* Tag all traffic with provided vlan tag. */
4994 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
4996 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
4997 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the VF's max frame size can coexist with the PF's
 * configuration.  Only 82599 has the restriction; the answer also
 * depends on the VF's negotiated mailbox API version.
 */
5002 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5006 * Frame size compatibility between PF and VF is only a problem on
5007 * 82599-based cards. X540 and later support any combination of jumbo
5008 * frames on PFs and VFs.
5010 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5013 switch (vf->api_ver) {
5014 case IXGBE_API_VER_1_0:
5015 case IXGBE_API_VER_UNKNOWN:
5017 * On legacy (1.0 and older) VF versions, we don't support jumbo
5018 * frames on either the PF or the VF.
5020 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5021 vf->max_frame_size > ETHER_MAX_LEN)
5027 case IXGBE_API_VER_1_1:
5030 * 1.1 or later VF versions always work if they aren't using
5033 if (vf->max_frame_size <= ETHER_MAX_LEN)
5037 * Jumbo frames only work with VFs if the PF is also using jumbo
5040 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Return a VF to its post-reset state: restore the default VLAN,
 * clear its RAR entry, and drop the negotiated API version back to
 * unknown so the VF must renegotiate.
 */
5050 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5052 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5054 // XXX clear multicast addresses
5056 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5058 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/* Set the VF's bit in the VF Transmit Enable (VFTE) register. */
5063 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5065 struct ixgbe_hw *hw;
5066 uint32_t vf_index, vfte;
5070 vf_index = IXGBE_VF_INDEX(vf->pool);
5071 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5072 vfte |= IXGBE_VF_BIT(vf->pool);
5073 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Set or clear the VF's bit in the VF Receive Enable (VFRE) register:
 * RX is enabled only while the VF's frame size is compatible with the
 * PF's configuration.
 */
5078 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5080 struct ixgbe_hw *hw;
5081 uint32_t vf_index, vfre;
5085 vf_index = IXGBE_VF_INDEX(vf->pool);
5086 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5087 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5088 vfre |= IXGBE_VF_BIT(vf->pool);
5090 vfre &= ~IXGBE_VF_BIT(vf->pool);
5091 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reinstall
 * its MAC in a RAR slot when valid (ACK) or NACK otherwise, re-enable
 * rx/tx, then reply with the permanent-address message (MAC address
 * and multicast filter type).
 */
5096 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5098 struct ixgbe_hw *hw;
5100 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5104 ixgbe_process_vf_reset(adapter, vf);
5106 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5107 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5108 vf->ether_addr, vf->pool, TRUE);
5109 ack = IXGBE_VT_MSGTYPE_ACK;
5111 ack = IXGBE_VT_MSGTYPE_NACK;
5113 ixgbe_vf_enable_transmit(adapter, vf);
5114 ixgbe_vf_enable_receive(adapter, vf);
/* Reset completed: VF may now use the clear-to-send protocol. */
5116 vf->flags |= IXGBE_VF_CTS;
5118 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5119 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5120 resp[3] = hw->mac.mc_filter_type;
5121 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a SET_MAC_ADDR mailbox message: NACK if the VF lacks the
 * CAP_MAC privilege (and is actually changing the address) or the
 * address is invalid; otherwise install it in the VF's RAR slot and ACK.
 */
5126 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5130 mac = (uint8_t*)&msg[1];
5132 /* Check that the VF has permission to change the MAC address. */
5133 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5134 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5138 if (ixgbe_validate_mac_addr(mac) != 0) {
5139 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5143 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5145 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5148 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5153 ** VF multicast addresses are set by using the appropriate bit in
5154 ** 1 of 128 32 bit addresses (4096 possible).
/*
 * Handle a SET_MULTICAST mailbox message: record the VF's multicast
 * hashes (capped at IXGBE_MAX_VF_MC), set the matching MTA bits, and
 * enable multicast-promiscuous-by-hash (ROMPE) for the pool.
 */
5157 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5159 u16 *list = (u16*)&msg[1];
5161 u32 vmolr, vec_bit, vec_reg, mta_reg;
5163 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5164 entries = min(entries, IXGBE_MAX_VF_MC);
5166 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5168 vf->num_mc_hashes = entries;
5170 /* Set the appropriate MTA bit */
5171 for (int i = 0; i < entries; i++) {
5172 vf->mc_hash[i] = list[i];
/* Hash layout: bits 11:5 select one of 128 MTA words, 4:0 the bit. */
5173 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5174 vec_bit = vf->mc_hash[i] & 0x1F;
5175 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5176 mta_reg |= (1 << vec_bit);
5177 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5180 vmolr |= IXGBE_VMOLR_ROMPE;
5181 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5182 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_VLAN mailbox message: requires the CAP_VLAN privilege
 * and a non-zero tag when enabling; programs the VLAN filter table
 * (VFTA) for the VF's pool and ACKs.
 */
5188 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5190 struct ixgbe_hw *hw;
5195 enable = IXGBE_VT_MSGINFO(msg[0]);
5196 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5198 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5199 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5203 /* It is illegal to enable vlan tag 0. */
5204 if (tag == 0 && enable != 0){
5205 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5209 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5210 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_LPE (large packet enable / max frame size) mailbox
 * message.  Out-of-range requests are intentionally ACKed without
 * effect; otherwise record the VF's frame size, re-evaluate its RX
 * enable, and grow the MAC's MHADD max-frame field if needed.
 */
5215 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5217 struct ixgbe_hw *hw;
5218 uint32_t vf_max_size, pf_max_size, mhadd;
5221 vf_max_size = msg[1];
5223 if (vf_max_size < ETHER_CRC_LEN) {
5224 /* We intentionally ACK invalid LPE requests. */
5225 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* Requested size includes CRC; strip it for comparison. */
5229 vf_max_size -= ETHER_CRC_LEN;
5231 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5232 /* We intentionally ACK invalid LPE requests. */
5233 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5237 vf->max_frame_size = vf_max_size;
5238 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5241 * We might have to disable reception to this VF if the frame size is
5242 * not compatible with the config on the PF.
5244 ixgbe_vf_enable_receive(adapter, vf);
5246 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5247 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5249 if (pf_max_size < adapter->max_frame_size) {
5250 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5251 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5252 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5255 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* SET_MACVLAN mailbox handler — not implemented yet, always NACKs. */
5260 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5263 //XXX implement this
5264 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle an API_NEGOTIATE mailbox message: accept API versions 1.0 and
 * 1.1 (ACK), otherwise reset the VF's API version to unknown and NACK.
 */
5269 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5274 case IXGBE_API_VER_1_0:
5275 case IXGBE_API_VER_1_1:
5276 vf->api_ver = msg[0];
5277 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5280 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5281 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle a GET_QUEUES mailbox message (API >= 1.1 only): reply with
 * the per-VF tx/rx queue counts for the current IOV mode, whether a
 * transparent default VLAN is in force, and the default queue.
 */
5288 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5291 struct ixgbe_hw *hw;
5292 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5297 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5299 case IXGBE_API_VER_1_0:
5300 case IXGBE_API_VER_UNKNOWN:
5301 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5305 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5306 IXGBE_VT_MSGTYPE_CTS;
5308 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5309 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5310 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5311 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5312 resp[IXGBE_VF_DEF_QUEUE] = 0;
5314 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one mailbox message from a VF and dispatch it to the matching
 * handler.  VF_RESET is always honored; any other message is NACKed
 * until the VF has completed a reset (CTS flag set).  Unknown message
 * types are NACKed.
 */
5319 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5321 struct ixgbe_hw *hw;
5322 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5327 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5332 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5333 adapter->ifp->if_xname, msg[0], vf->pool);
5334 if (msg[0] == IXGBE_VF_RESET) {
5335 ixgbe_vf_reset_msg(adapter, vf, msg);
5339 if (!(vf->flags & IXGBE_VF_CTS)) {
5340 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5344 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5345 case IXGBE_VF_SET_MAC_ADDR:
5346 ixgbe_vf_set_mac(adapter, vf, msg);
5348 case IXGBE_VF_SET_MULTICAST:
5349 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5351 case IXGBE_VF_SET_VLAN:
5352 ixgbe_vf_set_vlan(adapter, vf, msg);
5354 case IXGBE_VF_SET_LPE:
5355 ixgbe_vf_set_lpe(adapter, vf, msg);
5357 case IXGBE_VF_SET_MACVLAN:
5358 ixgbe_vf_set_macvlan(adapter, vf, msg);
5360 case IXGBE_VF_API_NEGOTIATE:
5361 ixgbe_vf_api_negotiate(adapter, vf, msg);
5363 case IXGBE_VF_GET_QUEUES:
5364 ixgbe_vf_get_queues(adapter, vf, msg);
5367 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5373 * Tasklet for handling VF -> PF mailbox messages.
/*
 * Taskqueue handler: under the core lock, poll every active VF's
 * mailbox for pending reset, message, or ack events and process each.
 */
5376 ixgbe_handle_mbx(void *context, int pending)
5378 struct adapter *adapter;
5379 struct ixgbe_hw *hw;
5380 struct ixgbe_vf *vf;
5386 IXGBE_CORE_LOCK(adapter);
5387 for (i = 0; i < adapter->num_vfs; i++) {
5388 vf = &adapter->vfs[i];
5390 if (vf->flags & IXGBE_VF_ACTIVE) {
/* The check_for_* helpers return 0 when the event is pending. */
5391 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5392 ixgbe_process_vf_reset(adapter, vf);
5394 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5395 ixgbe_process_vf_msg(adapter, vf);
5397 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5398 ixgbe_process_vf_ack(adapter, vf);
5401 IXGBE_CORE_UNLOCK(adapter);
/*
 * pci_iov entry point: enable SR-IOV with num_vfs virtual functions.
 * Validates the count against the IOV mode's maximum, allocates the
 * VF state array, and re-initializes the adapter.
 */
5406 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5408 struct adapter *adapter;
5409 enum ixgbe_iov_mode mode;
5411 adapter = device_get_softc(dev);
5412 adapter->num_vfs = num_vfs;
5413 mode = ixgbe_get_iov_mode(adapter);
5415 if (num_vfs > ixgbe_max_vfs(mode)) {
5416 adapter->num_vfs = 0;
5420 IXGBE_CORE_LOCK(adapter);
5422 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5425 if (adapter->vfs == NULL) {
/* Allocation failed: roll back the VF count before unlocking. */
5426 adapter->num_vfs = 0;
5427 IXGBE_CORE_UNLOCK(adapter);
5431 ixgbe_init_locked(adapter);
5433 IXGBE_CORE_UNLOCK(adapter);
/*
 * pci_iov teardown: restore rx/tx to PF-only operation, disable every
 * VF's VFRE/VFTE bits, turn off the virtualization control register,
 * and free the VF state array.
 */
5440 ixgbe_uninit_iov(device_t dev)
5442 struct ixgbe_hw *hw;
5443 struct adapter *adapter;
5444 uint32_t pf_reg, vf_reg;
5446 adapter = device_get_softc(dev);
5449 IXGBE_CORE_LOCK(adapter);
5451 /* Enable rx/tx for the PF and disable it for all VFs. */
5452 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5453 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5454 IXGBE_VF_BIT(adapter->pool));
5455 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5456 IXGBE_VF_BIT(adapter->pool));
5462 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5463 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5465 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5467 free(adapter->vfs, M_IXGBE);
5468 adapter->vfs = NULL;
5469 adapter->num_vfs = 0;
5471 IXGBE_CORE_UNLOCK(adapter);
5476 ixgbe_initialize_iov(struct adapter *adapter)
5478 struct ixgbe_hw *hw = &adapter->hw;
5479 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5480 enum ixgbe_iov_mode mode;
5483 mode = ixgbe_get_iov_mode(adapter);
5484 if (mode == IXGBE_NO_VM)
5487 IXGBE_CORE_LOCK_ASSERT(adapter);
5489 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5490 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5494 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5497 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5500 panic("Unexpected SR-IOV mode %d", mode);
5502 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5504 mtqc = IXGBE_MTQC_VT_ENA;
5507 mtqc |= IXGBE_MTQC_64VF;
5510 mtqc |= IXGBE_MTQC_32VF;
5513 panic("Unexpected SR-IOV mode %d", mode);
5515 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5518 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5519 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5520 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5523 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5526 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5529 panic("Unexpected SR-IOV mode %d", mode);
5531 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5534 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5535 gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
5538 gpie |= IXGBE_GPIE_VTMODE_64;
5541 gpie |= IXGBE_GPIE_VTMODE_32;
5544 panic("Unexpected SR-IOV mode %d", mode);
5546 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5548 /* Enable rx/tx for the PF. */
5549 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5550 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5551 IXGBE_VF_BIT(adapter->pool));
5552 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5553 IXGBE_VF_BIT(adapter->pool));
5555 /* Allow VM-to-VM communication. */
5556 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5558 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5559 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5560 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5562 for (i = 0; i < adapter->num_vfs; i++)
5563 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5568 ** Check the max frame setting of all active VF's
/*
 * Re-derive the adapter-wide max frame size by folding in every
 * active VF's configured maximum.  Core lock must be held.
 */
5571 ixgbe_recalculate_max_frame(struct adapter *adapter)
5573 struct ixgbe_vf *vf;
5575 IXGBE_CORE_LOCK_ASSERT(adapter);
5577 for (int i = 0; i < adapter->num_vfs; i++) {
5578 vf = &adapter->vfs[i];
5579 if (vf->flags & IXGBE_VF_ACTIVE)
5580 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * (Re)initialize one VF's hardware state: unmask its mailbox
 * interrupt, restore its VLAN tag and (when valid) its RAR entry,
 * enable rx/tx, and notify it via a PF control message.  No-op for
 * inactive VFs.  Core lock must be held.
 */
5586 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5588 struct ixgbe_hw *hw;
5589 uint32_t vf_index, pfmbimr;
5591 IXGBE_CORE_LOCK_ASSERT(adapter);
5595 if (!(vf->flags & IXGBE_VF_ACTIVE))
5598 vf_index = IXGBE_VF_INDEX(vf->pool);
5599 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5600 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5601 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5603 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5605 // XXX multicast addresses
5607 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5608 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5609 vf->ether_addr, vf->pool, TRUE);
5612 ixgbe_vf_enable_transmit(adapter, vf);
5613 ixgbe_vf_enable_receive(adapter, vf);
5615 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * pci_iov entry point: configure one new VF from its nvlist config
 * (optional "mac-addr" and "allow-set-mac" keys), assign its RAR slot,
 * mark it active, and bring it up via ixgbe_init_vf().
 */
5619 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5621 struct adapter *adapter;
5622 struct ixgbe_vf *vf;
5625 adapter = device_get_softc(dev);
5627 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5628 vfnum, adapter->num_vfs));
5630 IXGBE_CORE_LOCK(adapter);
5631 vf = &adapter->vfs[vfnum];
5634 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5635 vf->rar_index = vfnum + 1;
5636 vf->default_vlan = 0;
5637 vf->max_frame_size = ETHER_MAX_LEN;
5638 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5640 if (nvlist_exists_binary(config, "mac-addr")) {
5641 mac = nvlist_get_binary(config, "mac-addr", NULL);
5642 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5643 if (nvlist_get_bool(config, "allow-set-mac"))
5644 vf->flags |= IXGBE_VF_CAP_MAC;
5647 * If the administrator has not specified a MAC address then
5648 * we must allow the VF to choose one.
5650 vf->flags |= IXGBE_VF_CAP_MAC;
/* NOTE(review): plain assignment — clears any CAP_* flags set above
 * in this extract's visible order; verify ordering in full source. */
5652 vf->flags = IXGBE_VF_ACTIVE;
5654 ixgbe_init_vf(adapter, vf);
5655 IXGBE_CORE_UNLOCK(adapter);
5659 #endif /* PCI_IOV */