1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
49 /*********************************************************************
51 *********************************************************************/
/* Driver version string; reported to users via the probe description
 * ("%s, Version - %s" in ixgbe_probe). */
52 char ixgbe_driver_version[] = "3.1.13-k";
55 /*********************************************************************
58 * Used by probe to select devices to load on
59 * Last field stores an index into ixgbe_strings
60 * Last entry must be all 0s
62 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
63 *********************************************************************/
/*
 * PCI ID match table consumed by ixgbe_probe.  Subvendor/subdevice of 0
 * act as wildcards (see the probe loop's "|| ent->subvendor_id == 0"
 * tests); the final string-index field is 0 for every entry, selecting
 * ixgbe_strings[0].
 *
 * NOTE(review): the all-zero terminator entry and closing "};" follow the
 * "required last entry" comment but are outside this extract; ixgbe_probe
 * relies on vendor_id == 0 to stop scanning.
 */
65 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
99 /* required last entry */
103 /*********************************************************************
104 * Table of branding strings
105 *********************************************************************/
/* Branding strings; indexed by the last field of each vendor-table entry
 * (all entries currently use index 0).  Closing "};" is outside this view. */
107 static char *ixgbe_strings[] = {
108 "Intel(R) PRO/10GbE PCI-Express Network Driver"
111 /*********************************************************************
112 * Function prototypes
113 *********************************************************************/
/* Device-interface entry points wired into ix_methods below. */
114 static int ixgbe_probe(device_t);
115 static int ixgbe_attach(device_t);
116 static int ixgbe_detach(device_t);
117 static int ixgbe_shutdown(device_t);
118 static int ixgbe_suspend(device_t);
119 static int ixgbe_resume(device_t);
120 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121 static void ixgbe_init(void *);
122 static void ixgbe_init_locked(struct adapter *);
123 static void ixgbe_stop(void *);
124 #if __FreeBSD_version >= 1100036
125 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* Hardware/media identification and resource setup. */
127 static void ixgbe_add_media_types(struct adapter *);
128 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129 static int ixgbe_media_change(struct ifnet *);
130 static void ixgbe_identify_hardware(struct adapter *);
131 static int ixgbe_allocate_pci_resources(struct adapter *);
132 static void ixgbe_get_slot_info(struct adapter *);
133 static int ixgbe_allocate_msix(struct adapter *);
134 static int ixgbe_allocate_legacy(struct adapter *);
135 static int ixgbe_setup_msix(struct adapter *);
136 static void ixgbe_free_pci_resources(struct adapter *);
137 static void ixgbe_local_timer(void *);
138 static int ixgbe_setup_interface(device_t, struct adapter *);
139 static void ixgbe_config_gpie(struct adapter *);
140 static void ixgbe_config_dmac(struct adapter *);
141 static void ixgbe_config_delay_values(struct adapter *);
142 static void ixgbe_config_link(struct adapter *);
143 static void ixgbe_check_wol_support(struct adapter *);
144 static int ixgbe_setup_low_power_mode(struct adapter *);
145 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX datapath initialization. */
147 static void ixgbe_initialize_transmit_units(struct adapter *);
148 static void ixgbe_initialize_receive_units(struct adapter *);
149 static void ixgbe_enable_rx_drop(struct adapter *);
150 static void ixgbe_disable_rx_drop(struct adapter *);
151 static void ixgbe_initialize_rss_mapping(struct adapter *);
/* Interrupt, statistics, link and filtering helpers. */
153 static void ixgbe_enable_intr(struct adapter *);
154 static void ixgbe_disable_intr(struct adapter *);
155 static void ixgbe_update_stats_counters(struct adapter *);
156 static void ixgbe_set_promisc(struct adapter *);
157 static void ixgbe_set_multi(struct adapter *);
158 static void ixgbe_update_link_status(struct adapter *);
159 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
160 static void ixgbe_configure_ivars(struct adapter *);
161 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware offload and event handlers. */
163 static void ixgbe_setup_vlan_hw_support(struct adapter *);
164 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
165 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
/* Sysctl tree construction and tunable application. */
167 static void ixgbe_add_device_sysctls(struct adapter *);
168 static void ixgbe_add_hw_stats(struct adapter *);
169 static int ixgbe_set_flowcntl(struct adapter *, int);
170 static int ixgbe_set_advertise(struct adapter *, int);
172 /* Sysctl handlers */
173 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
174 const char *, int *, int);
175 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
188 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
193 /* Support for pluggable optic modules */
194 static bool ixgbe_sfp_probe(struct adapter *);
195 static void ixgbe_setup_optics(struct adapter *);
197 /* Legacy (single vector interrupt handler */
198 static void ixgbe_legacy_irq(void *);
200 /* The MSI/X Interrupt handlers */
201 static void ixgbe_msix_que(void *);
202 static void ixgbe_msix_link(void *);
204 /* Deferred interrupt tasklets */
205 static void ixgbe_handle_que(void *, int);
206 static void ixgbe_handle_link(void *, int);
207 static void ixgbe_handle_msf(void *, int);
208 static void ixgbe_handle_mod(void *, int);
209 static void ixgbe_handle_phy(void *, int);
/* Flow Director (conditional) and SR-IOV support prototypes. */
212 static void ixgbe_reinit_fdir(void *, int);
216 static void ixgbe_ping_all_vfs(struct adapter *);
217 static void ixgbe_handle_mbx(void *, int);
218 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
219 static void ixgbe_uninit_iov(device_t);
220 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
221 static void ixgbe_initialize_iov(struct adapter *);
222 static void ixgbe_recalculate_max_frame(struct adapter *);
223 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
227 /*********************************************************************
228 * FreeBSD Device Interface Entry Points
229 *********************************************************************/
/* newbus method table: maps the generic device interface (and, when
 * SR-IOV is compiled in, the pci_iov interface) onto this driver's
 * entry points.  NOTE(review): the DEVMETHOD_END sentinel and closing
 * braces are outside this extract. */
231 static device_method_t ix_methods[] = {
232 /* Device interface */
233 DEVMETHOD(device_probe, ixgbe_probe),
234 DEVMETHOD(device_attach, ixgbe_attach),
235 DEVMETHOD(device_detach, ixgbe_detach),
236 DEVMETHOD(device_shutdown, ixgbe_shutdown),
237 DEVMETHOD(device_suspend, ixgbe_suspend),
238 DEVMETHOD(device_resume, ixgbe_resume),
240 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
241 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
242 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* Driver registration: softc is the per-device struct adapter. */
247 static driver_t ix_driver = {
248 "ix", ix_methods, sizeof(struct adapter),
251 devclass_t ix_devclass;
252 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
254 MODULE_DEPEND(ix, pci, 1, 1, 1);
255 MODULE_DEPEND(ix, ether, 1, 1, 1);
/* netmap dependency only when built with DEV_NETMAP. */
257 MODULE_DEPEND(ix, netmap, 1, 1, 1);
258 #endif /* DEV_NETMAP */
261 ** TUNEABLE PARAMETERS:
/* Root of the hw.ix sysctl tree; all tunables below hang off it. */
264 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
265 "IXGBE driver parameters");
268 ** AIM: Adaptive Interrupt Moderation
269 ** which means that the interrupt rate
270 ** is varied over time based on the
271 ** traffic for that interrupt vector
/* Copied into adapter->enable_aim during attach. */
273 static int ixgbe_enable_aim = TRUE;
274 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
275 "Enable adaptive interrupt moderation");
277 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
278 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
279 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
281 /* How many packets rxeof tries to clean at a time */
282 static int ixgbe_rx_process_limit = 256;
283 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
284 &ixgbe_rx_process_limit, 0,
285 "Maximum number of received packets to process at a time,"
286 "-1 means unlimited");
288 /* How many packets txeof tries to clean at a time */
289 static int ixgbe_tx_process_limit = 256;
290 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
291 &ixgbe_tx_process_limit, 0,
292 "Maximum number of sent packets to process at a time,"
293 "-1 means unlimited");
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 ** Smart speed setting, default to on
307 ** this only works as a compile option
308 ** right now as its during attach, set
309 ** this to 'ixgbe_smart_speed_off' to
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 * MSIX should be the default for best performance,
316 * but this allows it to be forced off for testing.
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320 "Enable MSI-X interrupts");
323 * Number of Queues, can be set to 0,
324 * it then autoconfigures based on the
325 * number of cpus with a max of 8. This
326 * can be overriden manually here.
328 static int ixgbe_num_queues = 0;
329 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
330 "Number of queues to configure, 0 indicates autoconfigure");
333 ** Number of TX descriptors per ring,
334 ** setting higher than RX as this seems
335 ** the better performing choice.
/* Validated against MIN_TXD/MAX_TXD and DBA_ALIGN in ixgbe_attach. */
337 static int ixgbe_txd = PERFORM_TXD;
338 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
339 "Number of transmit descriptors per queue");
341 /* Number of RX descriptors per ring */
342 static int ixgbe_rxd = PERFORM_RXD;
343 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
344 "Number of receive descriptors per queue");
347 ** Defining this on will allow the use
348 ** of unsupported SFP+ modules, note that
349 ** doing so you are on your own :)
/* Loader tunable only (no sysctl); copied to hw->allow_unsupported_sfp. */
351 static int allow_unsupported_sfp = FALSE;
352 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
354 /* Keep running tab on them for sanity check */
355 static int ixgbe_total_ports;
359 ** Flow Director actually 'steals'
360 ** part of the packet buffer as its
361 ** filter pool, this variable controls
363 ** 0 = 64K, 1 = 128K, 2 = 256K
365 static int fdir_pballoc = 1;
370 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
371 * be a reference on how to implement netmap support in a driver.
372 * Additional comments are in ixgbe_netmap.h .
374 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
375 * that extend the standard driver.
377 #include <dev/netmap/ixgbe_netmap.h>
378 #endif /* DEV_NETMAP */
380 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
382 /*********************************************************************
383 * Device identification routine
385 * ixgbe_probe determines if the driver should be loaded on
386 * adapter based on PCI vendor/device id of the adapter.
388 * return BUS_PROBE_DEFAULT on success, positive on failure
389 *********************************************************************/
/*
 * Match the PCI vendor/device (and optionally subvendor/subdevice) IDs
 * against ixgbe_vendor_info_array.  A zero subvendor/subdevice in a table
 * entry is treated as a wildcard.  On a match, sets the device description
 * and returns BUS_PROBE_DEFAULT.  (The "static int" return-type line and
 * the no-match failure return are outside this extract.)
 */
392 ixgbe_probe(device_t dev)
394 ixgbe_vendor_info_t *ent;
396 u16 pci_vendor_id = 0;
397 u16 pci_device_id = 0;
398 u16 pci_subvendor_id = 0;
399 u16 pci_subdevice_id = 0;
400 char adapter_name[256];
402 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Cheap early-out: reject non-Intel devices before reading further IDs. */
404 pci_vendor_id = pci_get_vendor(dev);
405 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
408 pci_device_id = pci_get_device(dev);
409 pci_subvendor_id = pci_get_subvendor(dev);
410 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan until the all-zero sentinel entry (vendor_id == 0). */
412 ent = ixgbe_vendor_info_array;
413 while (ent->vendor_id != 0) {
414 if ((pci_vendor_id == ent->vendor_id) &&
415 (pci_device_id == ent->device_id) &&
417 ((pci_subvendor_id == ent->subvendor_id) ||
418 (ent->subvendor_id == 0)) &&
420 ((pci_subdevice_id == ent->subdevice_id) ||
421 (ent->subdevice_id == 0))) {
422 sprintf(adapter_name, "%s, Version - %s",
423 ixgbe_strings[ent->index],
424 ixgbe_driver_version);
425 device_set_desc_copy(dev, adapter_name);
427 return (BUS_PROBE_DEFAULT);
434 /*********************************************************************
435 * Device initialization routine
437 * The attach entry point is called when the driver is being loaded.
438 * This routine identifies the type of hardware, allocates all resources
439 * and initializes the hardware.
441 * return 0 on success, positive on failure
442 *********************************************************************/
/*
 * Attach: identify the MAC, map BAR0, size and allocate the TX/RX rings,
 * bring up the shared code and hardware, allocate interrupts, create the
 * ifnet, and register sysctls/VLAN events/SR-IOV.  Unwind code at the
 * bottom frees partially-allocated state on failure.
 *
 * NOTE(review): this extract omits several lines (error labels, "return"
 * statements, the switch head for ixgbe_init_hw, and intervening braces);
 * comments below annotate only what is visible.
 */
445 ixgbe_attach(device_t dev)
447 struct adapter *adapter;
453 INIT_DEBUGOUT("ixgbe_attach: begin");
455 /* Allocate, clear, and link in our adapter structure */
456 adapter = device_get_softc(dev);
461 adapter->init_locked = ixgbe_init_locked;
462 adapter->stop_locked = ixgbe_stop;
/* Core mutex must exist before the callout that uses it below. */
466 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
468 /* Set up the timer callout */
469 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
471 /* Determine hardware revision */
472 ixgbe_identify_hardware(adapter);
474 /* Do base PCI setup - map BAR0 */
475 if (ixgbe_allocate_pci_resources(adapter)) {
476 device_printf(dev, "Allocation of PCI resources failed\n");
481 /* Sysctls for limiting the amount of work done in the taskqueues */
482 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
483 "max number of rx packets to process",
484 &adapter->rx_process_limit, ixgbe_rx_process_limit);
486 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
487 "max number of tx packets to process",
488 &adapter->tx_process_limit, ixgbe_tx_process_limit);
490 /* Do descriptor calc and sanity checks */
/* Ring byte size must be DBA_ALIGN-aligned and count within bounds,
 * else fall back to the compiled-in default. */
491 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
492 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
493 device_printf(dev, "TXD config issue, using default!\n");
494 adapter->num_tx_desc = DEFAULT_TXD;
496 adapter->num_tx_desc = ixgbe_txd;
499 ** With many RX rings it is easy to exceed the
500 ** system mbuf allocation. Tuning nmbclusters
501 ** can alleviate this.
503 if (nmbclusters > 0) {
505 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
506 if (s > nmbclusters) {
507 device_printf(dev, "RX Descriptors exceed "
508 "system mbuf max, using default instead!\n");
509 ixgbe_rxd = DEFAULT_RXD;
513 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
514 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
515 device_printf(dev, "RXD config issue, using default!\n");
516 adapter->num_rx_desc = DEFAULT_RXD;
518 adapter->num_rx_desc = ixgbe_rxd;
520 /* Allocate our TX/RX Queues */
521 if (ixgbe_allocate_queues(adapter)) {
526 /* Allocate multicast array memory. */
527 adapter->mta = malloc(sizeof(*adapter->mta) *
528 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
529 if (adapter->mta == NULL) {
530 device_printf(dev, "Can not allocate multicast setup array\n");
535 /* Initialize the shared code */
536 hw->allow_unsupported_sfp = allow_unsupported_sfp;
537 error = ixgbe_init_shared_code(hw);
538 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
540 ** No optics in this port, set up
541 ** so the timer routine will probe
542 ** for later insertion.
544 adapter->sfp_probe = TRUE;
546 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
547 device_printf(dev, "Unsupported SFP+ module detected!\n");
551 device_printf(dev, "Unable to initialize the shared code\n");
556 /* Make sure we have a good EEPROM before we read from it */
557 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
558 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
/* Non-fatal init_hw diagnostics (switch head not visible in this view). */
563 error = ixgbe_init_hw(hw);
565 case IXGBE_ERR_EEPROM_VERSION:
566 device_printf(dev, "This device is a pre-production adapter/"
567 "LOM. Please be aware there may be issues associated "
568 "with your hardware.\nIf you are experiencing problems "
569 "please contact your Intel or hardware representative "
570 "who provided you with this hardware.\n");
572 case IXGBE_ERR_SFP_NOT_SUPPORTED:
573 device_printf(dev, "Unsupported SFP+ Module\n");
576 case IXGBE_ERR_SFP_NOT_PRESENT:
577 device_printf(dev, "No SFP+ Module found\n");
583 /* hw.ix defaults init */
584 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
585 ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
586 adapter->enable_aim = ixgbe_enable_aim;
/* Prefer MSI-X when >1 vector was granted and the tunable allows it. */
588 if ((adapter->msix > 1) && (ixgbe_enable_msix))
589 error = ixgbe_allocate_msix(adapter);
591 error = ixgbe_allocate_legacy(adapter);
595 /* Enable the optics for 82599 SFP+ fiber */
596 ixgbe_enable_tx_laser(hw);
598 /* Enable power to the phy. */
599 ixgbe_set_phy_power(hw, TRUE);
601 /* Setup OS specific network interface */
602 if (ixgbe_setup_interface(dev, adapter) != 0)
605 /* Initialize statistics */
606 ixgbe_update_stats_counters(adapter);
608 /* Register for VLAN events */
609 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
610 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
611 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
612 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
614 /* Check PCIE slot type/speed/width */
615 ixgbe_get_slot_info(adapter);
617 /* Set an initial default flow control & dmac value */
618 adapter->fc = ixgbe_fc_full;
620 adapter->eee_enabled = 0;
/* SR-IOV schema: 82598 has no VF support; also requires MSI-X. */
623 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
624 nvlist_t *pf_schema, *vf_schema;
626 hw->mbx.ops.init_params(hw);
627 pf_schema = pci_iov_schema_alloc_node();
628 vf_schema = pci_iov_schema_alloc_node();
629 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
630 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
631 IOV_SCHEMA_HASDEFAULT, TRUE);
632 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
633 IOV_SCHEMA_HASDEFAULT, FALSE);
634 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
635 IOV_SCHEMA_HASDEFAULT, FALSE);
636 error = pci_iov_attach(dev, pf_schema, vf_schema);
639 "Error %d setting up SR-IOV\n", error);
644 /* Check for certain supported features */
645 ixgbe_check_wol_support(adapter);
648 ixgbe_add_device_sysctls(adapter);
649 ixgbe_add_hw_stats(adapter);
651 /* let hardware know driver is loaded */
652 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
653 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
654 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
657 ixgbe_netmap_attach(adapter);
658 #endif /* DEV_NETMAP */
659 INIT_DEBUGOUT("ixgbe_attach: end");
/* Failure unwind: free rings, ifnet, PCI resources, multicast array
 * (labels such as err_late/err_out are outside this extract). */
663 ixgbe_free_transmit_structures(adapter);
664 ixgbe_free_receive_structures(adapter);
666 if (adapter->ifp != NULL)
667 if_free(adapter->ifp);
668 ixgbe_free_pci_resources(adapter);
669 free(adapter->mta, M_DEVBUF);
673 /*********************************************************************
674 * Device removal routine
676 * The detach entry point is called when the driver is being removed.
677 * This routine stops the adapter and deallocates all the resources
678 * that were allocated for driver operation.
680 * return 0 on success, positive on failure
681 *********************************************************************/
/*
 * Detach: refuse while VLANs or SR-IOV VFs still reference the device,
 * then stop the adapter, drain and free all taskqueues, clear the
 * DRV_LOAD handshake bit, and release every resource attach acquired.
 * (Early-return statements for the busy cases are outside this extract.)
 */
684 ixgbe_detach(device_t dev)
686 struct adapter *adapter = device_get_softc(dev);
687 struct ix_queue *que = adapter->queues;
688 struct tx_ring *txr = adapter->tx_rings;
691 INIT_DEBUGOUT("ixgbe_detach: begin");
693 /* Make sure VLANS are not using driver */
694 if (adapter->ifp->if_vlantrunk != NULL) {
695 device_printf(dev,"Vlan in use, detach first\n");
/* Refuse while VFs are attached (SR-IOV builds). */
700 if (pci_iov_detach(dev) != 0) {
701 device_printf(dev, "SR-IOV in use; detach first.\n");
706 ether_ifdetach(adapter->ifp);
707 /* Stop the adapter */
708 IXGBE_CORE_LOCK(adapter);
709 ixgbe_setup_low_power_mode(adapter);
710 IXGBE_CORE_UNLOCK(adapter);
/* Per-queue taskqueues: drain pending work before freeing. */
712 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
714 #ifndef IXGBE_LEGACY_TX
715 taskqueue_drain(que->tq, &txr->txq_task);
717 taskqueue_drain(que->tq, &que->que_task);
718 taskqueue_free(que->tq);
722 /* Drain the Link queue */
724 taskqueue_drain(adapter->tq, &adapter->link_task);
725 taskqueue_drain(adapter->tq, &adapter->mod_task);
726 taskqueue_drain(adapter->tq, &adapter->msf_task);
728 taskqueue_drain(adapter->tq, &adapter->mbx_task);
730 taskqueue_drain(adapter->tq, &adapter->phy_task);
732 taskqueue_drain(adapter->tq, &adapter->fdir_task);
734 taskqueue_free(adapter->tq);
737 /* let hardware know driver is unloading */
738 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
739 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
742 /* Unregister VLAN events */
743 if (adapter->vlan_attach != NULL)
744 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
745 if (adapter->vlan_detach != NULL)
746 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* callout_drain waits for any in-flight local_timer to finish. */
748 callout_drain(&adapter->timer);
750 netmap_detach(adapter->ifp);
751 #endif /* DEV_NETMAP */
752 ixgbe_free_pci_resources(adapter);
753 bus_generic_detach(dev);
754 if_free(adapter->ifp);
756 ixgbe_free_transmit_structures(adapter);
757 ixgbe_free_receive_structures(adapter);
758 free(adapter->mta, M_DEVBUF);
760 IXGBE_CORE_LOCK_DESTROY(adapter);
764 /*********************************************************************
766 * Shutdown entry point
768 **********************************************************************/
/* System-shutdown hook: put the adapter into its low-power state under
 * the core lock.  (The return of `error` is outside this extract.) */
771 ixgbe_shutdown(device_t dev)
773 struct adapter *adapter = device_get_softc(dev);
776 INIT_DEBUGOUT("ixgbe_shutdown: begin");
778 IXGBE_CORE_LOCK(adapter);
779 error = ixgbe_setup_low_power_mode(adapter);
780 IXGBE_CORE_UNLOCK(adapter);
786 * Methods for going from:
787 * D0 -> D3: ixgbe_suspend
788 * D3 -> D0: ixgbe_resume
/* D0 -> D3 transition: same low-power sequence as shutdown, taken under
 * the core lock. */
791 ixgbe_suspend(device_t dev)
793 struct adapter *adapter = device_get_softc(dev);
796 INIT_DEBUGOUT("ixgbe_suspend: begin");
798 IXGBE_CORE_LOCK(adapter);
800 error = ixgbe_setup_low_power_mode(adapter);
802 IXGBE_CORE_UNLOCK(adapter);
/*
 * D3 -> D0 transition: report and clear the Wake Up Status (WUS)
 * register, clear the Wake Up Filter Control (WUFC) until the next
 * suspend, and re-run init if the interface was up so previously
 * advertised link speeds are restored.
 */
808 ixgbe_resume(device_t dev)
810 struct adapter *adapter = device_get_softc(dev);
811 struct ifnet *ifp = adapter->ifp;
812 struct ixgbe_hw *hw = &adapter->hw;
815 INIT_DEBUGOUT("ixgbe_resume: begin");
817 IXGBE_CORE_LOCK(adapter);
819 /* Read & clear WUS register */
820 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
822 device_printf(dev, "Woken up by (WUS): %#010x\n",
823 IXGBE_READ_REG(hw, IXGBE_WUS));
/* WUS is write-1-to-clear. */
824 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
825 /* And clear WUFC until next low-power transition */
826 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
829 * Required after D3->D0 transition;
830 * will re-advertise all previous advertised speeds
832 if (ifp->if_flags & IFF_UP)
833 ixgbe_init_locked(adapter);
835 IXGBE_CORE_UNLOCK(adapter);
841 /*********************************************************************
844 * ixgbe_ioctl is called when the user wants to configure the
847 * return 0 on success, positive on failure
848 **********************************************************************/
/*
 * Interface ioctl entry point.  Handles address assignment (avoiding a
 * full reinit when possible), MTU, interface flags, multicast updates,
 * media, capability toggles and SFP I2C reads; everything else is passed
 * to ether_ioctl.
 *
 * NOTE(review): the `switch (command)` statement and every `case` label
 * are outside this extract; the fragments below are the case bodies in
 * their original order.
 */
851 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
853 struct adapter *adapter = ifp->if_softc;
854 struct ifreq *ifr = (struct ifreq *) data;
855 #if defined(INET) || defined(INET6)
856 struct ifaddr *ifa = (struct ifaddr *)data;
859 bool avoid_reset = FALSE;
/* SIOCSIFADDR path: only INET/INET6 addresses can skip the reset. */
865 if (ifa->ifa_addr->sa_family == AF_INET)
869 if (ifa->ifa_addr->sa_family == AF_INET6)
873 ** Calling init results in link renegotiation,
874 ** so we avoid doing it when possible.
877 ifp->if_flags |= IFF_UP;
878 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
881 if (!(ifp->if_flags & IFF_NOARP))
882 arp_ifinit(ifp, ifa);
885 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, recompute max frame size, reinit under lock. */
888 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
889 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
892 IXGBE_CORE_LOCK(adapter);
893 ifp->if_mtu = ifr->ifr_mtu;
894 adapter->max_frame_size =
895 ifp->if_mtu + IXGBE_MTU_HDR;
896 ixgbe_init_locked(adapter);
898 ixgbe_recalculate_max_frame(adapter);
900 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: only reprogram promisc/allmulti if those bits changed;
 * full init only when transitioning down->up. */
904 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
905 IXGBE_CORE_LOCK(adapter);
906 if (ifp->if_flags & IFF_UP) {
907 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
908 if ((ifp->if_flags ^ adapter->if_flags) &
909 (IFF_PROMISC | IFF_ALLMULTI)) {
910 ixgbe_set_promisc(adapter);
913 ixgbe_init_locked(adapter);
915 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
917 adapter->if_flags = ifp->if_flags;
918 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: rewrite the multicast table with
 * interrupts disabled. */
922 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
923 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
924 IXGBE_CORE_LOCK(adapter);
925 ixgbe_disable_intr(adapter);
926 ixgbe_set_multi(adapter);
927 ixgbe_enable_intr(adapter);
928 IXGBE_CORE_UNLOCK(adapter);
933 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
934 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capabilities whose bits changed. */
938 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
940 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
944 /* HW cannot turn these on/off separately */
945 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
946 ifp->if_capenable ^= IFCAP_RXCSUM;
947 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
949 if (mask & IFCAP_TXCSUM)
950 ifp->if_capenable ^= IFCAP_TXCSUM;
951 if (mask & IFCAP_TXCSUM_IPV6)
952 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
953 if (mask & IFCAP_TSO4)
954 ifp->if_capenable ^= IFCAP_TSO4;
955 if (mask & IFCAP_TSO6)
956 ifp->if_capenable ^= IFCAP_TSO6;
957 if (mask & IFCAP_LRO)
958 ifp->if_capenable ^= IFCAP_LRO;
959 if (mask & IFCAP_VLAN_HWTAGGING)
960 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
961 if (mask & IFCAP_VLAN_HWFILTER)
962 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
963 if (mask & IFCAP_VLAN_HWTSO)
964 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Reinit so the new capability set is programmed into hardware. */
966 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
967 IXGBE_CORE_LOCK(adapter);
968 ixgbe_init_locked(adapter);
969 IXGBE_CORE_UNLOCK(adapter);
971 VLAN_CAPABILITIES(ifp);
/* SIOCGI2C: read SFP module EEPROM over I2C (addresses 0xA0/0xA2 only,
 * length bounded by the request buffer). */
974 #if __FreeBSD_version >= 1100036
977 struct ixgbe_hw *hw = &adapter->hw;
980 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
981 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
984 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
988 if (i2c.len > sizeof(i2c.data)) {
993 for (i = 0; i < i2c.len; i++)
994 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
995 i2c.dev_addr, &i2c.data[i]);
996 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1001 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1002 error = ether_ioctl(ifp, command, data);
1010 * Set the various hardware offload abilities.
1012 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1013 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1014 * mbuf offload flags the driver will understand.
/*
 * Set the various hardware offload abilities: translate the ifnet's
 * if_capenable flags into if_hwassist CSUM_* bits.  SCTP checksum
 * offload is excluded on 82598, which lacks it.  The second half uses
 * the pre-1000000 flag names; the separating `#else`/`#endif` lines are
 * outside this extract.
 */
1017 ixgbe_set_if_hwassist(struct adapter *adapter)
1019 struct ifnet *ifp = adapter->ifp;
1020 struct ixgbe_hw *hw = &adapter->hw;
/* Rebuild from scratch each call so disabled caps are dropped. */
1022 ifp->if_hwassist = 0;
1023 #if __FreeBSD_version >= 1000000
1024 if (ifp->if_capenable & IFCAP_TSO4)
1025 ifp->if_hwassist |= CSUM_IP_TSO;
1026 if (ifp->if_capenable & IFCAP_TSO6)
1027 ifp->if_hwassist |= CSUM_IP6_TSO;
1028 if (ifp->if_capenable & IFCAP_TXCSUM) {
1029 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1030 if (hw->mac.type != ixgbe_mac_82598EB)
1031 ifp->if_hwassist |= CSUM_IP_SCTP;
1033 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1034 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1035 if (hw->mac.type != ixgbe_mac_82598EB)
1036 ifp->if_hwassist |= CSUM_IP6_SCTP;
/* Legacy (__FreeBSD_version < 1000000) flag names. */
1039 if (ifp->if_capenable & IFCAP_TSO)
1040 ifp->if_hwassist |= CSUM_TSO;
1041 if (ifp->if_capenable & IFCAP_TXCSUM) {
1042 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1043 if (hw->mac.type != ixgbe_mac_82598EB)
1044 ifp->if_hwassist |= CSUM_SCTP;
1049 /*********************************************************************
1052 * This routine is used in two ways. It is used by the stack as
1053 * init entry point in network interface structure. It is also used
1054 * by the driver as a hw/sw initialization routine to get to a
1057 * return 0 on success, positive on failure
1058 **********************************************************************/
1059 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * Core (re)initialization path, called with the core mutex held
 * (asserted below).  Sequence visible here: stop the adapter and timer,
 * recompute queue indices for the current IOV mode, program RAR[0] with
 * the (possibly locally-administered) MAC, set offload flags, build
 * TX/RX structures, program TXDCTL/RXDCTL thresholds, enable the RX
 * engine, route MSI-X/legacy interrupts, start flow director (non-82598),
 * kick SFP identification, configure EEE/PHY power/link/flow control,
 * enable interrupts, and finally mark the interface IFF_DRV_RUNNING.
 * NOTE(review): numerous lines (error 'return's after ixgbe_stop(),
 * closing braces, #ifdef PCI_IOV/DEV_NETMAP guards) appear elided from
 * this listing — verify against the full source.
 */
1062 ixgbe_init_locked(struct adapter *adapter)
1064 struct ifnet *ifp = adapter->ifp;
1065 device_t dev = adapter->dev;
1066 struct ixgbe_hw *hw = &adapter->hw;
1067 struct tx_ring *txr;
1068 struct rx_ring *rxr;
1073 enum ixgbe_iov_mode mode;
1076 mtx_assert(&adapter->core_mtx, MA_OWNED);
1077 INIT_DEBUGOUT("ixgbe_init_locked: begin");
/* Quiesce hardware and the watchdog timer before reprogramming. */
1079 hw->adapter_stopped = FALSE;
1080 ixgbe_stop_adapter(hw);
1081 callout_stop(&adapter->timer);
1084 mode = ixgbe_get_iov_mode(adapter);
1085 adapter->pool = ixgbe_max_vfs(mode);
1086 /* Queue indices may change with IOV mode */
1087 for (int i = 0; i < adapter->num_queues; i++) {
1088 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1089 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1092 /* reprogram the RAR[0] in case user changed it. */
1093 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1095 /* Get the latest mac address, User can use a LAA */
1096 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1097 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1098 hw->addr_ctrl.rar_used_count = 1;
1100 /* Set hardware offload abilities from ifnet flags */
1101 ixgbe_set_if_hwassist(adapter);
1103 /* Prepare transmit descriptors and buffers */
1104 if (ixgbe_setup_transmit_structures(adapter)) {
1105 device_printf(dev, "Could not setup transmit structures\n");
1106 ixgbe_stop(adapter);
1112 ixgbe_initialize_iov(adapter);
1114 ixgbe_initialize_transmit_units(adapter);
1116 /* Setup Multicast table */
1117 ixgbe_set_multi(adapter);
1119 /* Determine the correct mbuf pool, based on frame size */
1120 if (adapter->max_frame_size <= MCLBYTES)
1121 adapter->rx_mbuf_sz = MCLBYTES;
1123 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1125 /* Prepare receive descriptors and buffers */
1126 if (ixgbe_setup_receive_structures(adapter)) {
1127 device_printf(dev, "Could not setup receive structures\n");
1128 ixgbe_stop(adapter);
1132 /* Configure RX settings */
1133 ixgbe_initialize_receive_units(adapter);
1135 /* Enable SDP & MSIX interrupts based on adapter */
1136 ixgbe_config_gpie(adapter);
/* Jumbo frames: program the max frame size field of MHADD. */
1139 if (ifp->if_mtu > ETHERMTU) {
1140 /* aka IXGBE_MAXFRS on 82599 and newer */
1141 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1142 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1143 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1144 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1147 /* Now enable all the queues */
1148 for (int i = 0; i < adapter->num_queues; i++) {
1149 txr = &adapter->tx_rings[i];
1150 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1151 txdctl |= IXGBE_TXDCTL_ENABLE;
1152 /* Set WTHRESH to 8, burst writeback */
1153 txdctl |= (8 << 16);
1155 * When the internal queue falls below PTHRESH (32),
1156 * start prefetching as long as there are at least
1157 * HTHRESH (1) buffers ready. The values are taken
1158 * from the Intel linux driver 3.8.21.
1159 * Prefetching enables tx line rate even with 1 queue.
1161 txdctl |= (32 << 0) | (1 << 8);
1162 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* RX queues: enable each ring, then poll (bounded) for the enable bit. */
1165 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1166 rxr = &adapter->rx_rings[i];
1167 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1168 if (hw->mac.type == ixgbe_mac_82598EB) {
1174 rxdctl &= ~0x3FFFFF;
1177 rxdctl |= IXGBE_RXDCTL_ENABLE;
1178 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1179 for (; j < 10; j++) {
1180 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1181 IXGBE_RXDCTL_ENABLE)
1189 * In netmap mode, we must preserve the buffers made
1190 * available to userspace before the if_init()
1191 * (this is true by default on the TX side, because
1192 * init makes all buffers available to userspace).
1194 * netmap_reset() and the device specific routines
1195 * (e.g. ixgbe_setup_receive_rings()) map these
1196 * buffers at the end of the NIC ring, so here we
1197 * must set the RDT (tail) register to make sure
1198 * they are not overwritten.
1200 * In this driver the NIC ring starts at RDH = 0,
1201 * RDT points to the last slot available for reception (?),
1202 * so RDT = num_rx_desc - 1 means the whole ring is available.
1204 if (ifp->if_capenable & IFCAP_NETMAP) {
1205 struct netmap_adapter *na = NA(adapter->ifp);
1206 struct netmap_kring *kring = &na->rx_rings[i];
1207 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1209 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1211 #endif /* DEV_NETMAP */
1212 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1215 /* Enable Receive engine */
1216 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1217 if (hw->mac.type == ixgbe_mac_82598EB)
1218 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1219 rxctrl |= IXGBE_RXCTRL_RXEN;
1220 ixgbe_enable_rx_dma(hw, rxctrl);
/* Restart the watchdog/stats timer (one tick). */
1222 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1224 /* Set up MSI/X routing */
1225 if (ixgbe_enable_msix) {
1226 ixgbe_configure_ivars(adapter);
1227 /* Set up auto-mask */
1228 if (hw->mac.type == ixgbe_mac_82598EB)
1229 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1231 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1232 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1234 } else { /* Simple settings for Legacy/MSI */
1235 ixgbe_set_ivar(adapter, 0, 0, 0);
1236 ixgbe_set_ivar(adapter, 0, 0, 1);
1237 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1241 /* Init Flow director */
1242 if (hw->mac.type != ixgbe_mac_82598EB) {
1243 u32 hdrm = 32 << fdir_pballoc;
1245 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1246 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1251 * Check on any SFP devices that
1252 * need to be kick-started
1254 if (hw->phy.type == ixgbe_phy_none) {
1255 err = hw->phy.ops.identify(hw);
1256 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1258 "Unsupported SFP+ module type was detected.\n");
1263 /* Set moderation on the Link interrupt */
1264 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1266 /* Configure Energy Efficient Ethernet for supported devices */
1267 if (hw->mac.ops.setup_eee) {
1268 err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
1270 device_printf(dev, "Error setting up EEE: %d\n", err);
1273 /* Enable power to the phy. */
1274 ixgbe_set_phy_power(hw, TRUE);
1276 /* Config/Enable Link */
1277 ixgbe_config_link(adapter);
1279 /* Hardware Packet Buffer & Flow Control setup */
1280 ixgbe_config_delay_values(adapter);
1282 /* Initialize the FC settings */
1285 /* Set up VLAN support and filter */
1286 ixgbe_setup_vlan_hw_support(adapter);
1288 /* Setup DMA Coalescing */
1289 ixgbe_config_dmac(adapter);
1291 /* And now turn on interrupts */
1292 ixgbe_enable_intr(adapter);
1295 /* Enable the use of the MBX by the VF's */
1297 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1298 reg |= IXGBE_CTRL_EXT_PFRSTD;
1299 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1303 /* Now inform the stack we're ready */
1304 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * Stack-facing if_init entry point: takes the core lock and delegates
 * all real work to ixgbe_init_locked().
 */
1310 ixgbe_init(void *arg)
1312 struct adapter *adapter = arg;
1314 IXGBE_CORE_LOCK(adapter);
1315 ixgbe_init_locked(adapter);
1316 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the GPIE register: enable the SDP (software-definable pin)
 * interrupts relevant to this MAC/device (fan failure on 82598AT, module
 * detect / media ready on 82599, thermal/link detect on X540/X550 parts)
 * and, when more than one MSI-X vector is in use, switch to enhanced
 * MSI-X mode with EIAME/PBA support.
 */
1321 ixgbe_config_gpie(struct adapter *adapter)
1323 struct ixgbe_hw *hw = &adapter->hw;
1326 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1328 /* Fan Failure Interrupt */
1329 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1330 gpie |= IXGBE_SDP1_GPIEN;
1333 * Module detection (SDP2)
1334 * Media ready (SDP1)
1336 if (hw->mac.type == ixgbe_mac_82599EB) {
1337 gpie |= IXGBE_SDP2_GPIEN;
1338 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1339 gpie |= IXGBE_SDP1_GPIEN;
1343 * Thermal Failure Detection (X540)
1344 * Link Detection (X552 SFP+, X552/X557-AT)
1346 if (hw->mac.type == ixgbe_mac_X540 ||
1347 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1348 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1349 gpie |= IXGBE_SDP0_GPIEN_X540;
1351 if (adapter->msix > 1) {
1352 /* Enable Enhanced MSIX mode */
1353 gpie |= IXGBE_GPIE_MSIX_MODE;
1354 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1358 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1363 * Requires adapter->max_frame_size to be set.
/*
 * Compute flow-control high/low watermarks from the max frame size.
 * The delay-value macros (IXGBE_DV / IXGBE_DV_X540, and the LOW_DV
 * variants) return bit times; IXGBE_BT2KB converts to KB.  The high
 * watermark is the RX packet-buffer size (RXPBSIZE, KB) minus the
 * computed delay value.  Also seeds pause_time/send_xon and copies the
 * requested FC mode from the softc.  Requires adapter->max_frame_size
 * to be set beforehand.
 */
1366 ixgbe_config_delay_values(struct adapter *adapter)
1368 struct ixgbe_hw *hw = &adapter->hw;
1369 u32 rxpb, frame, size, tmp;
1371 frame = adapter->max_frame_size;
1373 /* Calculate High Water */
1374 switch (hw->mac.type) {
1375 case ixgbe_mac_X540:
1376 case ixgbe_mac_X550:
1377 case ixgbe_mac_X550EM_x:
1378 tmp = IXGBE_DV_X540(frame, frame);
1381 tmp = IXGBE_DV(frame, frame);
1384 size = IXGBE_BT2KB(tmp);
1385 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1386 hw->fc.high_water[0] = rxpb - size;
1388 /* Now calculate Low Water */
1389 switch (hw->mac.type) {
1390 case ixgbe_mac_X540:
1391 case ixgbe_mac_X550:
1392 case ixgbe_mac_X550EM_x:
1393 tmp = IXGBE_LOW_DV_X540(frame);
1396 tmp = IXGBE_LOW_DV(frame);
1399 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1401 hw->fc.requested_mode = adapter->fc;
1402 hw->fc.pause_time = IXGBE_FC_PAUSE;
1403 hw->fc.send_xon = TRUE;
1408 ** MSIX Interrupt Handlers and Tasklets
/*
 * Unmask the MSI-X interrupt for one queue: set its bit in EIMS (82598)
 * or in the appropriate half of the 64-bit EIMS_EX register pair
 * (82599 and later).
 *
 * Fix: build the 64-bit queue mask as ((u64)1 << vector).  The original
 * (u64)(1 << vector) shifts a 32-bit int first, which is undefined for
 * vector >= 31 and loses the high-word bits needed for EIMS_EX(1).
 */
1413 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1415 struct ixgbe_hw *hw = &adapter->hw;
1416 u64 queue = ((u64)1 << vector);
1419 if (hw->mac.type == ixgbe_mac_82598EB) {
1420 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1421 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1423 mask = (queue & 0xFFFFFFFF);
1425 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1426 mask = (queue >> 32);
1428 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * Mask (disable) the MSI-X interrupt for one queue: set its bit in EIMC
 * (82598) or in the appropriate half of the 64-bit EIMC_EX register pair
 * (82599 and later).
 *
 * Fix: build the 64-bit queue mask as ((u64)1 << vector).  The original
 * (u64)(1 << vector) shifts a 32-bit int first, which is undefined for
 * vector >= 31 and loses the high-word bits needed for EIMC_EX(1).
 */
1433 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1435 struct ixgbe_hw *hw = &adapter->hw;
1436 u64 queue = ((u64)1 << vector);
1439 if (hw->mac.type == ixgbe_mac_82598EB) {
1440 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1441 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1443 mask = (queue & 0xFFFFFFFF);
1445 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1446 mask = (queue >> 32);
1448 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Deferred (taskqueue) per-queue service routine.  If the interface is
 * running it drains RX, restarts TX (drbr path or legacy if_snd path),
 * and then re-enables the queue's interrupt — per-vector when MSI-X
 * resources exist, otherwise the global interrupt mask.
 */
1453 ixgbe_handle_que(void *context, int pending)
1455 struct ix_queue *que = context;
1456 struct adapter *adapter = que->adapter;
1457 struct tx_ring *txr = que->txr;
1458 struct ifnet *ifp = adapter->ifp;
1460 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1464 #ifndef IXGBE_LEGACY_TX
1465 if (!drbr_empty(ifp, txr->br))
1466 ixgbe_mq_start_locked(ifp, txr);
1468 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1469 ixgbe_start_locked(txr, ifp);
1471 IXGBE_TX_UNLOCK(txr);
1474 /* Reenable this interrupt */
1475 if (que->res != NULL)
1476 ixgbe_enable_queue(adapter, que->msix);
1478 ixgbe_enable_intr(adapter);
1483 /*********************************************************************
1485 * Legacy Interrupt Service routine
1487 **********************************************************************/
/*
 * Legacy (INTx/MSI) interrupt handler.  Reads and thereby acknowledges
 * EICR; a zero value means a shared/spurious interrupt, so interrupts
 * are simply re-enabled.  Otherwise it drains RX, restarts TX, and
 * checks the auxiliary causes: fan failure (82598AT SDP1), link state
 * change (LSC), and the external PHY interrupt on X550EM_X_10G_T —
 * each deferred to its taskqueue task.
 */
1490 ixgbe_legacy_irq(void *arg)
1492 struct ix_queue *que = arg;
1493 struct adapter *adapter = que->adapter;
1494 struct ixgbe_hw *hw = &adapter->hw;
1495 struct ifnet *ifp = adapter->ifp;
1496 struct tx_ring *txr = adapter->tx_rings;
1501 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1504 if (reg_eicr == 0) {
1505 ixgbe_enable_intr(adapter);
1509 more = ixgbe_rxeof(que);
1513 #ifdef IXGBE_LEGACY_TX
1514 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1515 ixgbe_start_locked(txr, ifp);
1517 if (!drbr_empty(ifp, txr->br))
1518 ixgbe_mq_start_locked(ifp, txr);
1520 IXGBE_TX_UNLOCK(txr);
1522 /* Check for fan failure */
1523 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1524 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1525 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1526 "REPLACE IMMEDIATELY!!\n");
1527 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1530 /* Link status change */
1531 if (reg_eicr & IXGBE_EICR_LSC)
1532 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1534 /* External PHY interrupt */
1535 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1536 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1537 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: hand off to the queue task, else unmask. */
1540 taskqueue_enqueue(que->tq, &que->que_task);
1542 ixgbe_enable_intr(adapter);
1547 /*********************************************************************
1549 * MSIX Queue Interrupt Service routine
1551 **********************************************************************/
/*
 * MSI-X per-queue interrupt handler: mask this queue's vector, drain RX,
 * restart TX, then (when adaptive interrupt moderation is enabled)
 * recompute an EITR value from average packet size over the interval —
 * writing out the previously computed setting and saving the new one for
 * the next interrupt.  Remaining RX work is deferred to the queue task;
 * otherwise the vector is re-enabled immediately.
 *
 * Fix: IFQ_DRV_IS_EMPTY takes a pointer to the snd queue; this listing
 * passed ifp->if_snd by value (compare the &ifp->if_snd uses in
 * ixgbe_handle_que and ixgbe_legacy_irq).  Restored the missing '&'.
 */
1553 ixgbe_msix_que(void *arg)
1555 struct ix_queue *que = arg;
1556 struct adapter *adapter = que->adapter;
1557 struct ifnet *ifp = adapter->ifp;
1558 struct tx_ring *txr = que->txr;
1559 struct rx_ring *rxr = que->rxr;
1564 /* Protect against spurious interrupts */
1565 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1568 ixgbe_disable_queue(adapter, que->msix);
1571 more = ixgbe_rxeof(que);
1575 #ifdef IXGBE_LEGACY_TX
1576 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1577 ixgbe_start_locked(txr, ifp);
1579 if (!drbr_empty(ifp, txr->br))
1580 ixgbe_mq_start_locked(ifp, txr);
1582 IXGBE_TX_UNLOCK(txr);
1586 if (adapter->enable_aim == FALSE)
1589 ** Do Adaptive Interrupt Moderation:
1590 **  - Write out last calculated setting
1591 **  - Calculate based on average size over
1592 **    the last interval.
1594 if (que->eitr_setting)
1595 IXGBE_WRITE_REG(&adapter->hw,
1596 IXGBE_EITR(que->msix), que->eitr_setting);
1598 que->eitr_setting = 0;
1600 /* Idle, do nothing */
1601 if ((txr->bytes == 0) && (rxr->bytes == 0))
1604 if ((txr->bytes) && (txr->packets))
1605 newitr = txr->bytes/txr->packets;
1606 if ((rxr->bytes) && (rxr->packets))
1607 newitr = max(newitr,
1608 (rxr->bytes / rxr->packets));
1609 newitr += 24; /* account for hardware frame, crc */
1611 /* set an upper boundary */
1612 newitr = min(newitr, 3000);
1614 /* Be nice to the mid range */
1615 if ((newitr > 300) && (newitr < 1200))
1616 newitr = (newitr / 3);
1618 newitr = (newitr / 2);
/* 82598 lacks the counter write-disable bit; mirror into both halves. */
1620 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1621 newitr |= newitr << 16;
1623 newitr |= IXGBE_EITR_CNT_WDIS;
1625 /* save for next interrupt */
1626 que->eitr_setting = newitr;
1636 taskqueue_enqueue(que->tq, &que->que_task);
1638 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSI-X "other"/link vector handler.  Masks the OTHER cause, reads the
 * pending causes (via EICS, with queue bits preserved), acks them, and
 * dispatches each auxiliary event to its taskqueue task: LSC, flow
 * director reinit, ECC error, over-temperature, VF mailbox, SFP module
 * insert / multispeed-fiber, fan failure (82598AT), and the external
 * PHY interrupt (X550EM_X_10G_T).  Finally re-enables the OTHER cause.
 */
1644 ixgbe_msix_link(void *arg)
1646 struct adapter *adapter = arg;
1647 struct ixgbe_hw *hw = &adapter->hw;
1648 u32 reg_eicr, mod_mask;
1650 ++adapter->link_irq;
1652 /* Pause other interrupts */
1653 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1655 /* First get the cause */
1656 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1657 /* Be sure the queue bits are not cleared */
1658 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1659 /* Clear interrupt with write */
1660 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1662 /* Link status change */
1663 if (reg_eicr & IXGBE_EICR_LSC) {
1664 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1665 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Causes below only exist on 82599 and later MACs. */
1668 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1670 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1671 /* This is probably overkill :) */
1672 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1674 /* Disable the interrupt */
1675 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1676 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1679 if (reg_eicr & IXGBE_EICR_ECC) {
1680 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1681 "Please Reboot!!\n");
1682 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1685 /* Check for over temp condition */
1686 if (reg_eicr & IXGBE_EICR_TS) {
1687 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1688 "PHY IS SHUT DOWN!!\n");
1689 device_printf(adapter->dev, "System shutdown required!\n");
1690 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1693 if (reg_eicr & IXGBE_EICR_MAILBOX)
1694 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1698 /* Pluggable optics-related interrupt */
1699 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1700 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1702 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1704 if (ixgbe_is_sfp(hw)) {
1705 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1706 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1707 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1708 } else if (reg_eicr & mod_mask) {
1709 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1710 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1714 /* Check for fan failure */
1715 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1716 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1717 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1718 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1719 "REPLACE IMMEDIATELY!!\n");
1722 /* External PHY interrupt */
1723 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1724 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1725 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1726 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1729 /* Re-enable other interrupts */
1730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1734 /*********************************************************************
1736 * Media Ioctl callback
1738 * This routine is called whenever the user queries the status of
1739 * the interface using ifconfig.
1741 **********************************************************************/
/*
 * ifmedia status callback (ifconfig queries).  Refreshes the link state
 * under the core lock, then maps the (physical layer, link speed) pair
 * onto an IFM_* active-media word; KR/KX4/KX backplane types fall back
 * to SR/CX4/CX aliases when IFM_ETH_XTYPE is unavailable.  Also reports
 * the negotiated flow-control pause bits on newer stacks.
 */
1743 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1745 struct adapter *adapter = ifp->if_softc;
1746 struct ixgbe_hw *hw = &adapter->hw;
1749 INIT_DEBUGOUT("ixgbe_media_status: begin");
1750 IXGBE_CORE_LOCK(adapter);
1751 ixgbe_update_link_status(adapter);
1753 ifmr->ifm_status = IFM_AVALID;
1754 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid" status and bail out early. */
1756 if (!adapter->link_active) {
1757 IXGBE_CORE_UNLOCK(adapter);
1761 ifmr->ifm_status |= IFM_ACTIVE;
1762 layer = adapter->phy_layer;
1764 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1765 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1766 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1767 switch (adapter->link_speed) {
1768 case IXGBE_LINK_SPEED_10GB_FULL:
1769 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1771 case IXGBE_LINK_SPEED_1GB_FULL:
1772 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1774 case IXGBE_LINK_SPEED_100_FULL:
1775 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1778 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1779 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1780 switch (adapter->link_speed) {
1781 case IXGBE_LINK_SPEED_10GB_FULL:
1782 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1785 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1786 switch (adapter->link_speed) {
1787 case IXGBE_LINK_SPEED_10GB_FULL:
1788 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1790 case IXGBE_LINK_SPEED_1GB_FULL:
1791 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1794 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1795 switch (adapter->link_speed) {
1796 case IXGBE_LINK_SPEED_10GB_FULL:
1797 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1799 case IXGBE_LINK_SPEED_1GB_FULL:
1800 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1803 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1804 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1805 switch (adapter->link_speed) {
1806 case IXGBE_LINK_SPEED_10GB_FULL:
1807 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1809 case IXGBE_LINK_SPEED_1GB_FULL:
1810 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1813 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1814 switch (adapter->link_speed) {
1815 case IXGBE_LINK_SPEED_10GB_FULL:
1816 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1820 ** XXX: These need to use the proper media types once
1823 #ifndef IFM_ETH_XTYPE
/* Older stacks: report backplane KR/KX4/KX using the nearest alias. */
1824 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1825 switch (adapter->link_speed) {
1826 case IXGBE_LINK_SPEED_10GB_FULL:
1827 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1829 case IXGBE_LINK_SPEED_2_5GB_FULL:
1830 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1832 case IXGBE_LINK_SPEED_1GB_FULL:
1833 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1836 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1837 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1838 switch (adapter->link_speed) {
1839 case IXGBE_LINK_SPEED_10GB_FULL:
1840 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1842 case IXGBE_LINK_SPEED_2_5GB_FULL:
1843 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1845 case IXGBE_LINK_SPEED_1GB_FULL:
1846 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* Newer stacks: native KR/KX4/KX media words are available. */
1850 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1851 switch (adapter->link_speed) {
1852 case IXGBE_LINK_SPEED_10GB_FULL:
1853 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1855 case IXGBE_LINK_SPEED_2_5GB_FULL:
1856 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1858 case IXGBE_LINK_SPEED_1GB_FULL:
1859 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1862 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1863 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1864 switch (adapter->link_speed) {
1865 case IXGBE_LINK_SPEED_10GB_FULL:
1866 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1868 case IXGBE_LINK_SPEED_2_5GB_FULL:
1869 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1871 case IXGBE_LINK_SPEED_1GB_FULL:
1872 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1877 /* If nothing is recognized... */
1878 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1879 ifmr->ifm_active |= IFM_UNKNOWN;
1881 #if __FreeBSD_version >= 900025
1882 /* Display current flow control setting used on link */
1883 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1884 hw->fc.current_mode == ixgbe_fc_full)
1885 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1886 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1887 hw->fc.current_mode == ixgbe_fc_full)
1888 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1891 IXGBE_CORE_UNLOCK(adapter);
1896 /*********************************************************************
1898 * Media Ioctl callback
1900 * This routine is called when the user changes speed/duplex using
1901  * media/mediaopt option with ifconfig.
1903 **********************************************************************/
/*
 * ifmedia change callback (ifconfig media/mediaopt).  Maps the requested
 * IFM_* subtype onto an ixgbe_link_speed mask, asks the MAC to set up
 * the link, and mirrors the chosen speeds into adapter->advertise
 * (bit 2 = 10G, bit 1 = 1G, bit 0 = 100M; cleared for IFM_AUTO).
 * NOTE(review): the fall-through stacking of speed bits across the
 * elided case labels mirrors the fact that e.g. SR/CX4 links also
 * advertise the lower speeds — confirm against the full source.
 */
1905 ixgbe_media_change(struct ifnet * ifp)
1907 struct adapter *adapter = ifp->if_softc;
1908 struct ifmedia *ifm = &adapter->media;
1909 struct ixgbe_hw *hw = &adapter->hw;
1910 ixgbe_link_speed speed = 0;
1912 INIT_DEBUGOUT("ixgbe_media_change: begin");
1914 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from userland. */
1917 if (hw->phy.media_type == ixgbe_media_type_backplane)
1921 ** We don't actually need to check against the supported
1922 ** media types of the adapter; ifmedia will take care of
1925 #ifndef IFM_ETH_XTYPE
1926 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1929 speed |= IXGBE_LINK_SPEED_100_FULL;
1931 case IFM_10G_SR: /* KR, too */
1933 case IFM_10G_CX4: /* KX4 */
1934 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1935 case IFM_10G_TWINAX:
1936 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1939 speed |= IXGBE_LINK_SPEED_100_FULL;
1942 case IFM_1000_CX: /* KX */
1943 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1946 speed |= IXGBE_LINK_SPEED_100_FULL;
1952 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1955 speed |= IXGBE_LINK_SPEED_100_FULL;
1960 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1961 case IFM_10G_TWINAX:
1962 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1965 speed |= IXGBE_LINK_SPEED_100_FULL;
1969 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1972 speed |= IXGBE_LINK_SPEED_100_FULL;
1979 hw->mac.autotry_restart = TRUE;
1980 hw->mac.ops.setup_link(hw, speed, TRUE);
1981 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1982 adapter->advertise = 0;
1984 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
1985 adapter->advertise |= 1 << 2;
1986 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
1987 adapter->advertise |= 1 << 1;
1988 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
1989 adapter->advertise |= 1 << 0;
1995 device_printf(adapter->dev, "Invalid media type!\n");
/*
 * Program FCTRL's unicast/multicast promiscuous bits from the ifnet
 * flags.  First clears UPE and, if the multicast list fits in the
 * hardware limit, MPE; then re-asserts UPE|MPE for IFF_PROMISC or just
 * MPE for IFF_ALLMULTI.  The multicast count is taken as "full" when
 * IFF_ALLMULTI is set, otherwise counted from the if_multiaddrs list
 * (capped at MAX_NUM_MULTICAST_ADDRESSES).
 */
2000 ixgbe_set_promisc(struct adapter *adapter)
2003 struct ifnet *ifp = adapter->ifp;
2006 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2007 reg_rctl &= (~IXGBE_FCTRL_UPE);
2008 if (ifp->if_flags & IFF_ALLMULTI)
2009 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2011 struct ifmultiaddr *ifma;
2012 #if __FreeBSD_version < 800000
2015 if_maddr_rlock(ifp);
2017 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2018 if (ifma->ifma_addr->sa_family != AF_LINK)
2020 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2024 #if __FreeBSD_version < 800000
2025 IF_ADDR_UNLOCK(ifp);
2027 if_maddr_runlock(ifp);
2030 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2031 reg_rctl &= (~IXGBE_FCTRL_MPE);
2032 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2034 if (ifp->if_flags & IFF_PROMISC) {
2035 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2036 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2037 } else if (ifp->if_flags & IFF_ALLMULTI) {
2038 reg_rctl |= IXGBE_FCTRL_MPE;
2039 reg_rctl &= ~IXGBE_FCTRL_UPE;
2040 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2046 /*********************************************************************
2049 * This routine is called whenever multicast address list is updated.
2051 **********************************************************************/
2052 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter after the ifnet's multicast
 * list changes.  Copies each AF_LINK address (with the PF's VMDq pool)
 * into the adapter's mta array, updates FCTRL's promiscuous bits to
 * match the interface flags and list size, and — when the list fits —
 * hands the array to the shared code via ixgbe_mc_array_itr.
 */
2055 ixgbe_set_multi(struct adapter *adapter)
2059 struct ifmultiaddr *ifma;
2060 struct ixgbe_mc_addr *mta;
2062 struct ifnet *ifp = adapter->ifp;
2064 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2067 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2069 #if __FreeBSD_version < 800000
2072 if_maddr_rlock(ifp);
2074 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2075 if (ifma->ifma_addr->sa_family != AF_LINK)
2077 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2079 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2080 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2081 mta[mcnt].vmdq = adapter->pool;
2084 #if __FreeBSD_version < 800000
2085 IF_ADDR_UNLOCK(ifp);
2087 if_maddr_runlock(ifp);
2090 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2091 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2092 if (ifp->if_flags & IFF_PROMISC)
2093 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2094 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2095 ifp->if_flags & IFF_ALLMULTI) {
2096 fctrl |= IXGBE_FCTRL_MPE;
2097 fctrl &= ~IXGBE_FCTRL_UPE;
2099 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2101 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2103 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2104 update_ptr = (u8 *)mta;
2105 ixgbe_update_mc_addr_list(&adapter->hw,
2106 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2113 * This is an iterator function now needed by the multicast
2114 * shared code. It simply feeds the shared code routine the
2115 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): yields one
 * ixgbe_mc_addr entry from the array built in ixgbe_set_multi() and
 * advances *update_ptr to the next entry.
 */
2118 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2120 struct ixgbe_mc_addr *mta;
2122 mta = (struct ixgbe_mc_addr *)*update_ptr;
2125 *update_ptr = (u8*)(mta + 1);
2130 /*********************************************************************
2133  *  This routine checks for link status, updates statistics,
2134 * and runs the watchdog check.
2136 **********************************************************************/
/*
 * Once-per-second callout (core lock held): probes pending SFP modules,
 * refreshes link state and statistics, and runs the TX watchdog.  A
 * queue whose busy counter reached IXGBE_QUEUE_HUNG is removed from
 * active_queues; one exceeding IXGBE_MAX_TX_BUSY is reported and marked
 * hung.  Only when *all* queues are hung does the watchdog path below
 * reset the interface; otherwise queues with pending work get their
 * interrupts rearmed and the callout is rescheduled.
 */
2139 ixgbe_local_timer(void *arg)
2141 struct adapter *adapter = arg;
2142 device_t dev = adapter->dev;
2143 struct ix_queue *que = adapter->queues;
2147 mtx_assert(&adapter->core_mtx, MA_OWNED);
2149 /* Check for pluggable optics */
2150 if (adapter->sfp_probe)
2151 if (!ixgbe_sfp_probe(adapter))
2152 goto out; /* Nothing to do */
2154 ixgbe_update_link_status(adapter);
2155 ixgbe_update_stats_counters(adapter);
2158 ** Check the TX queues status
2159 **      - mark hung queues so we don't schedule on them
2160 **      - watchdog only if all queues show hung
2162 for (int i = 0; i < adapter->num_queues; i++, que++) {
2163 /* Keep track of queues with work for soft irq */
2165 queues |= ((u64)1 << que->me);
2167 ** Each time txeof runs without cleaning, but there
2168 ** are uncleaned descriptors it increments busy. If
2169 ** we get to the MAX we declare it hung.
2171 if (que->busy == IXGBE_QUEUE_HUNG) {
2173 /* Mark the queue as inactive */
2174 adapter->active_queues &= ~((u64)1 << que->me);
2177 /* Check if we've come back from hung */
2178 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2179 adapter->active_queues |= ((u64)1 << que->me);
2181 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2182 device_printf(dev,"Warning queue %d "
2183 "appears to be hung!\n", i);
2184 que->txr->busy = IXGBE_QUEUE_HUNG;
2190 /* Only truly watchdog if all queues show hung */
2191 if (hung == adapter->num_queues)
2193 else if (queues != 0) { /* Force an IRQ on queues with work */
2194 ixgbe_rearm_queues(adapter, queues);
2198 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: note the event and reinitialize the interface. */
2202 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2203 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2204 adapter->watchdog_events++;
2205 ixgbe_init_locked(adapter);
2210 ** Note: this routine updates the OS on the link state
2211 ** the real check of the hardware only happens with
2212 ** a link interrupt.
/*
 * Propagate the cached link state (adapter->link_up, set from the link
 * interrupt path) to the OS.  On an up transition: log the speed
 * (speed code 128 is reported as 10 Gbps, anything else as 1), refresh
 * flow control and DMA-coalescing config, and notify the stack; on a
 * down transition: log, notify, and clear link_active.  VFs are pinged
 * on both transitions (PCI_IOV builds).
 */
2215 ixgbe_update_link_status(struct adapter *adapter)
2217 struct ifnet *ifp = adapter->ifp;
2218 device_t dev = adapter->dev;
2220 if (adapter->link_up){
2221 if (adapter->link_active == FALSE) {
2223 device_printf(dev,"Link is up %d Gbps %s \n",
2224 ((adapter->link_speed == 128)? 10:1),
2226 adapter->link_active = TRUE;
2227 /* Update any Flow Control changes */
2228 ixgbe_fc_enable(&adapter->hw);
2229 /* Update DMA coalescing config */
2230 ixgbe_config_dmac(adapter);
2231 if_link_state_change(ifp, LINK_STATE_UP);
2233 ixgbe_ping_all_vfs(adapter);
2236 } else { /* Link down */
2237 if (adapter->link_active == TRUE) {
2239 device_printf(dev,"Link is Down\n");
2240 if_link_state_change(ifp, LINK_STATE_DOWN);
2241 adapter->link_active = FALSE;
2243 ixgbe_ping_all_vfs(adapter);
2252 /*********************************************************************
2254 * This routine disables all traffic on the adapter by issuing a
2255 * global reset on the MAC and deallocates TX/RX buffers.
2257 **********************************************************************/
/*
 * Bring the adapter down (core lock held): disable interrupts, stop the
 * watchdog callout, clear IFF_DRV_RUNNING, issue the MAC stop/reset,
 * quiesce the 82599 link for D3, turn off the TX laser (no-op without
 * optics), mark link down for the stack, and restore RAR[0].
 */
2260 ixgbe_stop(void *arg)
2263 struct adapter *adapter = arg;
2264 struct ixgbe_hw *hw = &adapter->hw;
2267 mtx_assert(&adapter->core_mtx, MA_OWNED);
2269 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2270 ixgbe_disable_intr(adapter);
2271 callout_stop(&adapter->timer);
2273 /* Let the stack know...*/
2274 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2277 hw->adapter_stopped = FALSE;
2278 ixgbe_stop_adapter(hw);
2279 if (hw->mac.type == ixgbe_mac_82599EB)
2280 ixgbe_stop_mac_link_on_d3_82599(hw);
2281 /* Turn off the laser - noop with no optics */
2282 ixgbe_disable_tx_laser(hw);
2284 /* Update the stack */
2285 adapter->link_up = FALSE;
2286 ixgbe_update_link_status(adapter);
2288 /* reprogram the RAR[0] in case user changed it. */
2289 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2295 /*********************************************************************
2297 * Determine hardware revision.
2299 **********************************************************************/
/*
 * Capture PCI identity (vendor/device/revision/subsystem IDs) into the
 * shared-code hw struct, enable bus mastering, resolve the MAC type,
 * and pick the scatter-gather segment limit (82599-class vs 82598).
 */
2301 ixgbe_identify_hardware(struct adapter *adapter)
2303 device_t dev = adapter->dev;
2304 struct ixgbe_hw *hw = &adapter->hw;
2306 /* Save off the information about this board */
2307 hw->vendor_id = pci_get_vendor(dev);
2308 hw->device_id = pci_get_device(dev);
2309 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2310 hw->subsystem_vendor_id =
2311 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2312 hw->subsystem_device_id =
2313 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2316 ** Make sure BUSMASTER is set
2318 pci_enable_busmaster(dev);
2320 /* We need this here to set the num_segs below */
2321 ixgbe_set_mac_type(hw);
2323 /* Pick up the 82599 settings */
2324 if (hw->mac.type != ixgbe_mac_82598EB) {
2325 hw->phy.smart_speed = ixgbe_smart_speed;
2326 adapter->num_segs = IXGBE_82599_SCATTER;
2328 adapter->num_segs = IXGBE_82598_SCATTER;
2333 /*********************************************************************
2335 * Determine optic type
2337 **********************************************************************/
/*
 * Determine the default ifmedia subtype (adapter->optics) from the
 * supported physical layer reported by the shared code.  Checks run in
 * priority order — 10GBASE-T first, then 1000BASE-T/SX, LR/LRM, SR,
 * SFP+ DA, KX4/CX4 — and fall through to IFM_AUTO when nothing matches.
 * NOTE(review): the early 'return's after each match appear elided
 * from this listing — confirm against the full source.
 */
2339 ixgbe_setup_optics(struct adapter *adapter)
2341 struct ixgbe_hw *hw = &adapter->hw;
2344 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2346 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2347 adapter->optics = IFM_10G_T;
2351 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2352 adapter->optics = IFM_1000_T;
2356 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2357 adapter->optics = IFM_1000_SX;
2361 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2362 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2363 adapter->optics = IFM_10G_LR;
2367 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2368 adapter->optics = IFM_10G_SR;
2372 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2373 adapter->optics = IFM_10G_TWINAX;
2377 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2378 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2379 adapter->optics = IFM_10G_CX4;
2383 /* If we get here just set the default */
2384 adapter->optics = IFM_ETHER | IFM_AUTO;
2388 /*********************************************************************
2390 * Setup the Legacy or MSI Interrupt handler
2392 **********************************************************************/
/*
** Allocate a single (legacy INTx or MSI) interrupt resource, wire up the
** fast interrupt handler, and create the deferred-processing taskqueues
** for the queue, link, SFP module, multispeed-fiber, and PHY events.
** On bus_setup_intr() failure both taskqueues are freed before returning.
*/
2394 ixgbe_allocate_legacy(struct adapter *adapter)
2396 device_t dev = adapter->dev;
2397 struct ix_queue *que = adapter->queues;
2398 #ifndef IXGBE_LEGACY_TX
2399 struct tx_ring *txr = adapter->tx_rings;
/* rid 1 selects the MSI vector when one was allocated by setup_msix */
2404 if (adapter->msix == 1)
2407 /* We allocate a single interrupt resource */
2408 adapter->res = bus_alloc_resource_any(dev,
2409 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2410 if (adapter->res == NULL) {
2411 device_printf(dev, "Unable to allocate bus resource: "
2417 * Try allocating a fast interrupt and the associated deferred
2418 * processing contexts.
2420 #ifndef IXGBE_LEGACY_TX
2421 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2423 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2424 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2425 taskqueue_thread_enqueue, &que->tq);
2426 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2427 device_get_nameunit(adapter->dev));
2429 /* Tasklets for Link, SFP and Multispeed Fiber */
2430 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2431 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2432 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2433 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2435 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2437 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2438 taskqueue_thread_enqueue, &adapter->tq);
2439 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2440 device_get_nameunit(adapter->dev));
2442 if ((error = bus_setup_intr(dev, adapter->res,
2443 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2444 que, &adapter->tag)) != 0) {
2445 device_printf(dev, "Failed to register fast interrupt "
2446 "handler: %d\n", error);
/* teardown on failure: release both taskqueues created above */
2447 taskqueue_free(que->tq);
2448 taskqueue_free(adapter->tq);
2453 /* For simplicity in the handlers */
2454 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2460 /*********************************************************************
2462 * Setup MSIX Interrupt resources and handlers
2464 **********************************************************************/
/*
** Allocate and wire up MSI-X vectors: one per RX/TX queue pair plus one
** for link/admin events. Each queue vector gets its own fast taskqueue;
** with RSS enabled the vector (and taskqueue thread) is bound to the CPU
** owning the matching RSS bucket. Finally the tasklets for link, module,
** multispeed-fiber, mailbox (SR-IOV), PHY and flow-director events are
** created on a shared "linkq" taskqueue.
*/
2466 ixgbe_allocate_msix(struct adapter *adapter)
2468 device_t dev = adapter->dev;
2469 struct ix_queue *que = adapter->queues;
2470 struct tx_ring *txr = adapter->tx_rings;
2471 int error, rid, vector = 0;
2479 * If we're doing RSS, the number of queues needs to
2480 * match the number of RSS buckets that are configured.
2482 * + If there's more queues than RSS buckets, we'll end
2483 * up with queues that get no traffic.
2485 * + If there's more RSS buckets than queues, we'll end
2486 * up having multiple RSS buckets map to the same queue,
2487 * so there'll be some contention.
2489 if (adapter->num_queues != rss_getnumbuckets()) {
2491 "%s: number of queues (%d) != number of RSS buckets (%d)"
2492 "; performance will be impacted.\n",
2494 adapter->num_queues,
2495 rss_getnumbuckets());
/* One vector per queue; "rid" is derived from the vector (not visible here) */
2499 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2501 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2502 RF_SHAREABLE | RF_ACTIVE);
2503 if (que->res == NULL) {
2504 device_printf(dev,"Unable to allocate"
2505 " bus resource: que interrupt [%d]\n", vector);
2508 /* Set the handler function */
2509 error = bus_setup_intr(dev, que->res,
2510 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2511 ixgbe_msix_que, que, &que->tag);
2514 device_printf(dev, "Failed to register QUE handler");
2517 #if __FreeBSD_version >= 800504
2518 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
/* Record this queue's vector in the mask used by the interrupt handlers */
2521 adapter->active_queues |= (u64)(1 << que->msix);
2524 * The queue ID is used as the RSS layer bucket ID.
2525 * We look up the queue ID -> RSS CPU ID and select
2528 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2531 * Bind the msix vector, and thus the
2532 * rings to the corresponding cpu.
2534 * This just happens to match the default RSS round-robin
2535 * bucket -> queue -> CPU allocation.
2537 if (adapter->num_queues > 1)
2540 if (adapter->num_queues > 1)
2541 bus_bind_intr(dev, que->res, cpu_id);
2545 "Bound RSS bucket %d to CPU %d\n",
2549 "Bound queue %d to cpu %d\n",
2552 #endif /* IXGBE_DEBUG */
2555 #ifndef IXGBE_LEGACY_TX
2556 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2558 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2559 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2560 taskqueue_thread_enqueue, &que->tq);
/* With RSS: pin the taskqueue thread to the same CPU as the vector */
2562 CPU_SETOF(cpu_id, &cpu_mask);
2563 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2566 device_get_nameunit(adapter->dev),
2569 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2570 device_get_nameunit(adapter->dev), i);
/* And now the link/admin vector (uses the next rid after the queues) */
2576 adapter->res = bus_alloc_resource_any(dev,
2577 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2578 if (!adapter->res) {
2579 device_printf(dev,"Unable to allocate"
2580 " bus resource: Link interrupt [%d]\n", rid);
2583 /* Set the link handler function */
2584 error = bus_setup_intr(dev, adapter->res,
2585 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2586 ixgbe_msix_link, adapter, &adapter->tag);
2588 adapter->res = NULL;
2589 device_printf(dev, "Failed to register LINK handler");
2592 #if __FreeBSD_version >= 800504
2593 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2595 adapter->vector = vector;
2596 /* Tasklets for Link, SFP and Multispeed Fiber */
2597 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2598 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2599 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2601 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2603 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2605 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2607 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2608 taskqueue_thread_enqueue, &adapter->tq);
2609 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2610 device_get_nameunit(adapter->dev));
2616 * Setup Either MSI/X or MSI
/*
** Decide and allocate the interrupt scheme: try MSI-X first (mapping the
** MSI-X table BAR, computing a queue count from CPUs/RSS buckets/tunable,
** wanting queues+1 vectors), fall back to MSI, then to a legacy interrupt.
** Returns the number of vectors obtained (presumably; the return
** statements are not visible in this extraction).
*/
2619 ixgbe_setup_msix(struct adapter *adapter)
2621 device_t dev = adapter->dev;
2622 int rid, want, queues, msgs;
2624 /* Override by tuneable */
2625 if (ixgbe_enable_msix == 0)
2628 /* First try MSI/X */
2629 msgs = pci_msix_count(dev);
2632 rid = PCIR_BAR(MSIX_82598_BAR);
2633 adapter->msix_mem = bus_alloc_resource_any(dev,
2634 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2635 if (adapter->msix_mem == NULL) {
2636 rid += 4; /* 82599 maps in higher BAR */
2637 adapter->msix_mem = bus_alloc_resource_any(dev,
2638 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2640 if (adapter->msix_mem == NULL) {
2641 /* May not be enabled */
2642 device_printf(adapter->dev,
2643 "Unable to map MSIX table \n");
2647 /* Figure out a reasonable auto config value */
2648 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2651 /* If we're doing RSS, clamp at the number of RSS buckets */
2652 if (queues > rss_getnumbuckets())
2653 queues = rss_getnumbuckets();
/* Explicit tunable wins over the computed value */
2656 if (ixgbe_num_queues != 0)
2657 queues = ixgbe_num_queues;
2658 /* Set max queues to 8 when autoconfiguring */
2659 else if ((ixgbe_num_queues == 0) && (queues > 8))
2662 /* reflect correct sysctl value */
2663 ixgbe_num_queues = queues;
2666 ** Want one vector (RX/TX pair) per queue
2667 ** plus an additional for Link.
2673 device_printf(adapter->dev,
2674 "MSIX Configuration Problem, "
2675 "%d vectors but %d queues wanted!\n",
2679 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2680 device_printf(adapter->dev,
2681 "Using MSIX interrupts with %d vectors\n", msgs);
2682 adapter->num_queues = queues;
2686 ** If MSIX alloc failed or provided us with
2687 ** less than needed, free and fall through to MSI
2689 pci_release_msi(dev);
/* MSI path: release the (unused) MSI-X table mapping first */
2692 if (adapter->msix_mem != NULL) {
2693 bus_release_resource(dev, SYS_RES_MEMORY,
2694 rid, adapter->msix_mem);
2695 adapter->msix_mem = NULL;
2698 if (pci_alloc_msi(dev, &msgs) == 0) {
2699 device_printf(adapter->dev, "Using an MSI interrupt\n");
2702 device_printf(adapter->dev, "Using a Legacy interrupt\n");
/*
** Map BAR(0) register space, record the bus_space tag/handle for the
** register access macros, point the shared code at the mapping, then
** determine the interrupt scheme via ixgbe_setup_msix().
*/
2708 ixgbe_allocate_pci_resources(struct adapter *adapter)
2711 device_t dev = adapter->dev;
2714 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2717 if (!(adapter->pci_mem)) {
2718 device_printf(dev, "Unable to allocate bus resource: memory\n");
2722 /* Save bus_space values for READ/WRITE_REG macros */
2723 adapter->osdep.mem_bus_space_tag =
2724 rman_get_bustag(adapter->pci_mem);
2725 adapter->osdep.mem_bus_space_handle =
2726 rman_get_bushandle(adapter->pci_mem);
2727 /* Set hw values for shared code */
2728 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2729 adapter->hw.back = adapter;
2731 /* Default to 1 queue if MSI-X setup fails */
2732 adapter->num_queues = 1;
2735 ** Now setup MSI or MSI-X, should
2736 ** return us the number of supported
2737 ** vectors. (Will be 1 for MSI)
2739 adapter->msix = ixgbe_setup_msix(adapter);
/*
** Release everything allocate_pci_resources / allocate_msix acquired:
** per-queue interrupt handlers and IRQ resources, the link/legacy
** interrupt, any MSI/MSI-X allocation, the MSI-X table BAR mapping,
** and finally the BAR(0) register mapping. Safe to call from a partly
** failed attach (bails early when adapter->res was never set).
*/
2744 ixgbe_free_pci_resources(struct adapter * adapter)
2746 struct ix_queue *que = adapter->queues;
2747 device_t dev = adapter->dev;
2750 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2751 memrid = PCIR_BAR(MSIX_82598_BAR);
2753 memrid = PCIR_BAR(MSIX_82599_BAR);
2756 ** There is a slight possibility of a failure mode
2757 ** in attach that will result in entering this function
2758 ** before interrupt resources have been initialized, and
2759 ** in that case we do not want to execute the loops below
2760 ** We can detect this reliably by the state of the adapter
2763 if (adapter->res == NULL)
2767 ** Release all msix queue resources:
2769 for (int i = 0; i < adapter->num_queues; i++, que++) {
2770 rid = que->msix + 1;
2771 if (que->tag != NULL) {
2772 bus_teardown_intr(dev, que->res, que->tag);
2775 if (que->res != NULL)
2776 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2780 /* Clean the Legacy or Link interrupt last */
2781 if (adapter->vector) /* we are doing MSIX */
2782 rid = adapter->vector + 1;
2784 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2786 if (adapter->tag != NULL) {
2787 bus_teardown_intr(dev, adapter->res, adapter->tag);
2788 adapter->tag = NULL;
2790 if (adapter->res != NULL)
2791 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
/* Give back any MSI/MSI-X message allocation and BAR mappings */
2795 pci_release_msi(dev);
2797 if (adapter->msix_mem != NULL)
2798 bus_release_resource(dev, SYS_RES_MEMORY,
2799 memrid, adapter->msix_mem);
2801 if (adapter->pci_mem != NULL)
2802 bus_release_resource(dev, SYS_RES_MEMORY,
2803 PCIR_BAR(0), adapter->pci_mem);
2808 /*********************************************************************
2810 * Setup networking device structure and register an interface.
2812 **********************************************************************/
/*
** Allocate and initialize the ifnet: entry points (init/ioctl/transmit),
** TSO limits, capability flags, ether attach, and the ifmedia list for
** the supported media types (defaulting to autoselect).
*/
2814 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2818 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2820 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2822 device_printf(dev, "can not allocate ifnet structure\n");
2825 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2826 ifp->if_baudrate = IF_Gbps(10);
2827 ifp->if_init = ixgbe_init;
2828 ifp->if_softc = adapter;
2829 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2830 ifp->if_ioctl = ixgbe_ioctl;
2831 #if __FreeBSD_version >= 1100036
2832 if_setgetcounterfn(ifp, ixgbe_get_counter);
2834 #if __FreeBSD_version >= 1100045
2835 /* TSO parameters */
2836 ifp->if_hw_tsomax = 65518;
2837 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2838 ifp->if_hw_tsomaxsegsize = 2048;
2840 #ifndef IXGBE_LEGACY_TX
2841 ifp->if_transmit = ixgbe_mq_start;
2842 ifp->if_qflush = ixgbe_qflush;
/* Legacy single-queue transmit path */
2844 ifp->if_start = ixgbe_start;
2845 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2846 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2847 IFQ_SET_READY(&ifp->if_snd);
2850 ether_ifattach(ifp, adapter->hw.mac.addr);
2852 adapter->max_frame_size =
2853 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2856 * Tell the upper layer(s) we support long frames.
2858 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2860 /* Set capability flags */
2861 ifp->if_capabilities |= IFCAP_RXCSUM
2868 | IFCAP_VLAN_HWTAGGING
2875 /* Enable the above capabilities by default */
2876 ifp->if_capenable = ifp->if_capabilities;
2879 ** Don't turn this on by default, if vlans are
2880 ** created on another pseudo device (eg. lagg)
2881 ** then vlan events are not passed thru, breaking
2882 ** operation, but with HW FILTER off it works. If
2883 ** using vlans directly on the ixgbe driver you can
2884 ** enable this and get full hardware tag filtering.
2886 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2889 * Specify the media types supported by this adapter and register
2890 * callbacks to update media and link information
2892 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2893 ixgbe_media_status);
2895 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2896 ixgbe_add_media_types(adapter);
2898 /* Set autoselect media by default */
2899 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
** Populate the ifmedia list from the supported physical-layer bitmask.
** Backplane (KR/KX4/KX) types map directly when IFM_ETH_XTYPE exists;
** otherwise they are approximated by SR/CX4/CX equivalents with a
** console note. Always finishes by adding autoselect.
*/
2905 ixgbe_add_media_types(struct adapter *adapter)
2907 struct ixgbe_hw *hw = &adapter->hw;
2908 device_t dev = adapter->dev;
2911 layer = adapter->phy_layer;
2913 /* Media types with matching FreeBSD media defines */
2914 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2915 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2916 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2917 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2918 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2919 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2921 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2922 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2923 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2925 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2926 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2927 if (hw->phy.multispeed_fiber)
2928 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2930 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2931 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2932 if (hw->phy.multispeed_fiber)
2933 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2934 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2935 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2936 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2937 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2939 #ifdef IFM_ETH_XTYPE
2940 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2941 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2942 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2943 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2944 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2945 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
/* No IFM_ETH_XTYPE: approximate backplane media with legacy defines */
2947 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2948 device_printf(dev, "Media supported: 10GbaseKR\n");
2949 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2950 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2952 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2953 device_printf(dev, "Media supported: 10GbaseKX4\n");
2954 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2955 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2957 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2958 device_printf(dev, "Media supported: 1000baseKX\n");
2959 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2960 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2963 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2964 device_printf(dev, "Media supported: 1000baseBX\n");
2966 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2967 ifmedia_add(&adapter->media,
2968 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2969 ifmedia_add(&adapter->media,
2970 IFM_ETHER | IFM_1000_T, 0, NULL);
2973 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
** Configure link: for SFP modules defer to the module-insertion tasklet;
** otherwise query link state, resolve the advertised speed set (from the
** PHY or from get_link_capabilities), and program it via setup_link.
*/
2977 ixgbe_config_link(struct adapter *adapter)
2979 struct ixgbe_hw *hw = &adapter->hw;
2980 u32 autoneg, err = 0;
2981 bool sfp, negotiate;
2983 sfp = ixgbe_is_sfp(hw);
/* SFP: let the mod tasklet identify the module and set up link */
2986 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2988 if (hw->mac.ops.check_link)
2989 err = ixgbe_check_link(hw, &adapter->link_speed,
2990 &adapter->link_up, FALSE);
2993 autoneg = hw->phy.autoneg_advertised;
2994 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2995 err = hw->mac.ops.get_link_capabilities(hw,
2996 &autoneg, &negotiate);
2999 if (hw->mac.ops.setup_link)
3000 err = hw->mac.ops.setup_link(hw,
3001 autoneg, adapter->link_up);
3008 /*********************************************************************
3010 * Enable transmit units.
3012 **********************************************************************/
/*
** Program the hardware TX path: per-ring descriptor base/length and
** head/tail registers, disable descriptor write-back relaxed ordering,
** and (non-82598) enable DMA TX while setting MTQC with the TX arbiter
** temporarily disabled.
*/
3014 ixgbe_initialize_transmit_units(struct adapter *adapter)
3016 struct tx_ring *txr = adapter->tx_rings;
3017 struct ixgbe_hw *hw = &adapter->hw;
3019 /* Setup the Base and Length of the Tx Descriptor Ring */
3020 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3021 u64 tdba = txr->txdma.dma_paddr;
3025 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3026 (tdba & 0x00000000ffffffffULL));
3027 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3028 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3029 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3031 /* Setup the HW Tx Head and Tail descriptor pointers */
3032 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3033 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3035 /* Cache the tail address */
3036 txr->tail = IXGBE_TDT(j);
3038 /* Disable Head Writeback */
3040 * Note: for X550 series devices, these registers are actually
3041 * prefixed with TPH_ instead of DCA_, but the addresses and
3042 * fields remain the same.
3044 switch (hw->mac.type) {
3045 case ixgbe_mac_82598EB:
3046 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3049 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3052 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3053 switch (hw->mac.type) {
3054 case ixgbe_mac_82598EB:
3055 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3058 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3064 if (hw->mac.type != ixgbe_mac_82598EB) {
3065 u32 dmatxctl, rttdcs;
3067 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3069 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3070 dmatxctl |= IXGBE_DMATXCTL_TE;
3071 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3072 /* Disable arbiter to set MTQC */
3073 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3074 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3075 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3077 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3079 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
/* Re-enable the arbiter now that MTQC is programmed */
3081 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3082 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
** Program RSS: fill the redirection table (RETA, plus ERETA on MACs with
** a larger table), load the 10-word hash key into RSSRK, and set the
** hash-field selection in MRQC. With kernel RSS the configured key,
** bucket->queue indirection and hash config are used; otherwise a random
** key and a round-robin queue mapping, with UDP hashing deliberately
** left out (see comment below).
*/
3089 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3091 struct ixgbe_hw *hw = &adapter->hw;
3092 u32 reta = 0, mrqc, rss_key[10];
3093 int queue_id, table_size, index_mult;
3095 u32 rss_hash_config;
3098 enum ixgbe_iov_mode mode;
3102 /* Fetch the configured RSS key */
3103 rss_getkey((uint8_t *) &rss_key);
3105 /* set up random bits */
3106 arc4rand(&rss_key, sizeof(rss_key), 0);
3109 /* Set multiplier for RETA setup and table size based on MAC */
3112 switch (adapter->hw.mac.type) {
3113 case ixgbe_mac_82598EB:
3116 case ixgbe_mac_X550:
3117 case ixgbe_mac_X550EM_x:
3124 /* Set up the redirection table */
3125 for (int i = 0, j = 0; i < table_size; i++, j++) {
3126 if (j == adapter->num_queues) j = 0;
3129 * Fetch the RSS bucket id for the given indirection entry.
3130 * Cap it at the number of configured buckets (which is
3133 queue_id = rss_get_indirection_to_bucket(i);
3134 queue_id = queue_id % adapter->num_queues;
3136 queue_id = (j * index_mult);
3139 * The low 8 bits are for hash value (n+0);
3140 * The next 8 bits are for hash value (n+1), etc.
3143 reta = reta | ( ((uint32_t) queue_id) << 24);
/* Every fourth entry: flush the packed word to RETA/ERETA */
3146 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3148 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3153 /* Now fill our hash function seeds */
3154 for (int i = 0; i < 10; i++)
3155 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3157 /* Perform hash on these packet types */
3159 mrqc = IXGBE_MRQC_RSSEN;
3160 rss_hash_config = rss_gethashconfig();
3161 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3162 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3163 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3164 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3165 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3166 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3167 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3168 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3169 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3170 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3171 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3172 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3173 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3174 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3175 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3176 device_printf(adapter->dev,
3177 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3178 "but not supported\n", __func__);
3179 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3180 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3181 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3182 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3185 * Disable UDP - IP fragments aren't currently being handled
3186 * and so we end up with a mix of 2-tuple and 4-tuple
3189 mrqc = IXGBE_MRQC_RSSEN
3190 | IXGBE_MRQC_RSS_FIELD_IPV4
3191 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3192 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3193 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3194 | IXGBE_MRQC_RSS_FIELD_IPV6
3195 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
/* SR-IOV: fold the virtualization queue layout into MRQC */
3199 mode = ixgbe_get_iov_mode(adapter);
3200 mrqc |= ixgbe_get_mrqc(mode);
3202 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3206 /*********************************************************************
3208 * Setup receive registers and features.
3210 **********************************************************************/
/* SRRCTL buffer-size field helpers: header-size shift, and the rounding
** addend used to convert an mbuf size to the packed BSIZEPKT field. */
3211 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3213 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
** Program the hardware RX path: disable receives, set broadcast/jumbo/
** CRC-strip policy, then per ring the descriptor base/length, SRRCTL
** buffer sizing (with DROP_EN when multiqueue and no flow control), and
** head/tail pointers. Finishes with PSRTYPE, RSS mapping, and RXCSUM.
*/
3216 ixgbe_initialize_receive_units(struct adapter *adapter)
3218 struct rx_ring *rxr = adapter->rx_rings;
3219 struct ixgbe_hw *hw = &adapter->hw;
3220 struct ifnet *ifp = adapter->ifp;
3221 u32 bufsz, fctrl, srrctl, rxcsum;
3225 * Make sure receives are disabled while
3226 * setting up the descriptor ring
3228 ixgbe_disable_rx(hw);
3230 /* Enable broadcasts */
3231 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3232 fctrl |= IXGBE_FCTRL_BAM;
3233 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3234 fctrl |= IXGBE_FCTRL_DPF;
3235 fctrl |= IXGBE_FCTRL_PMCF;
3237 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3239 /* Set for Jumbo Frames? */
3240 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3241 if (ifp->if_mtu > ETHERMTU)
3242 hlreg |= IXGBE_HLREG0_JUMBOEN;
3244 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3246 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3247 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3248 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3250 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3251 #endif /* DEV_NETMAP */
3252 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf cluster size to the packed SRRCTL field value */
3254 bufsz = (adapter->rx_mbuf_sz +
3255 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3257 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3258 u64 rdba = rxr->rxdma.dma_paddr;
3261 /* Setup the Base and Length of the Rx Descriptor Ring */
3262 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3263 (rdba & 0x00000000ffffffffULL));
3264 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3265 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3266 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3268 /* Set up the SRRCTL register */
3269 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3270 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3271 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3273 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3276 * Set DROP_EN iff we have no flow control and >1 queue.
3277 * Note that srrctl was cleared shortly before during reset,
3278 * so we do not need to clear the bit, but do it just in case
3279 * this code is moved elsewhere.
3281 if (adapter->num_queues > 1 &&
3282 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3283 srrctl |= IXGBE_SRRCTL_DROP_EN;
3285 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3288 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3290 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3291 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3292 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3294 /* Set the driver rx tail address */
3295 rxr->tail = IXGBE_RDT(rxr->me);
3298 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3299 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3300 IXGBE_PSRTYPE_UDPHDR |
3301 IXGBE_PSRTYPE_IPV4HDR |
3302 IXGBE_PSRTYPE_IPV6HDR;
3303 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3306 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3308 ixgbe_initialize_rss_mapping(adapter);
3310 if (adapter->num_queues > 1) {
3311 /* RSS and RX IPP Checksum are mutually exclusive */
3312 rxcsum |= IXGBE_RXCSUM_PCSD;
3315 if (ifp->if_capenable & IFCAP_RXCSUM)
3316 rxcsum |= IXGBE_RXCSUM_PCSD;
3318 /* This is useful for calculating UDP/IP fragment checksums */
3319 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3320 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3322 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3329 ** This routine is run via an vlan config EVENT,
3330 ** it enables us to use the HW Filter table since
3331 ** we can get the vlan id. This just creates the
3332 ** entry in the soft version of the VFTA, init will
3333 ** repopulate the real table.
/*
** VLAN-config event callback: record the new tag in the soft shadow VFTA
** (init repopulates the real table) and refresh HW VLAN support.
** Ignores events for other interfaces and out-of-range tags.
*/
3336 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3338 struct adapter *adapter = ifp->if_softc;
3341 if (ifp->if_softc != arg) /* Not our event */
3344 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3347 IXGBE_CORE_LOCK(adapter);
3348 index = (vtag >> 5) & 0x7F;
3350 adapter->shadow_vfta[index] |= (1 << bit);
3351 ++adapter->num_vlans;
3352 ixgbe_setup_vlan_hw_support(adapter);
3353 IXGBE_CORE_UNLOCK(adapter);
3357 ** This routine is run via an vlan
3358 ** unconfig EVENT, remove our entry
3359 ** in the soft vfta.
/*
** VLAN-unconfig event callback: clear the tag's bit in the soft shadow
** VFTA and re-run HW VLAN setup to load the change. Mirrors
** ixgbe_register_vlan including the event/tag validity checks.
*/
3362 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3364 struct adapter *adapter = ifp->if_softc;
3367 if (ifp->if_softc != arg)
3370 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3373 IXGBE_CORE_LOCK(adapter);
3374 index = (vtag >> 5) & 0x7F;
3376 adapter->shadow_vfta[index] &= ~(1 << bit);
3377 --adapter->num_vlans;
3378 /* Re-init to load the changes */
3379 ixgbe_setup_vlan_hw_support(adapter);
3380 IXGBE_CORE_UNLOCK(adapter);
/*
** (Re)program HW VLAN support after a soft reset: enable per-queue tag
** stripping (non-82598), repopulate the hardware VFTA from the shadow
** copy, and set VLNCTRL filter/strip bits per the interface capability
** flags. No-op when no VLANs are registered.
*/
3384 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3386 struct ifnet *ifp = adapter->ifp;
3387 struct ixgbe_hw *hw = &adapter->hw;
3388 struct rx_ring *rxr;
3393 ** We get here thru init_locked, meaning
3394 ** a soft reset, this has already cleared
3395 ** the VFTA and other state, so if there
3396 ** have been no vlan's registered do nothing.
3398 if (adapter->num_vlans == 0)
3401 /* Setup the queues for vlans */
3402 for (int i = 0; i < adapter->num_queues; i++) {
3403 rxr = &adapter->rx_rings[i];
3404 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3405 if (hw->mac.type != ixgbe_mac_82598EB) {
3406 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3407 ctrl |= IXGBE_RXDCTL_VME;
3408 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3410 rxr->vtag_strip = TRUE;
3413 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3416 ** A soft reset zero's out the VFTA, so
3417 ** we need to repopulate it now.
3419 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3420 if (adapter->shadow_vfta[i] != 0)
3421 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3422 adapter->shadow_vfta[i]);
3424 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3425 /* Enable the Filter Table if enabled */
3426 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3427 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3428 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 tag stripping is global in VLNCTRL rather than per queue */
3430 if (hw->mac.type == ixgbe_mac_82598EB)
3431 ctrl |= IXGBE_VLNCTRL_VME;
3432 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
** Build and write the EIMS enable mask — base causes plus per-MAC extras
** (ECC, thermal sensor, SFP GPIs, flow director, SR-IOV mailbox) — set
** EIAC auto-clear for MSI-X (excluding link/other causes), then enable
** every queue vector and flush.
*/
3436 ixgbe_enable_intr(struct adapter *adapter)
3438 struct ixgbe_hw *hw = &adapter->hw;
3439 struct ix_queue *que = adapter->queues;
3442 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3443 /* Enable Fan Failure detection */
3444 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3445 mask |= IXGBE_EIMS_GPI_SDP1;
3447 switch (adapter->hw.mac.type) {
3448 case ixgbe_mac_82599EB:
3449 mask |= IXGBE_EIMS_ECC;
3450 /* Temperature sensor on some adapters */
3451 mask |= IXGBE_EIMS_GPI_SDP0;
3452 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3453 mask |= IXGBE_EIMS_GPI_SDP1;
3454 mask |= IXGBE_EIMS_GPI_SDP2;
3456 mask |= IXGBE_EIMS_FLOW_DIR;
3459 mask |= IXGBE_EIMS_MAILBOX;
3462 case ixgbe_mac_X540:
3463 /* Detect if Thermal Sensor is enabled */
3464 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3465 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3466 mask |= IXGBE_EIMS_TS;
3467 mask |= IXGBE_EIMS_ECC;
3469 mask |= IXGBE_EIMS_FLOW_DIR;
3472 case ixgbe_mac_X550:
3473 case ixgbe_mac_X550EM_x:
3474 /* MAC thermal sensor is automatically enabled */
3475 mask |= IXGBE_EIMS_TS;
3476 /* Some devices use SDP0 for important information */
3477 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3478 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3479 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3480 mask |= IXGBE_EIMS_ECC;
3482 mask |= IXGBE_EIMS_FLOW_DIR;
3485 mask |= IXGBE_EIMS_MAILBOX;
3492 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3494 /* With MSI-X we use auto clear */
3495 if (adapter->msix_mem) {
3496 mask = IXGBE_EIMS_ENABLE_MASK;
3497 /* Don't autoclear Link */
3498 mask &= ~IXGBE_EIMS_OTHER;
3499 mask &= ~IXGBE_EIMS_LSC;
3501 mask &= ~IXGBE_EIMS_MAILBOX;
3503 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3507 ** Now enable all queues, this is done separately to
3508 ** allow for handling the extended (beyond 32) MSIX
3509 ** vectors that can be used by 82599
3511 for (int i = 0; i < adapter->num_queues; i++, que++)
3512 ixgbe_enable_queue(adapter, que->msix);
3514 IXGBE_WRITE_FLUSH(hw);
/*
** Mask all interrupt causes: clear EIAC auto-clear when MSI-X is in use,
** write EIMC (82598 uses a single register; newer MACs also have the
** extended EIMC_EX pair), then flush.
*/
3520 ixgbe_disable_intr(struct adapter *adapter)
3522 if (adapter->msix_mem)
3523 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3524 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3525 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3527 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3531 IXGBE_WRITE_FLUSH(&adapter->hw);
3536 ** Get the width and transaction speed of
3537 ** the slot this adapter is plugged into.
3540 ixgbe_get_slot_info(struct adapter *adapter)
3542 device_t dev = adapter->dev;
3543 struct ixgbe_hw *hw = &adapter->hw;
3544 struct ixgbe_mac_info *mac = &hw->mac;
3548 /* For most devices simply call the shared code routine */
3549 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3550 ixgbe_get_bus_info(hw);
3551 /* These devices don't use PCI-E */
3552 switch (hw->mac.type) {
3553 case ixgbe_mac_X550EM_x:
3561 ** For the Quad port adapter we need to parse back
3562 ** up the PCI tree to find the speed of the expansion
3563 ** slot into which this adapter is plugged. A bit more work.
3565 dev = device_get_parent(device_get_parent(dev));
3567 device_printf(dev, "parent pcib = %x,%x,%x\n",
3568 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3570 dev = device_get_parent(device_get_parent(dev));
3572 device_printf(dev, "slot pcib = %x,%x,%x\n",
3573 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3575 /* Now get the PCI Express Capabilities offset */
3576 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3577 /* ...and read the Link Status Register */
3578 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3579 switch (link & IXGBE_PCI_LINK_WIDTH) {
3580 case IXGBE_PCI_LINK_WIDTH_1:
3581 hw->bus.width = ixgbe_bus_width_pcie_x1;
3583 case IXGBE_PCI_LINK_WIDTH_2:
3584 hw->bus.width = ixgbe_bus_width_pcie_x2;
3586 case IXGBE_PCI_LINK_WIDTH_4:
3587 hw->bus.width = ixgbe_bus_width_pcie_x4;
3589 case IXGBE_PCI_LINK_WIDTH_8:
3590 hw->bus.width = ixgbe_bus_width_pcie_x8;
3593 hw->bus.width = ixgbe_bus_width_unknown;
3597 switch (link & IXGBE_PCI_LINK_SPEED) {
3598 case IXGBE_PCI_LINK_SPEED_2500:
3599 hw->bus.speed = ixgbe_bus_speed_2500;
3601 case IXGBE_PCI_LINK_SPEED_5000:
3602 hw->bus.speed = ixgbe_bus_speed_5000;
3604 case IXGBE_PCI_LINK_SPEED_8000:
3605 hw->bus.speed = ixgbe_bus_speed_8000;
3608 hw->bus.speed = ixgbe_bus_speed_unknown;
3612 mac->ops.set_lan_id(hw);
3615 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3616 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3617 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3618 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3619 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3620 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3621 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3624 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3625 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3626 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3627 device_printf(dev, "PCI-Express bandwidth available"
3628 " for this card\n is not sufficient for"
3629 " optimal performance.\n");
3630 device_printf(dev, "For optimal performance a x8 "
3631 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3633 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3634 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3635 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3636 device_printf(dev, "PCI-Express bandwidth available"
3637 " for this card\n is not sufficient for"
3638 " optimal performance.\n");
3639 device_printf(dev, "For optimal performance a x8 "
3640 "PCIE Gen3 slot is required.\n");
3648 ** Setup the correct IVAR register for a particular MSIX interrupt
3649 ** (yes this is all very magic and confusing :)
3650 ** - entry is the register array entry
3651 ** - vector is the MSIX vector for this queue
3652 ** - type is RX/TX/MISC
3655 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3657 struct ixgbe_hw *hw = &adapter->hw;
3660 vector |= IXGBE_IVAR_ALLOC_VAL;
3662 switch (hw->mac.type) {
3664 case ixgbe_mac_82598EB:
3666 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3668 entry += (type * 64);
3669 index = (entry >> 2) & 0x1F;
3670 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3671 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3672 ivar |= (vector << (8 * (entry & 0x3)));
3673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3676 case ixgbe_mac_82599EB:
3677 case ixgbe_mac_X540:
3678 case ixgbe_mac_X550:
3679 case ixgbe_mac_X550EM_x:
3680 if (type == -1) { /* MISC IVAR */
3681 index = (entry & 1) * 8;
3682 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3683 ivar &= ~(0xFF << index);
3684 ivar |= (vector << index);
3685 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3686 } else { /* RX/TX IVARS */
3687 index = (16 * (entry & 1)) + (8 * type);
3688 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3689 ivar &= ~(0xFF << index);
3690 ivar |= (vector << index);
3691 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3700 ixgbe_configure_ivars(struct adapter *adapter)
3702 struct ix_queue *que = adapter->queues;
3705 if (ixgbe_max_interrupt_rate > 0)
3706 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3709 ** Disable DMA coalescing if interrupt moderation is
3716 for (int i = 0; i < adapter->num_queues; i++, que++) {
3717 struct rx_ring *rxr = &adapter->rx_rings[i];
3718 struct tx_ring *txr = &adapter->tx_rings[i];
3719 /* First the RX queue entry */
3720 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3721 /* ... and the TX */
3722 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3723 /* Set an Initial EITR value */
3724 IXGBE_WRITE_REG(&adapter->hw,
3725 IXGBE_EITR(que->msix), newitr);
3728 /* For the Link interrupt */
3729 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3733 ** ixgbe_sfp_probe - called in the local timer to
3734 ** determine if a port had optics inserted.
3737 ixgbe_sfp_probe(struct adapter *adapter)
3739 struct ixgbe_hw *hw = &adapter->hw;
3740 device_t dev = adapter->dev;
3741 bool result = FALSE;
3743 if ((hw->phy.type == ixgbe_phy_nl) &&
3744 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3745 s32 ret = hw->phy.ops.identify_sfp(hw);
3748 ret = hw->phy.ops.reset(hw);
3749 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3750 device_printf(dev, "Unsupported SFP+ module detected!");
3751 device_printf(dev, "Reload driver with supported module.\n");
3752 adapter->sfp_probe = FALSE;
3755 device_printf(dev, "SFP+ module detected!\n");
3756 /* We now have supported optics */
3757 adapter->sfp_probe = FALSE;
3758 /* Set the optics type so system reports correctly */
3759 ixgbe_setup_optics(adapter);
3767 ** Tasklet handler for MSIX Link interrupts
3768 ** - do outside interrupt since it might sleep
3771 ixgbe_handle_link(void *context, int pending)
3773 struct adapter *adapter = context;
3774 struct ixgbe_hw *hw = &adapter->hw;
3776 ixgbe_check_link(hw,
3777 &adapter->link_speed, &adapter->link_up, 0);
3778 ixgbe_update_link_status(adapter);
3780 /* Re-enable link interrupts */
3781 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3785 ** Tasklet for handling SFP module interrupts
3788 ixgbe_handle_mod(void *context, int pending)
3790 struct adapter *adapter = context;
3791 struct ixgbe_hw *hw = &adapter->hw;
3792 enum ixgbe_phy_type orig_type = hw->phy.type;
3793 device_t dev = adapter->dev;
3796 IXGBE_CORE_LOCK(adapter);
3798 /* Check to see if the PHY type changed */
3799 if (hw->phy.ops.identify) {
3800 hw->phy.type = ixgbe_phy_unknown;
3801 hw->phy.ops.identify(hw);
3804 if (hw->phy.type != orig_type) {
3805 device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3807 if (hw->phy.type == ixgbe_phy_none) {
3808 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3812 /* Try to do the initialization that was skipped before */
3813 if (hw->phy.ops.init)
3814 hw->phy.ops.init(hw);
3815 if (hw->phy.ops.reset)
3816 hw->phy.ops.reset(hw);
3819 err = hw->phy.ops.identify_sfp(hw);
3820 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3822 "Unsupported SFP+ module type was detected.\n");
3826 err = hw->mac.ops.setup_sfp(hw);
3827 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3829 "Setup failure - unsupported SFP+ module type.\n");
3832 if (hw->phy.multispeed_fiber)
3833 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3835 /* Update media type */
3836 switch (hw->mac.ops.get_media_type(hw)) {
3837 case ixgbe_media_type_fiber:
3838 adapter->optics = IFM_10G_SR;
3840 case ixgbe_media_type_copper:
3841 adapter->optics = IFM_10G_TWINAX;
3843 case ixgbe_media_type_cx4:
3844 adapter->optics = IFM_10G_CX4;
3847 adapter->optics = 0;
3851 IXGBE_CORE_UNLOCK(adapter);
3857 ** Tasklet for handling MSF (multispeed fiber) interrupts
3860 ixgbe_handle_msf(void *context, int pending)
3862 struct adapter *adapter = context;
3863 struct ixgbe_hw *hw = &adapter->hw;
3867 IXGBE_CORE_LOCK(adapter);
3868 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3869 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3871 autoneg = hw->phy.autoneg_advertised;
3872 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3873 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3874 if (hw->mac.ops.setup_link)
3875 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3877 /* Adjust media types shown in ifconfig */
3878 ifmedia_removeall(&adapter->media);
3879 ixgbe_add_media_types(adapter);
3880 IXGBE_CORE_UNLOCK(adapter);
3885 ** Tasklet for handling interrupts from an external PHY
3888 ixgbe_handle_phy(void *context, int pending)
3890 struct adapter *adapter = context;
3891 struct ixgbe_hw *hw = &adapter->hw;
3894 error = hw->phy.ops.handle_lasi(hw);
3895 if (error == IXGBE_ERR_OVERTEMP)
3896 device_printf(adapter->dev,
3897 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3898 " PHY will downshift to lower power state!\n");
3900 device_printf(adapter->dev,
3901 "Error handling LASI interrupt: %d\n",
3908 ** Tasklet for reinitializing the Flow Director filter table
3911 ixgbe_reinit_fdir(void *context, int pending)
3913 struct adapter *adapter = context;
3914 struct ifnet *ifp = adapter->ifp;
3916 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3918 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3919 adapter->fdir_reinit = 0;
3920 /* re-enable flow director interrupts */
3921 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3922 /* Restart the interface */
3923 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3928 /*********************************************************************
3930 * Configure DMA Coalescing
3932 **********************************************************************/
3934 ixgbe_config_dmac(struct adapter *adapter)
3936 struct ixgbe_hw *hw = &adapter->hw;
3937 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3939 if (hw->mac.type < ixgbe_mac_X550 ||
3940 !hw->mac.ops.dmac_config)
3943 if (dcfg->watchdog_timer ^ adapter->dmac ||
3944 dcfg->link_speed ^ adapter->link_speed) {
3945 dcfg->watchdog_timer = adapter->dmac;
3946 dcfg->fcoe_en = false;
3947 dcfg->link_speed = adapter->link_speed;
3950 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3951 dcfg->watchdog_timer, dcfg->link_speed);
3953 hw->mac.ops.dmac_config(hw);
3958 * Checks whether the adapter's ports are capable of
3959 * Wake On LAN by reading the adapter's NVM.
3961 * Sets each port's hw->wol_enabled value depending
3962 * on the value read here.
3965 ixgbe_check_wol_support(struct adapter *adapter)
3967 struct ixgbe_hw *hw = &adapter->hw;
3970 /* Find out WoL support for port */
3971 adapter->wol_support = hw->wol_enabled = 0;
3972 ixgbe_get_device_caps(hw, &dev_caps);
3973 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3974 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3976 adapter->wol_support = hw->wol_enabled = 1;
3978 /* Save initial wake up filter configuration */
3979 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3985 * Prepare the adapter/port for LPLU and/or WoL
3988 ixgbe_setup_low_power_mode(struct adapter *adapter)
3990 struct ixgbe_hw *hw = &adapter->hw;
3991 device_t dev = adapter->dev;
3994 mtx_assert(&adapter->core_mtx, MA_OWNED);
3996 if (!hw->wol_enabled)
3997 ixgbe_set_phy_power(hw, FALSE);
3999 /* Limit power management flow to X550EM baseT */
4000 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4001 && hw->phy.ops.enter_lplu) {
4002 /* Turn off support for APM wakeup. (Using ACPI instead) */
4003 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4004 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4007 * Clear Wake Up Status register to prevent any previous wakeup
4008 * events from waking us up immediately after we suspend.
4010 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4013 * Program the Wakeup Filter Control register with user filter
4016 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4018 /* Enable wakeups and power management in Wakeup Control */
4019 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4020 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4022 /* X550EM baseT adapters need a special LPLU flow */
4023 hw->phy.reset_disable = true;
4024 ixgbe_stop(adapter);
4025 error = hw->phy.ops.enter_lplu(hw);
4028 "Error entering LPLU: %d\n", error);
4029 hw->phy.reset_disable = false;
4031 /* Just stop for other adapters */
4032 ixgbe_stop(adapter);
4038 /**********************************************************************
4040 * Update the board statistics counters.
4042 **********************************************************************/
4044 ixgbe_update_stats_counters(struct adapter *adapter)
4046 struct ixgbe_hw *hw = &adapter->hw;
4047 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4048 u64 total_missed_rx = 0;
4050 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4051 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4052 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4053 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4055 for (int i = 0; i < 16; i++) {
4056 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4057 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4058 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4060 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4061 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4062 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4064 /* Hardware workaround, gprc counts missed packets */
4065 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4066 adapter->stats.pf.gprc -= missed_rx;
4068 if (hw->mac.type != ixgbe_mac_82598EB) {
4069 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4070 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4071 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4072 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4073 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4074 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4075 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4076 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4078 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4079 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4080 /* 82598 only has a counter in the high register */
4081 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4082 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4083 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4087 * Workaround: mprc hardware is incorrectly counting
4088 * broadcasts, so for now we subtract those.
4090 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4091 adapter->stats.pf.bprc += bprc;
4092 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4093 if (hw->mac.type == ixgbe_mac_82598EB)
4094 adapter->stats.pf.mprc -= bprc;
4096 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4097 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4098 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4099 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4100 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4101 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4103 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4104 adapter->stats.pf.lxontxc += lxon;
4105 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4106 adapter->stats.pf.lxofftxc += lxoff;
4107 total = lxon + lxoff;
4109 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4110 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4111 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4112 adapter->stats.pf.gptc -= total;
4113 adapter->stats.pf.mptc -= total;
4114 adapter->stats.pf.ptc64 -= total;
4115 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4117 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4118 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4119 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4120 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4121 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4122 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4123 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4124 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4125 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4126 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4127 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4128 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4129 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4130 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4131 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4132 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4133 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4134 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4135 /* Only read FCOE on 82599 */
4136 if (hw->mac.type != ixgbe_mac_82598EB) {
4137 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4138 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4139 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4140 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4141 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4144 /* Fill out the OS statistics structure */
4145 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4146 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4147 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4148 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4149 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4150 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4151 IXGBE_SET_COLLISIONS(adapter, 0);
4152 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4153 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4154 + adapter->stats.pf.rlec);
#if __FreeBSD_version >= 1100036
/*
** if_get_counter(9) method: return the accumulated soft
** statistics, falling back to the stack's defaults for
** counters the driver does not track itself.
*/
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	uint64_t rv = 0;

	adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		/* Sum the buf_ring drops across all TX queues */
		txr = adapter->tx_rings;
		for (int i = 0; i < adapter->num_queues; i++, txr++)
			rv += txr->br->br_drops;
		return (rv);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif
4198 /** ixgbe_sysctl_tdh_handler - Handler function
4199 * Retrieves the TDH value from the hardware
4202 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4206 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4209 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4210 error = sysctl_handle_int(oidp, &val, 0, req);
4211 if (error || !req->newptr)
4216 /** ixgbe_sysctl_tdt_handler - Handler function
4217 * Retrieves the TDT value from the hardware
4220 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4224 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4227 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4228 error = sysctl_handle_int(oidp, &val, 0, req);
4229 if (error || !req->newptr)
4234 /** ixgbe_sysctl_rdh_handler - Handler function
4235 * Retrieves the RDH value from the hardware
4238 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4242 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4245 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4246 error = sysctl_handle_int(oidp, &val, 0, req);
4247 if (error || !req->newptr)
4252 /** ixgbe_sysctl_rdt_handler - Handler function
4253 * Retrieves the RDT value from the hardware
4256 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4260 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4263 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4264 error = sysctl_handle_int(oidp, &val, 0, req);
4265 if (error || !req->newptr)
4271 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4274 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4275 unsigned int reg, usec, rate;
4277 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4278 usec = ((reg & 0x0FF8) >> 3);
4280 rate = 500000 / usec;
4283 error = sysctl_handle_int(oidp, &rate, 0, req);
4284 if (error || !req->newptr)
4286 reg &= ~0xfff; /* default, no limitation */
4287 ixgbe_max_interrupt_rate = 0;
4288 if (rate > 0 && rate < 500000) {
4291 ixgbe_max_interrupt_rate = rate;
4292 reg |= ((4000000/rate) & 0xff8 );
4294 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4299 ixgbe_add_device_sysctls(struct adapter *adapter)
4301 device_t dev = adapter->dev;
4302 struct ixgbe_hw *hw = &adapter->hw;
4303 struct sysctl_oid_list *child;
4304 struct sysctl_ctx_list *ctx;
4306 ctx = device_get_sysctl_ctx(dev);
4307 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4309 /* Sysctls for all devices */
4310 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4311 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4312 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4314 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4316 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4318 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4319 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4320 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4322 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4323 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4324 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4327 /* testing sysctls (for all devices) */
4328 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4329 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4330 ixgbe_sysctl_power_state, "I", "PCI Power State");
4332 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4333 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4334 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4336 /* for X550 series devices */
4337 if (hw->mac.type >= ixgbe_mac_X550)
4338 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4339 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4340 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4342 /* for X552 backplane devices */
4343 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4344 struct sysctl_oid *eee_node;
4345 struct sysctl_oid_list *eee_list;
4347 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4349 "Energy Efficient Ethernet sysctls");
4350 eee_list = SYSCTL_CHILDREN(eee_node);
4352 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4353 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4354 ixgbe_sysctl_eee_enable, "I",
4355 "Enable or Disable EEE");
4357 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4358 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4359 ixgbe_sysctl_eee_negotiated, "I",
4360 "EEE negotiated on link");
4362 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4363 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4364 ixgbe_sysctl_eee_tx_lpi_status, "I",
4365 "Whether or not TX link is in LPI state");
4367 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4368 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4369 ixgbe_sysctl_eee_rx_lpi_status, "I",
4370 "Whether or not RX link is in LPI state");
4372 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4373 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4374 ixgbe_sysctl_eee_tx_lpi_delay, "I",
4375 "TX LPI entry delay in microseconds");
4378 /* for WoL-capable devices */
4379 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4380 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4381 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4382 ixgbe_sysctl_wol_enable, "I",
4383 "Enable/Disable Wake on LAN");
4385 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4386 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4387 ixgbe_sysctl_wufc, "I",
4388 "Enable/Disable Wake Up Filters");
4391 /* for X552/X557-AT devices */
4392 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4393 struct sysctl_oid *phy_node;
4394 struct sysctl_oid_list *phy_list;
4396 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4398 "External PHY sysctls");
4399 phy_list = SYSCTL_CHILDREN(phy_node);
4401 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4402 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4403 ixgbe_sysctl_phy_temp, "I",
4404 "Current External PHY Temperature (Celsius)");
4406 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4407 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4408 ixgbe_sysctl_phy_overtemp_occurred, "I",
4409 "External PHY High Temperature Event Occurred");
4414 * Add sysctl variables, one per statistic, to the system.
4417 ixgbe_add_hw_stats(struct adapter *adapter)
4419 device_t dev = adapter->dev;
4421 struct tx_ring *txr = adapter->tx_rings;
4422 struct rx_ring *rxr = adapter->rx_rings;
4424 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4425 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4426 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4427 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4429 struct sysctl_oid *stat_node, *queue_node;
4430 struct sysctl_oid_list *stat_list, *queue_list;
4432 #define QUEUE_NAME_LEN 32
4433 char namebuf[QUEUE_NAME_LEN];
4435 /* Driver Statistics */
4436 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4437 CTLFLAG_RD, &adapter->dropped_pkts,
4438 "Driver dropped packets");
4439 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4440 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4441 "m_defrag() failed");
4442 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4443 CTLFLAG_RD, &adapter->watchdog_events,
4444 "Watchdog timeouts");
4445 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4446 CTLFLAG_RD, &adapter->link_irq,
4447 "Link MSIX IRQ Handled");
4449 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4450 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4451 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4452 CTLFLAG_RD, NULL, "Queue Name");
4453 queue_list = SYSCTL_CHILDREN(queue_node);
4455 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4456 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4457 sizeof(&adapter->queues[i]),
4458 ixgbe_sysctl_interrupt_rate_handler, "IU",
4460 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4461 CTLFLAG_RD, &(adapter->queues[i].irqs),
4462 "irqs on this queue");
4463 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4464 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4465 ixgbe_sysctl_tdh_handler, "IU",
4466 "Transmit Descriptor Head");
4467 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4468 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4469 ixgbe_sysctl_tdt_handler, "IU",
4470 "Transmit Descriptor Tail");
4471 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4472 CTLFLAG_RD, &txr->tso_tx,
4474 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4475 CTLFLAG_RD, &txr->no_tx_dma_setup,
4476 "Driver tx dma failure in xmit");
4477 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4478 CTLFLAG_RD, &txr->no_desc_avail,
4479 "Queue No Descriptor Available");
4480 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4481 CTLFLAG_RD, &txr->total_packets,
4482 "Queue Packets Transmitted");
4483 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4484 CTLFLAG_RD, &txr->br->br_drops,
4485 "Packets dropped in buf_ring");
4488 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4489 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4490 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4491 CTLFLAG_RD, NULL, "Queue Name");
4492 queue_list = SYSCTL_CHILDREN(queue_node);
4494 struct lro_ctrl *lro = &rxr->lro;
4496 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4497 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4498 CTLFLAG_RD, NULL, "Queue Name");
4499 queue_list = SYSCTL_CHILDREN(queue_node);
4501 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4502 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4503 ixgbe_sysctl_rdh_handler, "IU",
4504 "Receive Descriptor Head");
4505 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4506 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4507 ixgbe_sysctl_rdt_handler, "IU",
4508 "Receive Descriptor Tail");
4509 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4510 CTLFLAG_RD, &rxr->rx_packets,
4511 "Queue Packets Received");
4512 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4513 CTLFLAG_RD, &rxr->rx_bytes,
4514 "Queue Bytes Received");
4515 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4516 CTLFLAG_RD, &rxr->rx_copies,
4517 "Copied RX Frames");
4518 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4519 CTLFLAG_RD, &lro->lro_queued, 0,
4521 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4522 CTLFLAG_RD, &lro->lro_flushed, 0,
4526 /* MAC stats get the own sub node */
4528 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4529 CTLFLAG_RD, NULL, "MAC Statistics");
4530 stat_list = SYSCTL_CHILDREN(stat_node);
4532 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4533 CTLFLAG_RD, &stats->crcerrs,
4535 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4536 CTLFLAG_RD, &stats->illerrc,
4537 "Illegal Byte Errors");
4538 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4539 CTLFLAG_RD, &stats->errbc,
4541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4542 CTLFLAG_RD, &stats->mspdc,
4543 "MAC Short Packets Discarded");
4544 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4545 CTLFLAG_RD, &stats->mlfc,
4546 "MAC Local Faults");
4547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4548 CTLFLAG_RD, &stats->mrfc,
4549 "MAC Remote Faults");
4550 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4551 CTLFLAG_RD, &stats->rlec,
4552 "Receive Length Errors");
4554 /* Flow Control stats */
4555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4556 CTLFLAG_RD, &stats->lxontxc,
4557 "Link XON Transmitted");
4558 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4559 CTLFLAG_RD, &stats->lxonrxc,
4560 "Link XON Received");
4561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4562 CTLFLAG_RD, &stats->lxofftxc,
4563 "Link XOFF Transmitted");
4564 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4565 CTLFLAG_RD, &stats->lxoffrxc,
4566 "Link XOFF Received");
4568 /* Packet Reception Stats */
4569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4570 CTLFLAG_RD, &stats->tor,
4571 "Total Octets Received");
4572 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4573 CTLFLAG_RD, &stats->gorc,
4574 "Good Octets Received");
4575 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4576 CTLFLAG_RD, &stats->tpr,
4577 "Total Packets Received");
4578 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4579 CTLFLAG_RD, &stats->gprc,
4580 "Good Packets Received");
4581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4582 CTLFLAG_RD, &stats->mprc,
4583 "Multicast Packets Received");
4584 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4585 CTLFLAG_RD, &stats->bprc,
4586 "Broadcast Packets Received");
4587 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4588 CTLFLAG_RD, &stats->prc64,
4589 "64 byte frames received ");
4590 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4591 CTLFLAG_RD, &stats->prc127,
4592 "65-127 byte frames received");
4593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4594 CTLFLAG_RD, &stats->prc255,
4595 "128-255 byte frames received");
4596 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4597 CTLFLAG_RD, &stats->prc511,
4598 "256-511 byte frames received");
4599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4600 CTLFLAG_RD, &stats->prc1023,
4601 "512-1023 byte frames received");
4602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4603 CTLFLAG_RD, &stats->prc1522,
4604 "1023-1522 byte frames received");
4605 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4606 CTLFLAG_RD, &stats->ruc,
4607 "Receive Undersized");
4608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4609 CTLFLAG_RD, &stats->rfc,
4610 "Fragmented Packets Received ");
4611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4612 CTLFLAG_RD, &stats->roc,
4613 "Oversized Packets Received");
4614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4615 CTLFLAG_RD, &stats->rjc,
4617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4618 CTLFLAG_RD, &stats->mngprc,
4619 "Management Packets Received");
4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4621 CTLFLAG_RD, &stats->mngptc,
4622 "Management Packets Dropped");
4623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4624 CTLFLAG_RD, &stats->xec,
4627 /* Packet Transmission Stats */
4628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4629 CTLFLAG_RD, &stats->gotc,
4630 "Good Octets Transmitted");
4631 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4632 CTLFLAG_RD, &stats->tpt,
4633 "Total Packets Transmitted");
4634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4635 CTLFLAG_RD, &stats->gptc,
4636 "Good Packets Transmitted");
4637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4638 CTLFLAG_RD, &stats->bptc,
4639 "Broadcast Packets Transmitted");
4640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4641 CTLFLAG_RD, &stats->mptc,
4642 "Multicast Packets Transmitted");
4643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4644 CTLFLAG_RD, &stats->mngptc,
4645 "Management Packets Transmitted");
4646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4647 CTLFLAG_RD, &stats->ptc64,
4648 "64 byte frames transmitted ");
4649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4650 CTLFLAG_RD, &stats->ptc127,
4651 "65-127 byte frames transmitted");
4652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4653 CTLFLAG_RD, &stats->ptc255,
4654 "128-255 byte frames transmitted");
4655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4656 CTLFLAG_RD, &stats->ptc511,
4657 "256-511 byte frames transmitted");
4658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4659 CTLFLAG_RD, &stats->ptc1023,
4660 "512-1023 byte frames transmitted");
4661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4662 CTLFLAG_RD, &stats->ptc1522,
4663 "1024-1522 byte frames transmitted");
/*
 * Register a read/write integer sysctl node under this device's sysctl
 * tree; the node reads/writes *limit, initialized to 'value'.
 * NOTE(review): listing is elided (gaps in embedded line numbers) —
 * return type and braces are not visible here.
 */
4667 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4668 const char *description, int *limit, int value)
4671 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4672 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4673 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
/*
 * Sysctl handler for flow control. A read reports the current mode;
 * a write delegates validation and programming to ixgbe_set_flowcntl().
 * NOTE(review): declarations of 'fc' and 'error' are in elided lines.
 */
4677 ** Set flow control using sysctl:
4678 ** Flow control values:
4685 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4688 struct adapter *adapter;
4690 adapter = (struct adapter *) arg1;
/* Copy value out to (or in from) userland; stop on error or pure read. */
4693 error = sysctl_handle_int(oidp, &fc, 0, req);
4694 if ((error) || (req->newptr == NULL))
4697 /* Don't bother if it's not changed */
4698 if (adapter->fc == fc)
4701 return ixgbe_set_flowcntl(adapter, fc);
/*
 * Program a new flow-control mode into the hardware.
 * For pause modes, RX drop is disabled on multiqueue so back-pressure
 * reaches the link partner; for "none" (default branch visible below)
 * RX drop is enabled instead so one full queue cannot stall the engine.
 * NOTE(review): the switch statement, an ixgbe_fc_full case label and
 * the assignment of adapter->fc are presumably in elided lines — verify
 * against the full file.
 */
4706 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4710 case ixgbe_fc_rx_pause:
4711 case ixgbe_fc_tx_pause:
4713 adapter->hw.fc.requested_mode = adapter->fc;
4714 if (adapter->num_queues > 1)
4715 ixgbe_disable_rx_drop(adapter);
4718 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4719 if (adapter->num_queues > 1)
4720 ixgbe_enable_rx_drop(adapter);
/* A forced value implies flow-control autonegotiation must be off. */
4726 /* Don't autoneg if forcing a value */
4727 adapter->hw.fc.disable_fc_autoneg = TRUE;
4728 ixgbe_fc_enable(&adapter->hw);
/*
 * Sysctl handler for the advertised-speed bitmask (see mask values in
 * the original comment below). A read reports adapter->advertise; a
 * write is forwarded to ixgbe_set_advertise() for validation.
 */
4733 ** Control advertised link speed:
4735 ** 0x1 - advertise 100 Mb
4736 ** 0x2 - advertise 1G
4737 ** 0x4 - advertise 10G
4740 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4742 int error, advertise;
4743 struct adapter *adapter;
4745 adapter = (struct adapter *) arg1;
4746 advertise = adapter->advertise;
4748 error = sysctl_handle_int(oidp, &advertise, 0, req);
4749 if ((error) || (req->newptr == NULL))
4752 return ixgbe_set_advertise(adapter, advertise);
/*
 * Validate and apply an advertised link-speed mask
 * (0x1 = 100Mb, 0x2 = 1Gb, 0x4 = 10Gb; 0x1-0x7 legal).
 * Rejected for backplane media, for anything other than copper or
 * multispeed fiber, and 100Mb is only allowed on X540/X550 MACs.
 * NOTE(review): 'hw'/'dev' initialization and early 'return' statements
 * of the rejection paths are in elided lines.
 */
4756 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4759 struct ixgbe_hw *hw;
4760 ixgbe_link_speed speed;
4762 /* Checks to validate new value */
4763 if (adapter->advertise == advertise) /* no change */
4769 /* No speed changes for backplane media */
4770 if (hw->phy.media_type == ixgbe_media_type_backplane)
4773 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4774 (hw->phy.multispeed_fiber))) {
4776 "Advertised speed can only be set on copper or "
4777 "multispeed fiber media types.\n");
4781 if (advertise < 0x1 || advertise > 0x7) {
4783 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4787 if ((advertise & 0x1)
4788 && (hw->mac.type != ixgbe_mac_X540)
4789 && (hw->mac.type != ixgbe_mac_X550)) {
4790 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
/* Translate bitmask into the hardware link-speed flags and restart link. */
4794 /* Set new value and report new advertised mode */
4796 if (advertise & 0x1)
4797 speed |= IXGBE_LINK_SPEED_100_FULL;
4798 if (advertise & 0x2)
4799 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4800 if (advertise & 0x4)
4801 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4802 adapter->advertise = advertise;
4804 hw->mac.autotry_restart = TRUE;
4805 hw->mac.ops.setup_link(hw, speed, TRUE);
/*
 * Read-only sysctl: current temperature reported by the external PHY
 * on X552/X557-AT (X550EM_x 10G-T) devices; ENOTSUP-style error for
 * all other device IDs (return statements are in elided lines).
 */
4811 * The following two sysctls are for X552/X557-AT devices;
4812 * they deal with the external PHY used in them.
4815 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4817 struct adapter *adapter = (struct adapter *) arg1;
4818 struct ixgbe_hw *hw = &adapter->hw;
4821 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4822 device_printf(adapter->dev,
4823 "Device has no supported external thermal sensor.\n");
4827 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4828 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4830 device_printf(adapter->dev,
4831 "Error reading from PHY's current temperature register\n")
/* The raw register shift producing the reported value is elided. */
4835 /* Shift temp for output */
4838 return (sysctl_handle_int(oidp, NULL, reg, req));
/*
 * Read-only sysctl: reports 0/1 whether the external PHY signals an
 * overtemp condition (bit 0x4000 of its temperature status register).
 * Restricted to the X550EM_x 10G-T device like the sysctl above.
 */
4842 * Reports whether the current PHY temperature is over
4843 * the overtemp threshold.
4844 * - This is reported directly from the PHY
4847 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4849 struct adapter *adapter = (struct adapter *) arg1;
4850 struct ixgbe_hw *hw = &adapter->hw;
4853 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4854 device_printf(adapter->dev,
4855 "Device has no supported external thermal sensor.\n");
4859 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4860 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4862 device_printf(adapter->dev,
4863 "Error reading from PHY's temperature status register\n");
/* Collapse the status bit to a boolean before reporting it. */
4867 /* Get occurrence bit */
4868 reg = !!(reg & 0x4000);
4869 return (sysctl_handle_int(oidp, 0, reg, req));
/*
 * Debug sysctl: writing a non-zero value sets the thermal-sensor bit
 * in EICS, software-triggering an overtemp interrupt from the MAC.
 * NOTE(review): the 'if (fire)' guard around the register write is in
 * an elided line.
 */
4873 ** Thermal Shutdown Trigger (internal MAC)
4874 ** - Set this to 1 to cause an overtemp event to occur
4877 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4879 struct adapter *adapter = (struct adapter *) arg1;
4880 struct ixgbe_hw *hw = &adapter->hw;
4881 int error, fire = 0;
4883 error = sysctl_handle_int(oidp, &fire, 0, req);
4884 if ((error) || (req->newptr == NULL))
4888 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4889 reg |= IXGBE_EICR_TS;
4890 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
/*
 * Sysctl controlling DMA Coalescing: 0 = off, 1 = on with the default
 * 1000us timer, or one of the legal timer values listed below.
 * Illegal values fall through to "do nothing". The interface is
 * re-initialized when running so the new setting takes effect.
 * NOTE(review): the switch on 'newval', the 0/off case, and the legal
 * timer case labels are in elided lines.
 */
4897 ** Manage DMA Coalescing.
4899 ** 0/1 - off / on (use default value of 1000)
4901 ** Legal timer values are:
4902 ** 50,100,250,500,1000,2000,5000,10000
4904 ** Turning off interrupt moderation will also turn this off.
4907 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4909 struct adapter *adapter = (struct adapter *) arg1;
4910 struct ifnet *ifp = adapter->ifp;
4914 newval = adapter->dmac;
4915 error = sysctl_handle_int(oidp, &newval, 0, req);
4916 if ((error) || (req->newptr == NULL))
4925 /* Enable and use default */
4926 adapter->dmac = 1000;
4936 /* Legal values - allow */
4937 adapter->dmac = newval;
4940 /* Do nothing, illegal value */
4944 /* Re-initialize hardware if it's already running */
4945 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4946 ixgbe_init(adapter);
/*
 * Debug sysctl for PCI power states: read returns the current state;
 * writing 3 suspends (D0 -> D3) and writing 0 resumes (D3 -> D0) via
 * the newbus DEVICE_SUSPEND/DEVICE_RESUME methods.
 */
4953 * Sysctl to test power states
4955 * 0 - set device to D0
4956 * 3 - set device to D3
4957 * (none) - get current device power state
4960 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4962 struct adapter *adapter = (struct adapter *) arg1;
4963 device_t dev = adapter->dev;
4964 int curr_ps, new_ps, error = 0;
4966 curr_ps = new_ps = pci_get_powerstate(dev);
4968 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4969 if ((error) || (req->newptr == NULL))
4972 if (new_ps == curr_ps)
/* Only the D0<->D3 transitions are supported; others are elided/rejected. */
4975 if (new_ps == 3 && curr_ps == 0)
4976 error = DEVICE_SUSPEND(dev);
4977 else if (new_ps == 0 && curr_ps == 3)
4978 error = DEVICE_RESUME(dev);
4982 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
/*
 * Sysctl to toggle Wake-on-LAN. The input is normalized to 0/1 and
 * rejected (return in elided line) when the adapter lacks WoL support.
 */
4988 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4994 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4996 struct adapter *adapter = (struct adapter *) arg1;
4997 struct ixgbe_hw *hw = &adapter->hw;
4998 int new_wol_enabled;
5001 new_wol_enabled = hw->wol_enabled;
5002 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5003 if ((error) || (req->newptr == NULL))
5005 new_wol_enabled = !!(new_wol_enabled);
5006 if (new_wol_enabled == hw->wol_enabled)
5009 if (new_wol_enabled > 0 && !adapter->wol_support)
5012 hw->wol_enabled = new_wol_enabled;
/*
 * Sysctl to toggle Energy Efficient Ethernet. Normalizes input to 0/1,
 * rejects enabling when the MAC has no setup_eee op, and re-inits the
 * interface when it is already running so the change takes effect.
 */
5018 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5019 * if supported by the adapter.
5025 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
5027 struct adapter *adapter = (struct adapter *) arg1;
5028 struct ixgbe_hw *hw = &adapter->hw;
5029 struct ifnet *ifp = adapter->ifp;
5030 int new_eee_enabled, error = 0;
5032 new_eee_enabled = adapter->eee_enabled;
5033 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
5034 if ((error) || (req->newptr == NULL))
5036 new_eee_enabled = !!(new_eee_enabled);
5037 if (new_eee_enabled == adapter->eee_enabled)
5040 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5043 adapter->eee_enabled = new_eee_enabled;
5045 /* Re-initialize hardware if it's already running */
5046 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5047 ixgbe_init(adapter);
/*
 * Read-only sysctl: 1 if EEE was negotiated with the link partner
 * (NEG bit of the EEE status register), else 0.
 */
5053 * Read-only sysctl indicating whether EEE support was negotiated
5057 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5059 struct adapter *adapter = (struct adapter *) arg1;
5060 struct ixgbe_hw *hw = &adapter->hw;
5063 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5065 return (sysctl_handle_int(oidp, 0, status, req));
/*
 * Read-only sysctl: 1 if the RX link is currently in EEE
 * Low Power Idle state, else 0.
 */
5069 * Read-only sysctl indicating whether RX Link is in LPI state.
5072 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5074 struct adapter *adapter = (struct adapter *) arg1;
5075 struct ixgbe_hw *hw = &adapter->hw;
5078 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5079 IXGBE_EEE_RX_LPI_STATUS);
5081 return (sysctl_handle_int(oidp, 0, status, req));
/*
 * Read-only sysctl: 1 if the TX link is currently in EEE
 * Low Power Idle state, else 0.
 */
5085 * Read-only sysctl indicating whether TX Link is in LPI state.
5088 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5090 struct adapter *adapter = (struct adapter *) arg1;
5091 struct ixgbe_hw *hw = &adapter->hw;
5094 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5095 IXGBE_EEE_TX_LPI_STATUS);
5097 return (sysctl_handle_int(oidp, 0, status, req));
/*
 * Read-only sysctl: reports the TX LPI delay field, which occupies the
 * top bits of the EEE_SU register (hence the >> 26 on output).
 */
5101 * Read-only sysctl indicating TX Link LPI delay
5104 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5106 struct adapter *adapter = (struct adapter *) arg1;
5107 struct ixgbe_hw *hw = &adapter->hw;
5110 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5112 return (sysctl_handle_int(oidp, 0, reg >> 26, req));
/*
 * Sysctl for the Wake Up Filter Control bitmask (flags documented in
 * the original comment below). Only the low byte's flags are user
 * settable: any bit above 0xff is rejected, then the preserved upper
 * bits of the existing wufc are merged back in before storing.
 */
5116 * Sysctl to enable/disable the types of packets that the
5117 * adapter will wake up on upon receipt.
5118 * WUFC - Wake Up Filter Control
5120 * 0x1 - Link Status Change
5121 * 0x2 - Magic Packet
5122 * 0x4 - Direct Exact
5123 * 0x8 - Directed Multicast
5125 * 0x20 - ARP/IPv4 Request Packet
5126 * 0x40 - Direct IPv4 Packet
5127 * 0x80 - Direct IPv6 Packet
5129 * Setting another flag will cause the sysctl to return an
5133 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5135 struct adapter *adapter = (struct adapter *) arg1;
5139 new_wufc = adapter->wufc;
5141 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5142 if ((error) || (req->newptr == NULL))
5144 if (new_wufc == adapter->wufc)
5147 if (new_wufc & 0xffffff00)
5151 new_wufc |= (0xffffff & adapter->wufc);
5152 adapter->wufc = new_wufc;
/*
 * Debug-only (IXGBE_DEBUG) sysctl that dumps the RSS redirection
 * table (RETA, plus the extended ERETA entries on X550-class MACs,
 * where reta_size is larger) into an sbuf for userland.
 * NOTE(review): assignment of 'reta_size' per MAC type and the sbuf
 * delete/cleanup are in elided lines.
 */
5160 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5162 struct adapter *adapter = (struct adapter *)arg1;
5163 struct ixgbe_hw *hw = &adapter->hw;
5164 device_t dev = adapter->dev;
5165 int error = 0, reta_size;
5169 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5171 device_printf(dev, "Could not allocate sbuf for output.\n");
5175 // TODO: use sbufs to make a string to print out
5176 /* Set multiplier for RETA setup and table size based on MAC */
5177 switch (adapter->hw.mac.type) {
5178 case ixgbe_mac_X550:
5179 case ixgbe_mac_X550EM_x:
5187 /* Print out the redirection table */
5188 sbuf_cat(buf, "\n");
5189 for (int i = 0; i < reta_size; i++) {
/* Entries 0-31 live in RETA; entries >= 32 in ERETA (index offset by 32). */
5191 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5192 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5194 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5195 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5199 // TODO: print more config
5201 error = sbuf_finish(buf);
5203 device_printf(dev, "Error finishing sbuf: %d\n", error);
5208 #endif /* IXGBE_DEBUG */
/*
 * Set the DROP_EN bit in SRRCTL for every RX queue (and the QDE drop
 * bit for every VF pool) so a full buffer drops packets instead of
 * stalling the whole RX engine. Used with multiqueue when flow
 * control is off; see ixgbe_set_flowcntl().
 */
5211 ** Enable the hardware to drop packets when the buffer is
5212 ** full. This is useful when multiqueue,so that no single
5213 ** queue being full stalls the entire RX engine. We only
5214 ** enable this when Multiqueue AND when Flow Control is
5218 ixgbe_enable_rx_drop(struct adapter *adapter)
5220 struct ixgbe_hw *hw = &adapter->hw;
5222 for (int i = 0; i < adapter->num_queues; i++) {
5223 struct rx_ring *rxr = &adapter->rx_rings[i];
5224 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5225 srrctl |= IXGBE_SRRCTL_DROP_EN;
5226 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5229 /* enable drop for each vf */
5230 for (int i = 0; i < adapter->num_vfs; i++) {
5231 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5232 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Inverse of ixgbe_enable_rx_drop(): clear DROP_EN in SRRCTL for each
 * RX queue and write QDE without the enable bit for each VF pool, so
 * flow-control back-pressure works instead of dropping.
 */
5239 ixgbe_disable_rx_drop(struct adapter *adapter)
5241 struct ixgbe_hw *hw = &adapter->hw;
5243 for (int i = 0; i < adapter->num_queues; i++) {
5244 struct rx_ring *rxr = &adapter->rx_rings[i];
5245 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5246 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5247 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5250 /* disable drop for each vf */
5251 for (int i = 0; i < adapter->num_vfs; i++) {
5252 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5253 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Software-trigger interrupts for the queue bitmap in 'queues'.
 * 82598 uses the single 32-bit EICS register; later MACs split the
 * 64-bit queue mask across EICS_EX(0)/EICS_EX(1).
 */
5259 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5263 switch (adapter->hw.mac.type) {
5264 case ixgbe_mac_82598EB:
5265 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5266 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5268 case ixgbe_mac_82599EB:
5269 case ixgbe_mac_X540:
5270 case ixgbe_mac_X550:
5271 case ixgbe_mac_X550EM_x:
5272 mask = (queues & 0xFFFFFFFF);
5273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5274 mask = (queues >> 32);
5275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * Send a PF control message to every active VF (e.g. after a PF
 * reset) so the VFs know to resynchronize with the PF.
 */
5285 ** Support functions for SRIOV/VF management
5289 ixgbe_ping_all_vfs(struct adapter *adapter)
5291 struct ixgbe_vf *vf;
5293 for (int i = 0; i < adapter->num_vfs; i++) {
5294 vf = &adapter->vfs[i];
5295 if (vf->flags & IXGBE_VF_ACTIVE)
5296 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program per-VF offload (VMOLR) and VLAN-insert (VMVIR) registers for
 * the VF's default VLAN 'tag'. When tag != 0 (visible branch below),
 * untagged traffic is rejected and all TX is tagged with 'tag'; the
 * tag == 0 branch (elided lines around 5324-5330) presumably accepts
 * untagged traffic with no default tag — verify against full file.
 */
5302 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5305 struct ixgbe_hw *hw;
5306 uint32_t vmolr, vmvir;
5312 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5314 /* Do not receive packets that pass inexact filters. */
5315 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5317 /* Disable Multicast Promicuous Mode. */
5318 vmolr &= ~IXGBE_VMOLR_MPE;
5320 /* Accept broadcasts. */
5321 vmolr |= IXGBE_VMOLR_BAM;
5324 /* Accept non-vlan tagged traffic. */
5325 //vmolr |= IXGBE_VMOLR_AUPE;
5327 /* Allow VM to tag outgoing traffic; no default tag. */
5330 /* Require vlan-tagged traffic. */
5331 vmolr &= ~IXGBE_VMOLR_AUPE;
5333 /* Tag all traffic with provided vlan tag. */
5334 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5336 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5337 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the PF and this VF have compatible max frame sizes.
 * Only 82599 has the restriction; X540+ allow any jumbo combination.
 * Legacy (<= 1.0) VF APIs: no jumbo on either side. 1.1+: VF jumbo
 * only works if the PF is also jumbo. (TRUE/FALSE returns are in
 * elided lines between the visible conditions.)
 */
5342 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5346 * Frame size compatibility between PF and VF is only a problem on
5347 * 82599-based cards. X540 and later support any combination of jumbo
5348 * frames on PFs and VFs.
5350 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5353 switch (vf->api_ver) {
5354 case IXGBE_API_VER_1_0:
5355 case IXGBE_API_VER_UNKNOWN:
5357 * On legacy (1.0 and older) VF versions, we don't support jumbo
5358 * frames on either the PF or the VF.
5360 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5361 vf->max_frame_size > ETHER_MAX_LEN)
5367 case IXGBE_API_VER_1_1:
5370 * 1.1 or later VF versions always work if they aren't using
5373 if (vf->max_frame_size <= ETHER_MAX_LEN)
5377 * Jumbo frames only work with VFs if the PF is also using jumbo
5380 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Reset a VF's PF-side state: reapply its default VLAN, clear its RAR
 * entry, and force API renegotiation on the next VF message.
 */
5390 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5392 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5394 // XXX clear multicast addresses
5396 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5398 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/*
 * Set the VF's bit in the appropriate VFTE register, enabling
 * transmit for its pool.
 */
5403 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5405 struct ixgbe_hw *hw;
5406 uint32_t vf_index, vfte;
5410 vf_index = IXGBE_VF_INDEX(vf->pool);
5411 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5412 vfte |= IXGBE_VF_BIT(vf->pool);
5413 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Enable receive for the VF's pool in VFRE — but only when its frame
 * size is compatible with the PF's (see
 * ixgbe_vf_frame_size_compatible()); otherwise the bit is cleared.
 */
5418 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5420 struct ixgbe_hw *hw;
5421 uint32_t vf_index, vfre;
5425 vf_index = IXGBE_VF_INDEX(vf->pool);
5426 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5427 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5428 vfre |= IXGBE_VF_BIT(vf->pool);
5430 vfre &= ~IXGBE_VF_BIT(vf->pool);
5431 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Handle a VF_RESET mailbox message: reset the VF's PF-side state,
 * reinstall its RAR entry if its MAC is valid (ACK) else NACK,
 * re-enable TX/RX, mark the VF clear-to-send, and reply with the
 * permanent MAC address and multicast filter type.
 */
5436 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5438 struct ixgbe_hw *hw;
5440 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5444 ixgbe_process_vf_reset(adapter, vf);
5446 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5447 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5448 vf->ether_addr, vf->pool, TRUE);
5449 ack = IXGBE_VT_MSGTYPE_ACK;
5451 ack = IXGBE_VT_MSGTYPE_NACK;
5453 ixgbe_vf_enable_transmit(adapter, vf);
5454 ixgbe_vf_enable_receive(adapter, vf);
5456 vf->flags |= IXGBE_VF_CTS;
/* Reply: [0]=msg type|ack|CTS, [1..2]=MAC address, [3]=mc filter type. */
5458 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5459 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5460 resp[3] = hw->mac.mc_filter_type;
5461 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a SET_MAC_ADDR mailbox message: NACK if the VF lacks the
 * CAP_MAC permission (and the address actually changed) or if the
 * address fails validation; otherwise install it in the VF's RAR
 * entry and ACK.
 */
5466 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5470 mac = (uint8_t*)&msg[1];
5472 /* Check that the VF has permission to change the MAC address. */
5473 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5474 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5478 if (ixgbe_validate_mac_addr(mac) != 0) {
5479 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5483 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5485 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5488 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_MULTICAST mailbox message: record up to IXGBE_MAX_VF_MC
 * 12-bit multicast hashes for the VF and set the corresponding bits in
 * the MTA (bits 11:5 select one of 128 registers, bits 4:0 the bit),
 * then enable ROMPE for the pool and ACK.
 */
5493 ** VF multicast addresses are set by using the appropriate bit in
5494 ** 1 of 128 32 bit addresses (4096 possible).
5497 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5499 u16 *list = (u16*)&msg[1];
5501 u32 vmolr, vec_bit, vec_reg, mta_reg;
5503 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5504 entries = min(entries, IXGBE_MAX_VF_MC);
5506 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5508 vf->num_mc_hashes = entries;
5510 /* Set the appropriate MTA bit */
5511 for (int i = 0; i < entries; i++) {
5512 vf->mc_hash[i] = list[i];
5513 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5514 vec_bit = vf->mc_hash[i] & 0x1F;
5515 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5516 mta_reg |= (1 << vec_bit);
5517 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5520 vmolr |= IXGBE_VMOLR_ROMPE;
5521 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5522 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_VLAN mailbox message: NACK if the VF lacks CAP_VLAN or
 * tries to enable VLAN id 0; otherwise update the VLAN filter table
 * (VFTA) for the VF's pool and ACK.
 */
5528 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5530 struct ixgbe_hw *hw;
5535 enable = IXGBE_VT_MSGINFO(msg[0]);
5536 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5538 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5539 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5543 /* It is illegal to enable vlan tag 0. */
5544 if (tag == 0 && enable != 0){
5545 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5549 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5550 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_LPE (large packet enable / max frame size) message.
 * Out-of-range requests are intentionally ACKed without effect.
 * A valid size is recorded (minus CRC), the PF-wide max frame is
 * updated, RX is re-evaluated for frame-size compatibility, and MHADD
 * is grown if the PF max now exceeds the programmed MFS field.
 */
5555 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5557 struct ixgbe_hw *hw;
5558 uint32_t vf_max_size, pf_max_size, mhadd;
5561 vf_max_size = msg[1];
5563 if (vf_max_size < ETHER_CRC_LEN) {
5564 /* We intentionally ACK invalid LPE requests. */
5565 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5569 vf_max_size -= ETHER_CRC_LEN;
5571 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5572 /* We intentionally ACK invalid LPE requests. */
5573 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5577 vf->max_frame_size = vf_max_size;
5578 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5581 * We might have to disable reception to this VF if the frame size is
5582 * not compatible with the config on the PF.
5584 ixgbe_vf_enable_receive(adapter, vf);
5586 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5587 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5589 if (pf_max_size < adapter->max_frame_size) {
5590 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5591 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5592 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5595 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * SET_MACVLAN mailbox message: not implemented — always NACKed
 * (the existing XXX marks the missing implementation).
 */
5600 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5603 //XXX implement this
5604 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle API_NEGOTIATE: accept (ACK) mailbox API 1.0 or 1.1 requested
 * in msg[1]; anything else resets the VF to UNKNOWN and NACKs.
 */
5609 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5614 case IXGBE_API_VER_1_0:
5615 case IXGBE_API_VER_1_1:
5616 vf->api_ver = msg[1];
5617 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5620 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5621 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle GET_QUEUES (API 1.1+ only; pre-1.1 gets NACKed): reply with
 * the per-VF TX/RX queue counts for the current IOV mode, whether a
 * transparent default VLAN is in force, and default queue 0.
 */
5628 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5631 struct ixgbe_hw *hw;
5632 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5637 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5639 case IXGBE_API_VER_1_0:
5640 case IXGBE_API_VER_UNKNOWN:
5641 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5645 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5646 IXGBE_VT_MSGTYPE_CTS;
5648 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5649 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5650 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5651 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5652 resp[IXGBE_VF_DEF_QUEUE] = 0;
5654 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one mailbox message from a VF and dispatch it. VF_RESET is
 * always honored; any other message is NACKed unless the VF has
 * completed a reset handshake (CTS flag). Unknown message types are
 * NACKed by the default case.
 */
5659 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5661 struct ixgbe_hw *hw;
5662 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5667 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5672 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5673 adapter->ifp->if_xname, msg[0], vf->pool);
5674 if (msg[0] == IXGBE_VF_RESET) {
5675 ixgbe_vf_reset_msg(adapter, vf, msg);
5679 if (!(vf->flags & IXGBE_VF_CTS)) {
5680 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5684 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5685 case IXGBE_VF_SET_MAC_ADDR:
5686 ixgbe_vf_set_mac(adapter, vf, msg);
5688 case IXGBE_VF_SET_MULTICAST:
5689 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5691 case IXGBE_VF_SET_VLAN:
5692 ixgbe_vf_set_vlan(adapter, vf, msg);
5694 case IXGBE_VF_SET_LPE:
5695 ixgbe_vf_set_lpe(adapter, vf, msg);
5697 case IXGBE_VF_SET_MACVLAN:
5698 ixgbe_vf_set_macvlan(adapter, vf, msg);
5700 case IXGBE_VF_API_NEGOTIATE:
5701 ixgbe_vf_api_negotiate(adapter, vf, msg);
5703 case IXGBE_VF_GET_QUEUES:
5704 ixgbe_vf_get_queues(adapter, vf, msg);
5707 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Taskqueue handler for VF -> PF mailbox events. Under the core lock,
 * each active VF is checked for pending reset, message, and ack
 * notifications, which are handled in that order.
 * NOTE(review): assignment of 'adapter'/'hw' from 'context' is in
 * elided lines.
 */
5713 * Tasklet for handling VF -> PF mailbox messages.
5716 ixgbe_handle_mbx(void *context, int pending)
5718 struct adapter *adapter;
5719 struct ixgbe_hw *hw;
5720 struct ixgbe_vf *vf;
5726 IXGBE_CORE_LOCK(adapter);
5727 for (i = 0; i < adapter->num_vfs; i++) {
5728 vf = &adapter->vfs[i];
5730 if (vf->flags & IXGBE_VF_ACTIVE) {
5731 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5732 ixgbe_process_vf_reset(adapter, vf);
5734 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5735 ixgbe_process_vf_msg(adapter, vf);
5737 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5738 ixgbe_process_vf_ack(adapter, vf);
5741 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI_IOV entry point: create 'num_vfs' virtual functions. Validates
 * the count against the IOV mode's maximum, allocates the VF state
 * array, and re-initializes the adapter under the core lock.
 * On failure num_vfs is reset to 0 (error returns are in elided
 * lines).
 */
5746 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5748 struct adapter *adapter;
5749 enum ixgbe_iov_mode mode;
5751 adapter = device_get_softc(dev);
5752 adapter->num_vfs = num_vfs;
5753 mode = ixgbe_get_iov_mode(adapter);
5755 if (num_vfs > ixgbe_max_vfs(mode)) {
5756 adapter->num_vfs = 0;
5760 IXGBE_CORE_LOCK(adapter);
5762 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5765 if (adapter->vfs == NULL) {
5766 adapter->num_vfs = 0;
5767 IXGBE_CORE_UNLOCK(adapter);
5771 ixgbe_init_locked(adapter);
5773 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI_IOV teardown: re-enable RX/TX for the PF pool only, clear the
 * VFRE/VFTE enables for all other pools (loop header is in elided
 * lines), disable virtualization (VT_CTL = 0), and free the VF state
 * array.
 */
5780 ixgbe_uninit_iov(device_t dev)
5782 struct ixgbe_hw *hw;
5783 struct adapter *adapter;
5784 uint32_t pf_reg, vf_reg;
5786 adapter = device_get_softc(dev);
5789 IXGBE_CORE_LOCK(adapter);
5791 /* Enable rx/tx for the PF and disable it for all VFs. */
5792 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5793 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5794 IXGBE_VF_BIT(adapter->pool));
5795 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5796 IXGBE_VF_BIT(adapter->pool));
5802 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5803 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5805 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5807 free(adapter->vfs, M_IXGBE);
5808 adapter->vfs = NULL;
5809 adapter->num_vfs = 0;
5811 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the hardware for the selected SR-IOV mode: RSS/VMDq queue
 * assignment (MRQC), TX queue layout (MTQC), PCIe VT mode (GCR_EXT),
 * MSI-X vector mode (GPIE), PF RX/TX enables, loopback for VM-to-VM
 * traffic, pool-select/replication (VT_CTL), then per-VF init.
 * NOTE(review): the switch statements selecting the 64-pool vs
 * 32-pool variants are in elided lines of this listing.
 *
 * BUGFIX: line 5875 previously cleared IXGBE_GPIE_VTMODE_MASK out of
 * 'gcr_ext' — a copy-paste of the GCR_EXT sequence above — after
 * gcr_ext had already been written back at 5871, so the GPIE VTMODE
 * field was never cleared before OR-ing the new mode in. The mask
 * must be applied to 'gpie', which was just read at 5874.
 */
5816 ixgbe_initialize_iov(struct adapter *adapter)
5818 struct ixgbe_hw *hw = &adapter->hw;
5819 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5820 enum ixgbe_iov_mode mode;
5823 mode = ixgbe_get_iov_mode(adapter);
5824 if (mode == IXGBE_NO_VM)
5827 IXGBE_CORE_LOCK_ASSERT(adapter);
5829 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5830 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5834 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5837 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5840 panic("Unexpected SR-IOV mode %d", mode);
5842 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5844 mtqc = IXGBE_MTQC_VT_ENA;
5847 mtqc |= IXGBE_MTQC_64VF;
5850 mtqc |= IXGBE_MTQC_32VF;
5853 panic("Unexpected SR-IOV mode %d", mode);
5855 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5858 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5859 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5860 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5863 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5866 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5869 panic("Unexpected SR-IOV mode %d", mode);
5871 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5874 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Clear the VTMODE field of GPIE (was wrongly applied to gcr_ext). */
5875 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5878 gpie |= IXGBE_GPIE_VTMODE_64;
5881 gpie |= IXGBE_GPIE_VTMODE_32;
5884 panic("Unexpected SR-IOV mode %d", mode);
5886 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5888 /* Enable rx/tx for the PF. */
5889 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5890 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5891 IXGBE_VF_BIT(adapter->pool));
5892 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5893 IXGBE_VF_BIT(adapter->pool));
5895 /* Allow VM-to-VM communication. */
5896 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5898 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5899 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5900 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5902 for (i = 0; i < adapter->num_vfs; i++)
5903 ixgbe_init_vf(adapter, &adapter->vfs[i]);
/*
 * Walk all active VFs and fold each one's max frame size into the
 * adapter-wide maximum via ixgbe_update_max_frame(). Core lock held.
 */
5908 ** Check the max frame setting of all active VF's
5911 ixgbe_recalculate_max_frame(struct adapter *adapter)
5913 struct ixgbe_vf *vf;
5915 IXGBE_CORE_LOCK_ASSERT(adapter);
5917 for (int i = 0; i < adapter->num_vfs; i++) {
5918 vf = &adapter->vfs[i];
5919 if (vf->flags & IXGBE_VF_ACTIVE)
5920 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * (Re)initialize one VF's hardware state under the core lock: no-op
 * for inactive VFs; otherwise unmask its mailbox interrupt (PFMBIMR),
 * apply its default VLAN, install its RAR entry when the MAC is
 * valid, enable TX/RX, and ping it with a PF control message.
 */
5926 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5928 struct ixgbe_hw *hw;
5929 uint32_t vf_index, pfmbimr;
5931 IXGBE_CORE_LOCK_ASSERT(adapter);
5935 if (!(vf->flags & IXGBE_VF_ACTIVE))
5938 vf_index = IXGBE_VF_INDEX(vf->pool);
5939 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5940 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5941 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5943 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5945 // XXX multicast addresses
5947 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5948 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5949 vf->ether_addr, vf->pool, TRUE);
5952 ixgbe_vf_enable_transmit(adapter, vf);
5953 ixgbe_vf_enable_receive(adapter, vf);
5955 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * PCI_IOV per-VF attach: populate the VF slot from the iov(4) nvlist
 * config (optional fixed MAC and allow-set-mac capability), reserve
 * RAR[vfnum+1] (RAR[0] belongs to the PF), default to 1518-byte
 * frames, mark the VF active, and initialize it in hardware.
 * NOTE(review): the visible 'vf->flags =' at 5992 overwrites, not
 * ORs — elided control flow presumably makes the CAP_MAC assignments
 * above dead or conditional; verify against the full file.
 */
5959 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5961 struct adapter *adapter;
5962 struct ixgbe_vf *vf;
5965 adapter = device_get_softc(dev);
5967 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5968 vfnum, adapter->num_vfs));
5970 IXGBE_CORE_LOCK(adapter);
5971 vf = &adapter->vfs[vfnum];
5974 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5975 vf->rar_index = vfnum + 1;
5976 vf->default_vlan = 0;
5977 vf->max_frame_size = ETHER_MAX_LEN;
5978 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5980 if (nvlist_exists_binary(config, "mac-addr")) {
5981 mac = nvlist_get_binary(config, "mac-addr", NULL);
5982 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5983 if (nvlist_get_bool(config, "allow-set-mac"))
5984 vf->flags |= IXGBE_VF_CAP_MAC;
5987 * If the administrator has not specified a MAC address then
5988 * we must allow the VF to choose one.
5990 vf->flags |= IXGBE_VF_CAP_MAC;
5992 vf->flags = IXGBE_VF_ACTIVE;
5994 ixgbe_init_vf(adapter, vf);
5995 IXGBE_CORE_UNLOCK(adapter);
5999 #endif /* PCI_IOV */