1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
49 /*********************************************************************
51 *********************************************************************/
52 char ixgbe_driver_version[] = "3.1.13-k";
55 /*********************************************************************
58 * Used by probe to select devices to load on
59 * Last field stores an index into ixgbe_strings
60 * Last entry must be all 0s
62 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
63 *********************************************************************/
65 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
99 /* required last entry */
103 /*********************************************************************
104 * Table of branding strings
105 *********************************************************************/
107 static char *ixgbe_strings[] = {
108 "Intel(R) PRO/10GbE PCI-Express Network Driver"
111 /*********************************************************************
112 * Function prototypes
113 *********************************************************************/
114 static int ixgbe_probe(device_t);
115 static int ixgbe_attach(device_t);
116 static int ixgbe_detach(device_t);
117 static int ixgbe_shutdown(device_t);
118 static int ixgbe_suspend(device_t);
119 static int ixgbe_resume(device_t);
120 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121 static void ixgbe_init(void *);
122 static void ixgbe_init_locked(struct adapter *);
123 static void ixgbe_stop(void *);
124 #if __FreeBSD_version >= 1100036
125 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
127 static void ixgbe_add_media_types(struct adapter *);
128 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129 static int ixgbe_media_change(struct ifnet *);
130 static void ixgbe_identify_hardware(struct adapter *);
131 static int ixgbe_allocate_pci_resources(struct adapter *);
132 static void ixgbe_get_slot_info(struct adapter *);
133 static int ixgbe_allocate_msix(struct adapter *);
134 static int ixgbe_allocate_legacy(struct adapter *);
135 static int ixgbe_setup_msix(struct adapter *);
136 static void ixgbe_free_pci_resources(struct adapter *);
137 static void ixgbe_local_timer(void *);
138 static int ixgbe_setup_interface(device_t, struct adapter *);
139 static void ixgbe_config_gpie(struct adapter *);
140 static void ixgbe_config_dmac(struct adapter *);
141 static void ixgbe_config_delay_values(struct adapter *);
142 static void ixgbe_config_link(struct adapter *);
143 static void ixgbe_check_wol_support(struct adapter *);
144 static int ixgbe_setup_low_power_mode(struct adapter *);
145 static void ixgbe_rearm_queues(struct adapter *, u64);
147 static void ixgbe_initialize_transmit_units(struct adapter *);
148 static void ixgbe_initialize_receive_units(struct adapter *);
149 static void ixgbe_enable_rx_drop(struct adapter *);
150 static void ixgbe_disable_rx_drop(struct adapter *);
151 static void ixgbe_initialize_rss_mapping(struct adapter *);
153 static void ixgbe_enable_intr(struct adapter *);
154 static void ixgbe_disable_intr(struct adapter *);
155 static void ixgbe_update_stats_counters(struct adapter *);
156 static void ixgbe_set_promisc(struct adapter *);
157 static void ixgbe_set_multi(struct adapter *);
158 static void ixgbe_update_link_status(struct adapter *);
159 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
160 static void ixgbe_configure_ivars(struct adapter *);
161 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163 static void ixgbe_setup_vlan_hw_support(struct adapter *);
164 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
165 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
167 static void ixgbe_add_device_sysctls(struct adapter *);
168 static void ixgbe_add_hw_stats(struct adapter *);
169 static int ixgbe_set_flowcntl(struct adapter *, int);
170 static int ixgbe_set_advertise(struct adapter *, int);
172 /* Sysctl handlers */
173 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
174 const char *, int *, int);
175 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
188 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
193 /* Support for pluggable optic modules */
194 static bool ixgbe_sfp_probe(struct adapter *);
195 static void ixgbe_setup_optics(struct adapter *);
197 /* Legacy (single vector) interrupt handler */
198 static void ixgbe_legacy_irq(void *);
200 /* The MSI/X Interrupt handlers */
201 static void ixgbe_msix_que(void *);
202 static void ixgbe_msix_link(void *);
204 /* Deferred interrupt tasklets */
205 static void ixgbe_handle_que(void *, int);
206 static void ixgbe_handle_link(void *, int);
207 static void ixgbe_handle_msf(void *, int);
208 static void ixgbe_handle_mod(void *, int);
209 static void ixgbe_handle_phy(void *, int);
212 static void ixgbe_reinit_fdir(void *, int);
216 static void ixgbe_ping_all_vfs(struct adapter *);
217 static void ixgbe_handle_mbx(void *, int);
218 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
219 static void ixgbe_uninit_iov(device_t);
220 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
221 static void ixgbe_initialize_iov(struct adapter *);
222 static void ixgbe_recalculate_max_frame(struct adapter *);
223 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
227 /*********************************************************************
228 * FreeBSD Device Interface Entry Points
229 *********************************************************************/
231 static device_method_t ix_methods[] = {
232 /* Device interface */
233 DEVMETHOD(device_probe, ixgbe_probe),
234 DEVMETHOD(device_attach, ixgbe_attach),
235 DEVMETHOD(device_detach, ixgbe_detach),
236 DEVMETHOD(device_shutdown, ixgbe_shutdown),
237 DEVMETHOD(device_suspend, ixgbe_suspend),
238 DEVMETHOD(device_resume, ixgbe_resume),
240 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
241 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
242 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
247 static driver_t ix_driver = {
248 "ix", ix_methods, sizeof(struct adapter),
251 devclass_t ix_devclass;
252 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
254 MODULE_DEPEND(ix, pci, 1, 1, 1);
255 MODULE_DEPEND(ix, ether, 1, 1, 1);
257 MODULE_DEPEND(ix, netmap, 1, 1, 1);
258 #endif /* DEV_NETMAP */
261 ** TUNEABLE PARAMETERS:
264 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
265 "IXGBE driver parameters");
268 ** AIM: Adaptive Interrupt Moderation
269 ** which means that the interrupt rate
270 ** is varied over time based on the
271 ** traffic for that interrupt vector
273 static int ixgbe_enable_aim = TRUE;
274 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
275 "Enable adaptive interrupt moderation");
277 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
278 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
279 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
281 /* How many packets rxeof tries to clean at a time */
282 static int ixgbe_rx_process_limit = 256;
283 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
284 &ixgbe_rx_process_limit, 0,
285 "Maximum number of received packets to process at a time,"
286 "-1 means unlimited");
288 /* How many packets txeof tries to clean at a time */
289 static int ixgbe_tx_process_limit = 256;
290 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
291 &ixgbe_tx_process_limit, 0,
292 "Maximum number of sent packets to process at a time,"
293 "-1 means unlimited");
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 ** Smart speed setting, default to on
307 ** this only works as a compile option
308 ** right now as its during attach, set
309 ** this to 'ixgbe_smart_speed_off' to
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 * MSIX should be the default for best performance,
316 * but this allows it to be forced off for testing.
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320 "Enable MSI-X interrupts");
323 * Number of Queues, can be set to 0,
324 * it then autoconfigures based on the
325 * number of cpus with a max of 8. This
326 can be overridden manually here.
328 static int ixgbe_num_queues = 0;
329 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
330 "Number of queues to configure, 0 indicates autoconfigure");
333 ** Number of TX descriptors per ring,
334 ** setting higher than RX as this seems
335 ** the better performing choice.
337 static int ixgbe_txd = PERFORM_TXD;
338 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
339 "Number of transmit descriptors per queue");
341 /* Number of RX descriptors per ring */
342 static int ixgbe_rxd = PERFORM_RXD;
343 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
344 "Number of receive descriptors per queue");
347 ** Defining this on will allow the use
348 ** of unsupported SFP+ modules, note that
349 ** doing so you are on your own :)
351 static int allow_unsupported_sfp = FALSE;
/*
 * NOTE(review): the loader tunable is "hw.ix.unsupported_sfp", which does
 * not mirror the C identifier ("allow_unsupported_sfp") and, unlike the
 * other knobs in this file, has no matching SYSCTL_INT — confirm the name
 * matches the documented/administered knob. Copied into
 * hw->allow_unsupported_sfp during attach (see ixgbe_attach).
 */
352 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
354 /* Keep running tab on them for sanity check */
/*
 * Running count of attached ports; used in ixgbe_attach's mbuf-cluster
 * sanity check (ixgbe_rxd * num_queues * ixgbe_total_ports vs nmbclusters).
 */
355 static int ixgbe_total_ports;
359 ** Flow Director actually 'steals'
360 ** part of the packet buffer as its
361 ** filter pool, this variable controls
363 ** 0 = 64K, 1 = 128K, 2 = 256K
365 static int fdir_pballoc = 1;
370 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
371 * be a reference on how to implement netmap support in a driver.
372 * Additional comments are in ixgbe_netmap.h .
374 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
375 * that extend the standard driver.
377 #include <dev/netmap/ixgbe_netmap.h>
378 #endif /* DEV_NETMAP */
/* malloc(9) type tag for all driver-private allocations made by ix(4). */
380 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
382 /*********************************************************************
383 * Device identification routine
385 * ixgbe_probe determines if the driver should be loaded on
386 * adapter based on PCI vendor/device id of the adapter.
388 * return BUS_PROBE_DEFAULT on success, positive on failure
389 *********************************************************************/
392 ixgbe_probe(device_t dev)
394 ixgbe_vendor_info_t *ent;
396 u16 pci_vendor_id = 0;
397 u16 pci_device_id = 0;
398 u16 pci_subvendor_id = 0;
399 u16 pci_subdevice_id = 0;
400 char adapter_name[256];
402 INIT_DEBUGOUT("ixgbe_probe: begin");
404 pci_vendor_id = pci_get_vendor(dev);
405 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
408 pci_device_id = pci_get_device(dev);
409 pci_subvendor_id = pci_get_subvendor(dev);
410 pci_subdevice_id = pci_get_subdevice(dev);
412 ent = ixgbe_vendor_info_array;
413 while (ent->vendor_id != 0) {
414 if ((pci_vendor_id == ent->vendor_id) &&
415 (pci_device_id == ent->device_id) &&
417 ((pci_subvendor_id == ent->subvendor_id) ||
418 (ent->subvendor_id == 0)) &&
420 ((pci_subdevice_id == ent->subdevice_id) ||
421 (ent->subdevice_id == 0))) {
422 sprintf(adapter_name, "%s, Version - %s",
423 ixgbe_strings[ent->index],
424 ixgbe_driver_version);
425 device_set_desc_copy(dev, adapter_name);
427 return (BUS_PROBE_DEFAULT);
434 /*********************************************************************
435 * Device initialization routine
437 * The attach entry point is called when the driver is being loaded.
438 * This routine identifies the type of hardware, allocates all resources
439 * and initializes the hardware.
441 * return 0 on success, positive on failure
442 *********************************************************************/
445 ixgbe_attach(device_t dev)
447 struct adapter *adapter;
453 INIT_DEBUGOUT("ixgbe_attach: begin");
455 /* Allocate, clear, and link in our adapter structure */
456 adapter = device_get_softc(dev);
461 adapter->init_locked = ixgbe_init_locked;
462 adapter->stop_locked = ixgbe_stop;
466 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
468 /* Set up the timer callout */
469 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
471 /* Determine hardware revision */
472 ixgbe_identify_hardware(adapter);
474 /* Do base PCI setup - map BAR0 */
475 if (ixgbe_allocate_pci_resources(adapter)) {
476 device_printf(dev, "Allocation of PCI resources failed\n");
481 /* Sysctls for limiting the amount of work done in the taskqueues */
482 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
483 "max number of rx packets to process",
484 &adapter->rx_process_limit, ixgbe_rx_process_limit);
486 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
487 "max number of tx packets to process",
488 &adapter->tx_process_limit, ixgbe_tx_process_limit);
490 /* Do descriptor calc and sanity checks */
491 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
492 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
493 device_printf(dev, "TXD config issue, using default!\n");
494 adapter->num_tx_desc = DEFAULT_TXD;
496 adapter->num_tx_desc = ixgbe_txd;
499 ** With many RX rings it is easy to exceed the
500 ** system mbuf allocation. Tuning nmbclusters
501 ** can alleviate this.
503 if (nmbclusters > 0) {
505 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
506 if (s > nmbclusters) {
507 device_printf(dev, "RX Descriptors exceed "
508 "system mbuf max, using default instead!\n");
509 ixgbe_rxd = DEFAULT_RXD;
513 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
514 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
515 device_printf(dev, "RXD config issue, using default!\n");
516 adapter->num_rx_desc = DEFAULT_RXD;
518 adapter->num_rx_desc = ixgbe_rxd;
520 /* Allocate our TX/RX Queues */
521 if (ixgbe_allocate_queues(adapter)) {
526 /* Allocate multicast array memory. */
527 adapter->mta = malloc(sizeof(*adapter->mta) *
528 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
529 if (adapter->mta == NULL) {
530 device_printf(dev, "Can not allocate multicast setup array\n");
535 /* Initialize the shared code */
536 hw->allow_unsupported_sfp = allow_unsupported_sfp;
537 error = ixgbe_init_shared_code(hw);
538 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
540 ** No optics in this port, set up
541 ** so the timer routine will probe
542 ** for later insertion.
544 adapter->sfp_probe = TRUE;
546 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
547 device_printf(dev, "Unsupported SFP+ module detected!\n");
551 device_printf(dev, "Unable to initialize the shared code\n");
556 /* Make sure we have a good EEPROM before we read from it */
557 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
558 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
563 error = ixgbe_init_hw(hw);
565 case IXGBE_ERR_EEPROM_VERSION:
566 device_printf(dev, "This device is a pre-production adapter/"
567 "LOM. Please be aware there may be issues associated "
568 "with your hardware.\nIf you are experiencing problems "
569 "please contact your Intel or hardware representative "
570 "who provided you with this hardware.\n");
572 case IXGBE_ERR_SFP_NOT_SUPPORTED:
573 device_printf(dev, "Unsupported SFP+ Module\n");
576 case IXGBE_ERR_SFP_NOT_PRESENT:
577 device_printf(dev, "No SFP+ Module found\n");
583 /* hw.ix defaults init */
584 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
585 ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
586 adapter->enable_aim = ixgbe_enable_aim;
588 if ((adapter->msix > 1) && (ixgbe_enable_msix))
589 error = ixgbe_allocate_msix(adapter);
591 error = ixgbe_allocate_legacy(adapter);
595 /* Enable the optics for 82599 SFP+ fiber */
596 ixgbe_enable_tx_laser(hw);
598 /* Enable power to the phy. */
599 ixgbe_set_phy_power(hw, TRUE);
601 /* Setup OS specific network interface */
602 if (ixgbe_setup_interface(dev, adapter) != 0)
605 /* Initialize statistics */
606 ixgbe_update_stats_counters(adapter);
608 /* Register for VLAN events */
609 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
610 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
611 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
612 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
614 /* Check PCIE slot type/speed/width */
615 ixgbe_get_slot_info(adapter);
617 /* Set an initial default flow control & dmac value */
618 adapter->fc = ixgbe_fc_full;
620 adapter->eee_enabled = 0;
623 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
624 nvlist_t *pf_schema, *vf_schema;
626 hw->mbx.ops.init_params(hw);
627 pf_schema = pci_iov_schema_alloc_node();
628 vf_schema = pci_iov_schema_alloc_node();
629 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
630 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
631 IOV_SCHEMA_HASDEFAULT, TRUE);
632 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
633 IOV_SCHEMA_HASDEFAULT, FALSE);
634 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
635 IOV_SCHEMA_HASDEFAULT, FALSE);
636 error = pci_iov_attach(dev, pf_schema, vf_schema);
639 "Error %d setting up SR-IOV\n", error);
644 /* Check for certain supported features */
645 ixgbe_check_wol_support(adapter);
648 ixgbe_add_device_sysctls(adapter);
649 ixgbe_add_hw_stats(adapter);
651 /* let hardware know driver is loaded */
652 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
653 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
654 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
657 ixgbe_netmap_attach(adapter);
658 #endif /* DEV_NETMAP */
659 INIT_DEBUGOUT("ixgbe_attach: end");
663 ixgbe_free_transmit_structures(adapter);
664 ixgbe_free_receive_structures(adapter);
666 if (adapter->ifp != NULL)
667 if_free(adapter->ifp);
668 ixgbe_free_pci_resources(adapter);
669 free(adapter->mta, M_DEVBUF);
673 /*********************************************************************
674 * Device removal routine
676 * The detach entry point is called when the driver is being removed.
677 * This routine stops the adapter and deallocates all the resources
678 * that were allocated for driver operation.
680 * return 0 on success, positive on failure
681 *********************************************************************/
684 ixgbe_detach(device_t dev)
686 struct adapter *adapter = device_get_softc(dev);
687 struct ix_queue *que = adapter->queues;
688 struct tx_ring *txr = adapter->tx_rings;
691 INIT_DEBUGOUT("ixgbe_detach: begin");
693 /* Make sure VLANS are not using driver */
694 if (adapter->ifp->if_vlantrunk != NULL) {
695 device_printf(dev,"Vlan in use, detach first\n");
700 if (pci_iov_detach(dev) != 0) {
701 device_printf(dev, "SR-IOV in use; detach first.\n");
706 ether_ifdetach(adapter->ifp);
707 /* Stop the adapter */
708 IXGBE_CORE_LOCK(adapter);
709 ixgbe_setup_low_power_mode(adapter);
710 IXGBE_CORE_UNLOCK(adapter);
712 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
714 #ifndef IXGBE_LEGACY_TX
715 taskqueue_drain(que->tq, &txr->txq_task);
717 taskqueue_drain(que->tq, &que->que_task);
718 taskqueue_free(que->tq);
722 /* Drain the Link queue */
724 taskqueue_drain(adapter->tq, &adapter->link_task);
725 taskqueue_drain(adapter->tq, &adapter->mod_task);
726 taskqueue_drain(adapter->tq, &adapter->msf_task);
728 taskqueue_drain(adapter->tq, &adapter->mbx_task);
730 taskqueue_drain(adapter->tq, &adapter->phy_task);
732 taskqueue_drain(adapter->tq, &adapter->fdir_task);
734 taskqueue_free(adapter->tq);
737 /* let hardware know driver is unloading */
738 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
739 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
742 /* Unregister VLAN events */
743 if (adapter->vlan_attach != NULL)
744 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
745 if (adapter->vlan_detach != NULL)
746 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
748 callout_drain(&adapter->timer);
750 netmap_detach(adapter->ifp);
751 #endif /* DEV_NETMAP */
752 ixgbe_free_pci_resources(adapter);
753 bus_generic_detach(dev);
754 if_free(adapter->ifp);
756 ixgbe_free_transmit_structures(adapter);
757 ixgbe_free_receive_structures(adapter);
758 free(adapter->mta, M_DEVBUF);
760 IXGBE_CORE_LOCK_DESTROY(adapter);
764 /*********************************************************************
766 * Shutdown entry point
768 **********************************************************************/
771 ixgbe_shutdown(device_t dev)
773 struct adapter *adapter = device_get_softc(dev);
776 INIT_DEBUGOUT("ixgbe_shutdown: begin");
778 IXGBE_CORE_LOCK(adapter);
779 error = ixgbe_setup_low_power_mode(adapter);
780 IXGBE_CORE_UNLOCK(adapter);
786 * Methods for going from:
787 * D0 -> D3: ixgbe_suspend
788 * D3 -> D0: ixgbe_resume
791 ixgbe_suspend(device_t dev)
793 struct adapter *adapter = device_get_softc(dev);
796 INIT_DEBUGOUT("ixgbe_suspend: begin");
798 IXGBE_CORE_LOCK(adapter);
800 error = ixgbe_setup_low_power_mode(adapter);
802 IXGBE_CORE_UNLOCK(adapter);
808 ixgbe_resume(device_t dev)
810 struct adapter *adapter = device_get_softc(dev);
811 struct ifnet *ifp = adapter->ifp;
812 struct ixgbe_hw *hw = &adapter->hw;
815 INIT_DEBUGOUT("ixgbe_resume: begin");
817 IXGBE_CORE_LOCK(adapter);
819 /* Read & clear WUS register */
820 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
822 device_printf(dev, "Woken up by (WUS): %#010x\n",
823 IXGBE_READ_REG(hw, IXGBE_WUS));
824 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
825 /* And clear WUFC until next low-power transition */
826 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
829 * Required after D3->D0 transition;
830 * will re-advertise all previous advertised speeds
832 if (ifp->if_flags & IFF_UP)
833 ixgbe_init_locked(adapter);
835 IXGBE_CORE_UNLOCK(adapter);
841 /*********************************************************************
844 * ixgbe_ioctl is called when the user wants to configure the
847 * return 0 on success, positive on failure
848 **********************************************************************/
851 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
853 struct adapter *adapter = ifp->if_softc;
854 struct ifreq *ifr = (struct ifreq *) data;
855 #if defined(INET) || defined(INET6)
856 struct ifaddr *ifa = (struct ifaddr *)data;
859 bool avoid_reset = FALSE;
865 if (ifa->ifa_addr->sa_family == AF_INET)
869 if (ifa->ifa_addr->sa_family == AF_INET6)
873 ** Calling init results in link renegotiation,
874 ** so we avoid doing it when possible.
877 ifp->if_flags |= IFF_UP;
878 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
881 if (!(ifp->if_flags & IFF_NOARP))
882 arp_ifinit(ifp, ifa);
885 error = ether_ioctl(ifp, command, data);
888 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
889 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
892 IXGBE_CORE_LOCK(adapter);
893 ifp->if_mtu = ifr->ifr_mtu;
894 adapter->max_frame_size =
895 ifp->if_mtu + IXGBE_MTU_HDR;
896 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
897 ixgbe_init_locked(adapter);
899 ixgbe_recalculate_max_frame(adapter);
901 IXGBE_CORE_UNLOCK(adapter);
905 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
906 IXGBE_CORE_LOCK(adapter);
907 if (ifp->if_flags & IFF_UP) {
908 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
909 if ((ifp->if_flags ^ adapter->if_flags) &
910 (IFF_PROMISC | IFF_ALLMULTI)) {
911 ixgbe_set_promisc(adapter);
914 ixgbe_init_locked(adapter);
916 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
918 adapter->if_flags = ifp->if_flags;
919 IXGBE_CORE_UNLOCK(adapter);
923 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
924 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
925 IXGBE_CORE_LOCK(adapter);
926 ixgbe_disable_intr(adapter);
927 ixgbe_set_multi(adapter);
928 ixgbe_enable_intr(adapter);
929 IXGBE_CORE_UNLOCK(adapter);
934 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
935 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
939 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
941 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
945 /* HW cannot turn these on/off separately */
946 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
947 ifp->if_capenable ^= IFCAP_RXCSUM;
948 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
950 if (mask & IFCAP_TXCSUM)
951 ifp->if_capenable ^= IFCAP_TXCSUM;
952 if (mask & IFCAP_TXCSUM_IPV6)
953 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
954 if (mask & IFCAP_TSO4)
955 ifp->if_capenable ^= IFCAP_TSO4;
956 if (mask & IFCAP_TSO6)
957 ifp->if_capenable ^= IFCAP_TSO6;
958 if (mask & IFCAP_LRO)
959 ifp->if_capenable ^= IFCAP_LRO;
960 if (mask & IFCAP_VLAN_HWTAGGING)
961 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
962 if (mask & IFCAP_VLAN_HWFILTER)
963 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
964 if (mask & IFCAP_VLAN_HWTSO)
965 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
967 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
968 IXGBE_CORE_LOCK(adapter);
969 ixgbe_init_locked(adapter);
970 IXGBE_CORE_UNLOCK(adapter);
972 VLAN_CAPABILITIES(ifp);
975 #if __FreeBSD_version >= 1100036
978 struct ixgbe_hw *hw = &adapter->hw;
981 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
982 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
985 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
989 if (i2c.len > sizeof(i2c.data)) {
994 for (i = 0; i < i2c.len; i++)
995 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
996 i2c.dev_addr, &i2c.data[i]);
997 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1002 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1003 error = ether_ioctl(ifp, command, data);
1011 * Set the various hardware offload abilities.
1013 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1014 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1015 * mbuf offload flags the driver will understand.
1018 ixgbe_set_if_hwassist(struct adapter *adapter)
1020 struct ifnet *ifp = adapter->ifp;
1021 struct ixgbe_hw *hw = &adapter->hw;
1023 ifp->if_hwassist = 0;
1024 #if __FreeBSD_version >= 1000000
1025 if (ifp->if_capenable & IFCAP_TSO4)
1026 ifp->if_hwassist |= CSUM_IP_TSO;
1027 if (ifp->if_capenable & IFCAP_TSO6)
1028 ifp->if_hwassist |= CSUM_IP6_TSO;
1029 if (ifp->if_capenable & IFCAP_TXCSUM) {
1030 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1031 if (hw->mac.type != ixgbe_mac_82598EB)
1032 ifp->if_hwassist |= CSUM_IP_SCTP;
1034 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1035 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1036 if (hw->mac.type != ixgbe_mac_82598EB)
1037 ifp->if_hwassist |= CSUM_IP6_SCTP;
1040 if (ifp->if_capenable & IFCAP_TSO)
1041 ifp->if_hwassist |= CSUM_TSO;
1042 if (ifp->if_capenable & IFCAP_TXCSUM) {
1043 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1044 if (hw->mac.type != ixgbe_mac_82598EB)
1045 ifp->if_hwassist |= CSUM_SCTP;
1050 /*********************************************************************
1053 * This routine is used in two ways. It is used by the stack as
1054 * init entry point in network interface structure. It is also used
1055 * by the driver as a hw/sw initialization routine to get to a
1058 * return 0 on success, positive on failure
1059 **********************************************************************/
1060 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * ixgbe_init_locked
 *
 * Full hardware/software (re)initialization: stops the adapter, then
 * brings up IOV pools, MAC address filters, TX/RX rings, interrupt
 * routing, flow control, VLAN filtering and finally marks the ifnet
 * running.  Caller must hold the core mutex (asserted below).
 */
1063 ixgbe_init_locked(struct adapter *adapter)
1065 struct ifnet *ifp = adapter->ifp;
1066 device_t dev = adapter->dev;
1067 struct ixgbe_hw *hw = &adapter->hw;
1068 struct tx_ring *txr;
1069 struct rx_ring *rxr;
1074 enum ixgbe_iov_mode mode;
1077 mtx_assert(&adapter->core_mtx, MA_OWNED);
1078 INIT_DEBUGOUT("ixgbe_init_locked: begin");
/* Clear the stopped flag so ixgbe_stop_adapter() actually runs. */
1080 hw->adapter_stopped = FALSE;
1081 ixgbe_stop_adapter(hw);
1082 callout_stop(&adapter->timer);
1085 mode = ixgbe_get_iov_mode(adapter);
1086 adapter->pool = ixgbe_max_vfs(mode);
1087 /* Queue indices may change with IOV mode */
1088 for (int i = 0; i < adapter->num_queues; i++) {
1089 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1090 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1093 /* reprogram the RAR[0] in case user changed it. */
1094 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1096 /* Get the latest mac address, User can use a LAA */
1097 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1098 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1099 hw->addr_ctrl.rar_used_count = 1;
1101 /* Set hardware offload abilities from ifnet flags */
1102 ixgbe_set_if_hwassist(adapter);
1104 /* Prepare transmit descriptors and buffers */
1105 if (ixgbe_setup_transmit_structures(adapter)) {
1106 device_printf(dev, "Could not setup transmit structures\n");
1107 ixgbe_stop(adapter);
1113 ixgbe_initialize_iov(adapter);
1115 ixgbe_initialize_transmit_units(adapter);
1117 /* Setup Multicast table */
1118 ixgbe_set_multi(adapter);
1120 /* Determine the correct mbuf pool, based on frame size */
1121 if (adapter->max_frame_size <= MCLBYTES)
1122 adapter->rx_mbuf_sz = MCLBYTES;
1124 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1126 /* Prepare receive descriptors and buffers */
1127 if (ixgbe_setup_receive_structures(adapter)) {
1128 device_printf(dev, "Could not setup receive structures\n");
1129 ixgbe_stop(adapter);
1133 /* Configure RX settings */
1134 ixgbe_initialize_receive_units(adapter);
1136 /* Enable SDP & MSIX interrupts based on adapter */
1137 ixgbe_config_gpie(adapter);
/* Program the max frame size register for jumbo frames. */
1140 if (ifp->if_mtu > ETHERMTU) {
1141 /* aka IXGBE_MAXFRS on 82599 and newer */
1142 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1143 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1144 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1145 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1148 /* Now enable all the queues */
1149 for (int i = 0; i < adapter->num_queues; i++) {
1150 txr = &adapter->tx_rings[i];
1151 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1152 txdctl |= IXGBE_TXDCTL_ENABLE;
1153 /* Set WTHRESH to 8, burst writeback */
1154 txdctl |= (8 << 16);
1156 * When the internal queue falls below PTHRESH (32),
1157 * start prefetching as long as there are at least
1158 * HTHRESH (1) buffers ready. The values are taken
1159 * from the Intel linux driver 3.8.21.
1160 * Prefetching enables tx line rate even with 1 queue.
1162 txdctl |= (32 << 0) | (1 << 8);
1163 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX queue and poll (up to 10 reads) until it sticks. */
1166 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1167 rxr = &adapter->rx_rings[i];
1168 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1169 if (hw->mac.type == ixgbe_mac_82598EB) {
1175 rxdctl &= ~0x3FFFFF;
1178 rxdctl |= IXGBE_RXDCTL_ENABLE;
1179 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1180 for (; j < 10; j++) {
1181 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1182 IXGBE_RXDCTL_ENABLE)
1190 * In netmap mode, we must preserve the buffers made
1191 * available to userspace before the if_init()
1192 * (this is true by default on the TX side, because
1193 * init makes all buffers available to userspace).
1195 * netmap_reset() and the device specific routines
1196 * (e.g. ixgbe_setup_receive_rings()) map these
1197 * buffers at the end of the NIC ring, so here we
1198 * must set the RDT (tail) register to make sure
1199 * they are not overwritten.
1201 * In this driver the NIC ring starts at RDH = 0,
1202 * RDT points to the last slot available for reception (?),
1203 * so RDT = num_rx_desc - 1 means the whole ring is available.
1205 if (ifp->if_capenable & IFCAP_NETMAP) {
1206 struct netmap_adapter *na = NA(adapter->ifp);
1207 struct netmap_kring *kring = &na->rx_rings[i];
1208 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1210 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1212 #endif /* DEV_NETMAP */
1213 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1216 /* Enable Receive engine */
1217 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1218 if (hw->mac.type == ixgbe_mac_82598EB)
1219 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1220 rxctrl |= IXGBE_RXCTRL_RXEN;
1221 ixgbe_enable_rx_dma(hw, rxctrl);
/* Restart the periodic watchdog/statistics timer. */
1223 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1225 /* Set up MSI/X routing */
1226 if (ixgbe_enable_msix) {
1227 ixgbe_configure_ivars(adapter);
1228 /* Set up auto-mask */
1229 if (hw->mac.type == ixgbe_mac_82598EB)
1230 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1232 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1233 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1235 } else { /* Simple settings for Legacy/MSI */
1236 ixgbe_set_ivar(adapter, 0, 0, 0);
1237 ixgbe_set_ivar(adapter, 0, 0, 1);
1238 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1242 /* Init Flow director */
1243 if (hw->mac.type != ixgbe_mac_82598EB) {
1244 u32 hdrm = 32 << fdir_pballoc;
1246 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1247 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1252 * Check on any SFP devices that
1253 * need to be kick-started
1255 if (hw->phy.type == ixgbe_phy_none) {
1256 err = hw->phy.ops.identify(hw);
1257 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1259 "Unsupported SFP+ module type was detected.\n");
1264 /* Set moderation on the Link interrupt */
1265 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1267 /* Configure Energy Efficient Ethernet for supported devices */
1268 if (hw->mac.ops.setup_eee) {
1269 err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
1271 device_printf(dev, "Error setting up EEE: %d\n", err);
1274 /* Enable power to the phy. */
1275 ixgbe_set_phy_power(hw, TRUE);
1277 /* Config/Enable Link */
1278 ixgbe_config_link(adapter);
1280 /* Hardware Packet Buffer & Flow Control setup */
1281 ixgbe_config_delay_values(adapter);
1283 /* Initialize the FC settings */
1286 /* Set up VLAN support and filter */
1287 ixgbe_setup_vlan_hw_support(adapter);
1289 /* Setup DMA Coalescing */
1290 ixgbe_config_dmac(adapter);
1292 /* And now turn on interrupts */
1293 ixgbe_enable_intr(adapter);
1296 /* Enable the use of the MBX by the VF's */
1298 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1299 reg |= IXGBE_CTRL_EXT_PFRSTD;
1300 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1304 /* Now inform the stack we're ready */
1305 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * ixgbe_init
 *
 * Stack-facing init entry point: takes the core lock and defers the
 * real work to ixgbe_init_locked().
 */
1311 ixgbe_init(void *arg)
1313 struct adapter *adapter = arg;
1315 IXGBE_CORE_LOCK(adapter);
1316 ixgbe_init_locked(adapter);
1317 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_config_gpie
 *
 * Program the GPIE register: enable the SDP (software-definable pin)
 * interrupts appropriate for this MAC/device (fan failure, module
 * detection, thermal/link detection), and switch to enhanced MSI-X
 * mode when more than one vector is in use.
 */
1322 ixgbe_config_gpie(struct adapter *adapter)
1324 struct ixgbe_hw *hw = &adapter->hw;
1327 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1329 /* Fan Failure Interrupt */
1330 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1331 gpie |= IXGBE_SDP1_GPIEN;
1334 * Module detection (SDP2)
1335 * Media ready (SDP1)
1337 if (hw->mac.type == ixgbe_mac_82599EB) {
1338 gpie |= IXGBE_SDP2_GPIEN;
1339 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1340 gpie |= IXGBE_SDP1_GPIEN;
1344 * Thermal Failure Detection (X540)
1345 * Link Detection (X552 SFP+, X552/X557-AT)
1347 if (hw->mac.type == ixgbe_mac_X540 ||
1348 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1349 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1350 gpie |= IXGBE_SDP0_GPIEN_X540;
1352 if (adapter->msix > 1) {
1353 /* Enable Enhanced MSIX mode */
1354 gpie |= IXGBE_GPIE_MSIX_MODE;
1355 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1359 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/*
 * ixgbe_config_delay_values
 *
 * Compute the flow-control high/low water marks (in KB) from the max
 * frame size using the MAC-specific delay-value macros, and seed the
 * hw->fc settings (requested mode, pause time, XON) used by
 * ixgbe_fc_enable().
 */
1364 * Requires adapter->max_frame_size to be set.
1367 ixgbe_config_delay_values(struct adapter *adapter)
1369 struct ixgbe_hw *hw = &adapter->hw;
1370 u32 rxpb, frame, size, tmp;
1372 frame = adapter->max_frame_size;
1374 /* Calculate High Water */
1375 switch (hw->mac.type) {
1376 case ixgbe_mac_X540:
1377 case ixgbe_mac_X550:
1378 case ixgbe_mac_X550EM_x:
1379 tmp = IXGBE_DV_X540(frame, frame);
/* default case: all other MACs use the generic delay value. */
1382 tmp = IXGBE_DV(frame, frame);
1385 size = IXGBE_BT2KB(tmp);
/* RXPBSIZE is in bytes; >> 10 converts to KB. */
1386 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1387 hw->fc.high_water[0] = rxpb - size;
1389 /* Now calculate Low Water */
1390 switch (hw->mac.type) {
1391 case ixgbe_mac_X540:
1392 case ixgbe_mac_X550:
1393 case ixgbe_mac_X550EM_x:
1394 tmp = IXGBE_LOW_DV_X540(frame);
1397 tmp = IXGBE_LOW_DV(frame);
1400 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1402 hw->fc.requested_mode = adapter->fc;
1403 hw->fc.pause_time = IXGBE_FC_PAUSE;
1404 hw->fc.send_xon = TRUE;
1409 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixgbe_enable_queue
 *
 * Unmask the interrupt for a single queue vector.  82598EB has one
 * 32-bit EIMS register; newer MACs split the 64-bit vector space
 * across EIMS_EX(0)/EIMS_EX(1).
 *
 * Fix: shift the 64-bit one, not the int literal.  "(u64)(1 << vector)"
 * shifts a 32-bit int, which is undefined for vector >= 31 and would
 * sign-extend 1 << 31 into the upper word; the "queue >> 32" use below
 * shows vectors above 31 are expected.
 */
1414 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1416 struct ixgbe_hw *hw = &adapter->hw;
1417 u64 queue = ((u64)1 << vector);
1420 if (hw->mac.type == ixgbe_mac_82598EB) {
1421 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1422 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1424 mask = (queue & 0xFFFFFFFF);
1426 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1427 mask = (queue >> 32);
1429 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * ixgbe_disable_queue
 *
 * Mask the interrupt for a single queue vector; mirror image of
 * ixgbe_enable_queue() using EIMC/EIMC_EX.
 *
 * Fix: shift the 64-bit one, not the int literal.  "(u64)(1 << vector)"
 * shifts a 32-bit int, which is undefined for vector >= 31 and would
 * sign-extend 1 << 31 into the upper word; the "queue >> 32" use below
 * shows vectors above 31 are expected.
 */
1434 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1436 struct ixgbe_hw *hw = &adapter->hw;
1437 u64 queue = ((u64)1 << vector);
1440 if (hw->mac.type == ixgbe_mac_82598EB) {
1441 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1442 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1444 mask = (queue & 0xFFFFFFFF);
1446 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1447 mask = (queue >> 32);
1449 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * ixgbe_handle_que
 *
 * Deferred (taskqueue) per-queue handler: if the interface is running,
 * drain pending transmit work, then re-enable this queue's interrupt
 * (MSI-X path) or the global interrupt (legacy path).
 */
1454 ixgbe_handle_que(void *context, int pending)
1456 struct ix_queue *que = context;
1457 struct adapter *adapter = que->adapter;
1458 struct tx_ring *txr = que->txr;
1459 struct ifnet *ifp = adapter->ifp;
1461 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1465 #ifndef IXGBE_LEGACY_TX
1466 if (!drbr_empty(ifp, txr->br))
1467 ixgbe_mq_start_locked(ifp, txr);
/* Legacy-TX branch (the #else marker is not visible in this fragment). */
1469 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1470 ixgbe_start_locked(txr, ifp);
1472 IXGBE_TX_UNLOCK(txr);
1475 /* Reenable this interrupt */
1476 if (que->res != NULL)
1477 ixgbe_enable_queue(adapter, que->msix);
1479 ixgbe_enable_intr(adapter);
1484 /*********************************************************************
1486 * Legacy Interrupt Service routine
1488 **********************************************************************/
/*
 * ixgbe_legacy_irq
 *
 * INTx/MSI interrupt handler: reads (and thereby clears) EICR, runs RX
 * cleanup, drains TX, then checks the misc causes (fan failure, link
 * change, external PHY) and defers them to taskqueues.
 */
1491 ixgbe_legacy_irq(void *arg)
1493 struct ix_queue *que = arg;
1494 struct adapter *adapter = que->adapter;
1495 struct ixgbe_hw *hw = &adapter->hw;
1496 struct ifnet *ifp = adapter->ifp;
1497 struct tx_ring *txr = adapter->tx_rings;
/* Reading EICR clears the pending interrupt causes. */
1502 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1505 if (reg_eicr == 0) {
/* Spurious interrupt: nothing pending, just re-enable and return. */
1506 ixgbe_enable_intr(adapter);
1510 more = ixgbe_rxeof(que);
1514 #ifdef IXGBE_LEGACY_TX
1515 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1516 ixgbe_start_locked(txr, ifp);
1518 if (!drbr_empty(ifp, txr->br))
1519 ixgbe_mq_start_locked(ifp, txr);
1521 IXGBE_TX_UNLOCK(txr);
1523 /* Check for fan failure */
1524 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1525 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1526 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1527 "REPLACE IMMEDIATELY!!\n");
1528 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1531 /* Link status change */
1532 if (reg_eicr & IXGBE_EICR_LSC)
1533 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1535 /* External PHY interrupt */
1536 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1537 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1538 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: reschedule the deferred handler. */
1541 taskqueue_enqueue(que->tq, &que->que_task);
1543 ixgbe_enable_intr(adapter);
1548 /*********************************************************************
1550 * MSIX Queue Interrupt Service routine
1552 **********************************************************************/
/*
 * ixgbe_msix_que
 *
 * MSI-X per-queue interrupt handler: masks this vector, cleans RX and
 * drains TX, then (optionally) runs adaptive interrupt moderation
 * before re-enabling the vector or rescheduling deferred work.
 *
 * Fix: IFQ_DRV_IS_EMPTY() takes a pointer to the send queue; pass
 * &ifp->if_snd as the sibling call sites in ixgbe_handle_que() and
 * ixgbe_legacy_irq() do.  (The bare ifp->if_snd never compiled; this
 * path is only built with IXGBE_LEGACY_TX defined.)
 */
1554 ixgbe_msix_que(void *arg)
1556 struct ix_queue *que = arg;
1557 struct adapter *adapter = que->adapter;
1558 struct ifnet *ifp = adapter->ifp;
1559 struct tx_ring *txr = que->txr;
1560 struct rx_ring *rxr = que->rxr;
1565 /* Protect against spurious interrupts */
1566 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1569 ixgbe_disable_queue(adapter, que->msix);
1572 more = ixgbe_rxeof(que);
1576 #ifdef IXGBE_LEGACY_TX
1577 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1578 ixgbe_start_locked(txr, ifp);
1580 if (!drbr_empty(ifp, txr->br))
1581 ixgbe_mq_start_locked(ifp, txr);
1583 IXGBE_TX_UNLOCK(txr);
1587 if (adapter->enable_aim == FALSE)
1590 ** Do Adaptive Interrupt Moderation:
1591 ** - Write out last calculated setting
1592 ** - Calculate based on average size over
1593 ** the last interval.
1595 if (que->eitr_setting)
1596 IXGBE_WRITE_REG(&adapter->hw,
1597 IXGBE_EITR(que->msix), que->eitr_setting);
1599 que->eitr_setting = 0;
1601 /* Idle, do nothing */
1602 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Estimate average packet size to pick the next moderation rate. */
1605 if ((txr->bytes) && (txr->packets))
1606 newitr = txr->bytes/txr->packets;
1607 if ((rxr->bytes) && (rxr->packets))
1608 newitr = max(newitr,
1609 (rxr->bytes / rxr->packets));
1610 newitr += 24; /* account for hardware frame, crc */
1612 /* set an upper boundary */
1613 newitr = min(newitr, 3000);
1615 /* Be nice to the mid range */
1616 if ((newitr > 300) && (newitr < 1200))
1617 newitr = (newitr / 3);
1619 newitr = (newitr / 2);
1621 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1622 newitr |= newitr << 16;
1624 newitr |= IXGBE_EITR_CNT_WDIS;
1626 /* save for next interrupt */
1627 que->eitr_setting = newitr;
/* More work pending: defer to the taskqueue instead of re-arming. */
1637 taskqueue_enqueue(que->tq, &que->que_task);
1639 ixgbe_enable_queue(adapter, que->msix);
/*
 * ixgbe_msix_link
 *
 * MSI-X "other causes" interrupt handler: masks OTHER interrupts,
 * reads and acknowledges the cause bits, then defers link, flow
 * director, mailbox, SFP module and PHY events to taskqueues; also
 * reports fatal conditions (ECC error, over-temperature) directly.
 */
1645 ixgbe_msix_link(void *arg)
1647 struct adapter *adapter = arg;
1648 struct ixgbe_hw *hw = &adapter->hw;
1649 u32 reg_eicr, mod_mask;
1651 ++adapter->link_irq;
1653 /* Pause other interrupts */
1654 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1656 /* First get the cause */
1657 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1658 /* Be sure the queue bits are not cleared */
1659 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1660 /* Clear interrupt with write */
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1663 /* Link status change */
1664 if (reg_eicr & IXGBE_EICR_LSC) {
1665 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1666 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Causes below only exist on MACs newer than 82598EB. */
1669 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1671 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1672 /* This is probably overkill :) */
1673 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1675 /* Disable the interrupt */
1676 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1677 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1680 if (reg_eicr & IXGBE_EICR_ECC) {
1681 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1682 "Please Reboot!!\n");
1683 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1686 /* Check for over temp condition */
1687 if (reg_eicr & IXGBE_EICR_TS) {
1688 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1689 "PHY IS SHUT DOWN!!\n");
1690 device_printf(adapter->dev, "System shutdown required!\n");
1691 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1694 if (reg_eicr & IXGBE_EICR_MAILBOX)
1695 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1699 /* Pluggable optics-related interrupt */
1700 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1701 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1703 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1705 if (ixgbe_is_sfp(hw)) {
1706 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1707 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1708 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1709 } else if (reg_eicr & mod_mask) {
1710 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1711 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1715 /* Check for fan failure */
1716 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1717 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1718 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1719 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1720 "REPLACE IMMEDIATELY!!\n");
1723 /* External PHY interrupt */
1724 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1725 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1726 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1727 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1730 /* Re-enable other interrupts */
1731 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1735 /*********************************************************************
1737 * Media Ioctl callback
1739 * This routine is called whenever the user queries the status of
1740 * the interface using ifconfig.
1742 **********************************************************************/
/*
 * ixgbe_media_status
 *
 * ifmedia status callback: refreshes link state under the core lock,
 * then maps the cached physical-layer bitmask and current link speed
 * to the matching IFM_* media subtype, and reports the active flow
 * control direction as RX/TX pause flags.
 */
1744 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1746 struct adapter *adapter = ifp->if_softc;
1747 struct ixgbe_hw *hw = &adapter->hw;
1750 INIT_DEBUGOUT("ixgbe_media_status: begin");
1751 IXGBE_CORE_LOCK(adapter);
1752 ixgbe_update_link_status(adapter);
1754 ifmr->ifm_status = IFM_AVALID;
1755 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail out early. */
1757 if (!adapter->link_active) {
1758 IXGBE_CORE_UNLOCK(adapter);
1762 ifmr->ifm_status |= IFM_ACTIVE;
1763 layer = adapter->phy_layer;
1765 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1766 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1767 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1768 switch (adapter->link_speed) {
1769 case IXGBE_LINK_SPEED_10GB_FULL:
1770 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1772 case IXGBE_LINK_SPEED_1GB_FULL:
1773 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1775 case IXGBE_LINK_SPEED_100_FULL:
1776 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1779 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1780 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1781 switch (adapter->link_speed) {
1782 case IXGBE_LINK_SPEED_10GB_FULL:
1783 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1786 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1787 switch (adapter->link_speed) {
1788 case IXGBE_LINK_SPEED_10GB_FULL:
1789 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1791 case IXGBE_LINK_SPEED_1GB_FULL:
1792 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1795 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1796 switch (adapter->link_speed) {
1797 case IXGBE_LINK_SPEED_10GB_FULL:
1798 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1800 case IXGBE_LINK_SPEED_1GB_FULL:
1801 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1804 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1805 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1806 switch (adapter->link_speed) {
1807 case IXGBE_LINK_SPEED_10GB_FULL:
1808 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1810 case IXGBE_LINK_SPEED_1GB_FULL:
1811 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1814 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1815 switch (adapter->link_speed) {
1816 case IXGBE_LINK_SPEED_10GB_FULL:
1817 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1821 ** XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX) reporting; substitutes similar media types
 * when the IFM_ETH_XTYPE definitions are not available. */
1824 #ifndef IFM_ETH_XTYPE
1825 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1826 switch (adapter->link_speed) {
1827 case IXGBE_LINK_SPEED_10GB_FULL:
1828 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1830 case IXGBE_LINK_SPEED_2_5GB_FULL:
1831 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1833 case IXGBE_LINK_SPEED_1GB_FULL:
1834 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1837 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1838 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1839 switch (adapter->link_speed) {
1840 case IXGBE_LINK_SPEED_10GB_FULL:
1841 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1843 case IXGBE_LINK_SPEED_2_5GB_FULL:
1844 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1846 case IXGBE_LINK_SPEED_1GB_FULL:
1847 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1851 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1852 switch (adapter->link_speed) {
1853 case IXGBE_LINK_SPEED_10GB_FULL:
1854 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1856 case IXGBE_LINK_SPEED_2_5GB_FULL:
1857 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1859 case IXGBE_LINK_SPEED_1GB_FULL:
1860 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1863 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1864 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1865 switch (adapter->link_speed) {
1866 case IXGBE_LINK_SPEED_10GB_FULL:
1867 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1869 case IXGBE_LINK_SPEED_2_5GB_FULL:
1870 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1872 case IXGBE_LINK_SPEED_1GB_FULL:
1873 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1878 /* If nothing is recognized... */
1879 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1880 ifmr->ifm_active |= IFM_UNKNOWN;
1882 #if __FreeBSD_version >= 900025
1883 /* Display current flow control setting used on link */
1884 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1885 hw->fc.current_mode == ixgbe_fc_full)
1886 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1887 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1888 hw->fc.current_mode == ixgbe_fc_full)
1889 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1892 IXGBE_CORE_UNLOCK(adapter);
1897 /*********************************************************************
1899 * Media Ioctl callback
1901 * This routine is called when the user changes speed/duplex using
1902 * media/mediaopt option with ifconfig.
1904 **********************************************************************/
/*
 * ixgbe_media_change
 *
 * ifmedia change callback: maps the requested IFM_* subtype to an
 * ixgbe_link_speed bitmask, programs the link via setup_link(), and
 * records the advertised speeds in adapter->advertise.  Several case
 * labels fall through deliberately so higher-speed media also
 * advertise the lower speeds (the original break lines are not
 * visible in this extracted fragment).
 */
1906 ixgbe_media_change(struct ifnet * ifp)
1908 struct adapter *adapter = ifp->if_softc;
1909 struct ifmedia *ifm = &adapter->media;
1910 struct ixgbe_hw *hw = &adapter->hw;
1911 ixgbe_link_speed speed = 0;
1913 INIT_DEBUGOUT("ixgbe_media_change: begin");
1915 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software. */
1918 if (hw->phy.media_type == ixgbe_media_type_backplane)
1922 ** We don't actually need to check against the supported
1923 ** media types of the adapter; ifmedia will take care of
1926 #ifndef IFM_ETH_XTYPE
1927 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1930 speed |= IXGBE_LINK_SPEED_100_FULL;
1932 case IFM_10G_SR: /* KR, too */
1934 case IFM_10G_CX4: /* KX4 */
1935 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1936 case IFM_10G_TWINAX:
1937 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1940 speed |= IXGBE_LINK_SPEED_100_FULL;
1943 case IFM_1000_CX: /* KX */
1944 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1947 speed |= IXGBE_LINK_SPEED_100_FULL;
/* IFM_ETH_XTYPE variant of the same mapping. */
1953 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1956 speed |= IXGBE_LINK_SPEED_100_FULL;
1961 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1962 case IFM_10G_TWINAX:
1963 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1966 speed |= IXGBE_LINK_SPEED_100_FULL;
1970 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1973 speed |= IXGBE_LINK_SPEED_100_FULL;
1980 hw->mac.autotry_restart = TRUE;
1981 hw->mac.ops.setup_link(hw, speed, TRUE);
1982 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1983 adapter->advertise = 0;
/* Encode advertised speeds: bit2=10G, bit1=1G, bit0=100M. */
1985 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
1986 adapter->advertise |= 1 << 2;
1987 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
1988 adapter->advertise |= 1 << 1;
1989 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
1990 adapter->advertise |= 1 << 0;
1996 device_printf(adapter->dev, "Invalid media type!\n");
/*
 * ixgbe_set_promisc
 *
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE) from
 * the interface flags: IFF_PROMISC enables both, IFF_ALLMULTI enables
 * MPE only, and otherwise MPE is cleared when the multicast address
 * count fits in the hardware filter table.
 */
2001 ixgbe_set_promisc(struct adapter *adapter)
2004 struct ifnet *ifp = adapter->ifp;
2007 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2008 reg_rctl &= (~IXGBE_FCTRL_UPE);
2009 if (ifp->if_flags & IFF_ALLMULTI)
2010 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
/* Otherwise count the link-layer multicast addresses, capped at the
 * hardware filter table size. */
2012 struct ifmultiaddr *ifma;
2013 #if __FreeBSD_version < 800000
2016 if_maddr_rlock(ifp);
2018 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2019 if (ifma->ifma_addr->sa_family != AF_LINK)
2021 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2025 #if __FreeBSD_version < 800000
2026 IF_ADDR_UNLOCK(ifp);
2028 if_maddr_runlock(ifp);
2031 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2032 reg_rctl &= (~IXGBE_FCTRL_MPE);
2033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2035 if (ifp->if_flags & IFF_PROMISC) {
2036 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2038 } else if (ifp->if_flags & IFF_ALLMULTI) {
2039 reg_rctl |= IXGBE_FCTRL_MPE;
2040 reg_rctl &= ~IXGBE_FCTRL_UPE;
2041 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2047 /*********************************************************************
2050 * This routine is called whenever multicast address list is updated.
2052 **********************************************************************/
2053 #define IXGBE_RAR_ENTRIES 16
/*
 * ixgbe_set_multi
 *
 * Rebuild the hardware multicast filter table from the interface's
 * multicast address list, then set the FCTRL promiscuous bits to
 * match the current flags and overflow state.
 */
2056 ixgbe_set_multi(struct adapter *adapter)
2060 struct ifmultiaddr *ifma;
2061 struct ixgbe_mc_addr *mta;
2063 struct ifnet *ifp = adapter->ifp;
2065 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2068 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2070 #if __FreeBSD_version < 800000
2073 if_maddr_rlock(ifp);
/* Copy each link-layer multicast address into the mta array, tagged
 * with this PF's pool, up to the hardware table limit. */
2075 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2076 if (ifma->ifma_addr->sa_family != AF_LINK)
2078 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2080 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2081 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2082 mta[mcnt].vmdq = adapter->pool;
2085 #if __FreeBSD_version < 800000
2086 IF_ADDR_UNLOCK(ifp);
2088 if_maddr_runlock(ifp);
2091 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2092 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2093 if (ifp->if_flags & IFF_PROMISC)
2094 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2095 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2096 ifp->if_flags & IFF_ALLMULTI) {
2097 fctrl |= IXGBE_FCTRL_MPE;
2098 fctrl &= ~IXGBE_FCTRL_UPE;
2100 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program the table when it did not overflow (MPE covers the
 * overflow case). */
2104 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2105 update_ptr = (u8 *)mta;
2106 ixgbe_update_mc_addr_list(&adapter->hw,
2107 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2114 * This is an iterator function now needed by the multicast
2115 * shared code. It simply feeds the shared code routine the
2116 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * ixgbe_mc_array_itr
 *
 * Shared-code iterator callback: interprets *update_ptr as the next
 * struct ixgbe_mc_addr in the array built by ixgbe_set_multi() and
 * advances the cursor by one entry per call.
 */
2119 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2121 struct ixgbe_mc_addr *mta;
2123 mta = (struct ixgbe_mc_addr *)*update_ptr;
2126 *update_ptr = (u8*)(mta + 1);
2131 /*********************************************************************
2134 * This routine checks for link status, updates statistics,
2135 * and runs the watchdog check.
2137 **********************************************************************/
/*
 * ixgbe_local_timer
 *
 * Periodic (1 Hz) callout: probes pluggable optics, refreshes link
 * state and statistics, tracks per-queue hang state, and either
 * rearms queues that still have work or resets the adapter when
 * every queue reports hung (the watchdog tail at the bottom).
 * Caller must hold the core mutex (asserted below).
 */
2140 ixgbe_local_timer(void *arg)
2142 struct adapter *adapter = arg;
2143 device_t dev = adapter->dev;
2144 struct ix_queue *que = adapter->queues;
2148 mtx_assert(&adapter->core_mtx, MA_OWNED);
2150 /* Check for pluggable optics */
2151 if (adapter->sfp_probe)
2152 if (!ixgbe_sfp_probe(adapter))
2153 goto out; /* Nothing to do */
2155 ixgbe_update_link_status(adapter);
2156 ixgbe_update_stats_counters(adapter);
2159 ** Check the TX queues status
2160 ** - mark hung queues so we don't schedule on them
2161 ** - watchdog only if all queues show hung
2163 for (int i = 0; i < adapter->num_queues; i++, que++) {
2164 /* Keep track of queues with work for soft irq */
2166 queues |= ((u64)1 << que->me);
2168 ** Each time txeof runs without cleaning, but there
2169 ** are uncleaned descriptors it increments busy. If
2170 ** we get to the MAX we declare it hung.
2172 if (que->busy == IXGBE_QUEUE_HUNG) {
2174 /* Mark the queue as inactive */
2175 adapter->active_queues &= ~((u64)1 << que->me);
2178 /* Check if we've come back from hung */
2179 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2180 adapter->active_queues |= ((u64)1 << que->me);
2182 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2183 device_printf(dev,"Warning queue %d "
2184 "appears to be hung!\n", i);
2185 que->txr->busy = IXGBE_QUEUE_HUNG;
2191 /* Only truly watchdog if all queues show hung */
2192 if (hung == adapter->num_queues)
2194 else if (queues != 0) { /* Force an IRQ on queues with work */
2195 ixgbe_rearm_queues(adapter, queues);
2199 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark the interface down and reinitialize. */
2203 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2204 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2205 adapter->watchdog_events++;
2206 ixgbe_init_locked(adapter);
2211 ** Note: this routine updates the OS on the link state
2212 ** the real check of the hardware only happens with
2213 ** a link interrupt.
/*
 * ixgbe_update_link_status
 *
 * Propagate the cached adapter->link_up state to the OS: on an
 * up-transition, report the speed, re-apply flow control and DMA
 * coalescing, and notify the stack (and any VFs); on a
 * down-transition, just notify.  The hardware itself is only polled
 * from the link interrupt path.
 */
2216 ixgbe_update_link_status(struct adapter *adapter)
2218 struct ifnet *ifp = adapter->ifp;
2219 device_t dev = adapter->dev;
2221 if (adapter->link_up){
2222 if (adapter->link_active == FALSE) {
/* link_speed 128 is the hardware encoding for 10 Gbps here. */
2224 device_printf(dev,"Link is up %d Gbps %s \n",
2225 ((adapter->link_speed == 128)? 10:1),
2227 adapter->link_active = TRUE;
2228 /* Update any Flow Control changes */
2229 ixgbe_fc_enable(&adapter->hw);
2230 /* Update DMA coalescing config */
2231 ixgbe_config_dmac(adapter);
2232 if_link_state_change(ifp, LINK_STATE_UP);
2234 ixgbe_ping_all_vfs(adapter);
2237 } else { /* Link down */
2238 if (adapter->link_active == TRUE) {
2240 device_printf(dev,"Link is Down\n");
2241 if_link_state_change(ifp, LINK_STATE_DOWN);
2242 adapter->link_active = FALSE;
2244 ixgbe_ping_all_vfs(adapter);
2253 /*********************************************************************
2255 * This routine disables all traffic on the adapter by issuing a
2256 * global reset on the MAC and deallocates TX/RX buffers.
2258 **********************************************************************/
/*
 * ixgbe_stop
 *
 * Quiesce the adapter: disable interrupts, stop the timer, mark the
 * ifnet not-running, reset the MAC, disable the TX laser, and report
 * link down.  Caller must hold the core mutex (asserted below).
 */
2261 ixgbe_stop(void *arg)
2264 struct adapter *adapter = arg;
2265 struct ixgbe_hw *hw = &adapter->hw;
2268 mtx_assert(&adapter->core_mtx, MA_OWNED);
2270 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2271 ixgbe_disable_intr(adapter);
2272 callout_stop(&adapter->timer);
2274 /* Let the stack know...*/
2275 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the stopped flag first so ixgbe_stop_adapter() runs. */
2278 hw->adapter_stopped = FALSE;
2279 ixgbe_stop_adapter(hw);
2280 if (hw->mac.type == ixgbe_mac_82599EB)
2281 ixgbe_stop_mac_link_on_d3_82599(hw);
2282 /* Turn off the laser - noop with no optics */
2283 ixgbe_disable_tx_laser(hw);
2285 /* Update the stack */
2286 adapter->link_up = FALSE;
2287 ixgbe_update_link_status(adapter);
2289 /* reprogram the RAR[0] in case user changed it. */
2290 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2296 /*********************************************************************
2298 * Determine hardware revision.
2300 **********************************************************************/
/*
 * ixgbe_identify_hardware
 *
 * Read PCI identification (vendor/device/revision/subsystem IDs) into
 * the shared-code hw struct, enable bus mastering, resolve the MAC
 * type, and set the scatter-gather segment count for this family.
 */
2302 ixgbe_identify_hardware(struct adapter *adapter)
2304 device_t dev = adapter->dev;
2305 struct ixgbe_hw *hw = &adapter->hw;
2307 /* Save off the information about this board */
2308 hw->vendor_id = pci_get_vendor(dev);
2309 hw->device_id = pci_get_device(dev);
2310 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2311 hw->subsystem_vendor_id =
2312 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2313 hw->subsystem_device_id =
2314 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2317 ** Make sure BUSMASTER is set
2319 pci_enable_busmaster(dev);
2321 /* We need this here to set the num_segs below */
2322 ixgbe_set_mac_type(hw);
2324 /* Pick up the 82599 settings */
2325 if (hw->mac.type != ixgbe_mac_82598EB) {
2326 hw->phy.smart_speed = ixgbe_smart_speed;
2327 adapter->num_segs = IXGBE_82599_SCATTER;
2329 adapter->num_segs = IXGBE_82598_SCATTER;
2334 /*********************************************************************
2336 * Determine optic type
2338 **********************************************************************/
/*
 * ixgbe_setup_optics
 *
 * Query the supported physical layer from the shared code and cache
 * it in adapter->phy_layer, then pick the first matching IFM_* media
 * subtype for adapter->optics.  Checks are ordered most-specific
 * first; falls back to IFM_ETHER | IFM_AUTO when nothing matches.
 */
2340 ixgbe_setup_optics(struct adapter *adapter)
2342 struct ixgbe_hw *hw = &adapter->hw;
2345 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2347 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2348 adapter->optics = IFM_10G_T;
2352 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2353 adapter->optics = IFM_1000_T;
2357 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2358 adapter->optics = IFM_1000_SX;
2362 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2363 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2364 adapter->optics = IFM_10G_LR;
2368 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2369 adapter->optics = IFM_10G_SR;
2373 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2374 adapter->optics = IFM_10G_TWINAX;
2378 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2379 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2380 adapter->optics = IFM_10G_CX4;
2384 /* If we get here just set the default */
2385 adapter->optics = IFM_ETHER | IFM_AUTO;
2389 /*********************************************************************
2391 * Setup the Legacy or MSI Interrupt handler
 *
 * Allocates a single shared IRQ resource, creates the per-queue and
 * link/SFP taskqueues, and registers ixgbe_legacy_irq as a filter-less
 * MPSAFE handler. On handler-registration failure the taskqueues are
 * freed before returning the error.
2393 **********************************************************************/
2395 ixgbe_allocate_legacy(struct adapter *adapter)
2397 device_t dev = adapter->dev;
2398 struct ix_queue *que = adapter->queues;
2399 #ifndef IXGBE_LEGACY_TX
2400 struct tx_ring *txr = adapter->tx_rings;
2405 if (adapter->msix == 1)
2408 /* We allocate a single interrupt resource */
2409 adapter->res = bus_alloc_resource_any(dev,
2410 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2411 if (adapter->res == NULL) {
2412 device_printf(dev, "Unable to allocate bus resource: "
2418 * Try allocating a fast interrupt and the associated deferred
2419 * processing contexts.
2421 #ifndef IXGBE_LEGACY_TX
2422 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2424 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2425 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2426 taskqueue_thread_enqueue, &que->tq);
2427 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2428 device_get_nameunit(adapter->dev));
2430 /* Tasklets for Link, SFP and Multispeed Fiber */
2431 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2432 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2433 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2434 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2436 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2438 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2439 taskqueue_thread_enqueue, &adapter->tq);
2440 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2441 device_get_nameunit(adapter->dev));
2443 if ((error = bus_setup_intr(dev, adapter->res,
2444 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2445 que, &adapter->tag)) != 0) {
2446 device_printf(dev, "Failed to register fast interrupt "
2447 "handler: %d\n", error);
 /* Tear down what we created above before bailing out */
2448 taskqueue_free(que->tq);
2449 taskqueue_free(adapter->tq);
2454 /* For simplicity in the handlers */
2455 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2461 /*********************************************************************
2463 * Setup MSIX Interrupt resources and handlers
 *
 * Allocates one IRQ vector per RX/TX queue pair plus one for link/admin
 * events, binds queue vectors to CPUs (RSS-aware when RSS is compiled
 * in), and starts the per-queue and link taskqueues.
2465 **********************************************************************/
2467 ixgbe_allocate_msix(struct adapter *adapter)
2469 device_t dev = adapter->dev;
2470 struct ix_queue *que = adapter->queues;
2471 struct tx_ring *txr = adapter->tx_rings;
2472 int error, rid, vector = 0;
2480 * If we're doing RSS, the number of queues needs to
2481 * match the number of RSS buckets that are configured.
2483 * + If there's more queues than RSS buckets, we'll end
2484 * up with queues that get no traffic.
2486 * + If there's more RSS buckets than queues, we'll end
2487 * up having multiple RSS buckets map to the same queue,
2488 * so there'll be some contention.
2490 if (adapter->num_queues != rss_getnumbuckets()) {
2492 "%s: number of queues (%d) != number of RSS buckets (%d)"
2493 "; performance will be impacted.\n",
2495 adapter->num_queues,
2496 rss_getnumbuckets());
 /* One vector per queue; 'rid' presumably derives from 'vector' (elided) */
2500 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2502 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2503 RF_SHAREABLE | RF_ACTIVE);
2504 if (que->res == NULL) {
2505 device_printf(dev,"Unable to allocate"
2506 " bus resource: que interrupt [%d]\n", vector);
2509 /* Set the handler function */
2510 error = bus_setup_intr(dev, que->res,
2511 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2512 ixgbe_msix_que, que, &que->tag);
2515 device_printf(dev, "Failed to register QUE handler");
2518 #if __FreeBSD_version >= 800504
2519 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2522 adapter->active_queues |= (u64)(1 << que->msix);
2525 * The queue ID is used as the RSS layer bucket ID.
2526 * We look up the queue ID -> RSS CPU ID and select
2529 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2532 * Bind the msix vector, and thus the
2533 * rings to the corresponding cpu.
2535 * This just happens to match the default RSS round-robin
2536 * bucket -> queue -> CPU allocation.
2538 if (adapter->num_queues > 1)
2541 if (adapter->num_queues > 1)
2542 bus_bind_intr(dev, que->res, cpu_id)
2546 "Bound RSS bucket %d to CPU %d\n",
2550 "Bound queue %d to cpu %d\n",
2553 #endif /* IXGBE_DEBUG */
2556 #ifndef IXGBE_LEGACY_TX
2557 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2559 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2560 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2561 taskqueue_thread_enqueue, &que->tq);
 /* With RSS: pin the taskqueue thread to the same CPU as the vector */
2563 CPU_SETOF(cpu_id, &cpu_mask);
2564 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2567 device_get_nameunit(adapter->dev),
2570 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2571 device_get_nameunit(adapter->dev), i);
 /* And now the final vector: link/admin interrupt */
2577 adapter->res = bus_alloc_resource_any(dev,
2578 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2579 if (!adapter->res) {
2580 device_printf(dev,"Unable to allocate"
2581 " bus resource: Link interrupt [%d]\n", rid);
2584 /* Set the link handler function */
2585 error = bus_setup_intr(dev, adapter->res,
2586 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2587 ixgbe_msix_link, adapter, &adapter->tag);
2589 adapter->res = NULL;
2590 device_printf(dev, "Failed to register LINK handler");
2593 #if __FreeBSD_version >= 800504
2594 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2596 adapter->vector = vector;
2597 /* Tasklets for Link, SFP and Multispeed Fiber */
2598 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2599 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2600 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2602 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2604 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2606 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2608 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2609 taskqueue_thread_enqueue, &adapter->tq);
2610 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2611 device_get_nameunit(adapter->dev));
2617 * Setup Either MSI/X or MSI
 *
 * Returns the number of vectors obtained. Tries MSI-X first (mapping the
 * MSI-X table BAR, sizing the queue count from CPUs/RSS/tunables, and
 * requesting one vector per queue plus one for link); on failure releases
 * everything and falls back to a single MSI, then to legacy INTx.
2620 ixgbe_setup_msix(struct adapter *adapter)
2622 device_t dev = adapter->dev;
2623 int rid, want, queues, msgs;
2625 /* Override by tuneable */
2626 if (ixgbe_enable_msix == 0)
2629 /* First try MSI/X */
2630 msgs = pci_msix_count(dev);
2633 rid = PCIR_BAR(MSIX_82598_BAR);
2634 adapter->msix_mem = bus_alloc_resource_any(dev,
2635 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2636 if (adapter->msix_mem == NULL) {
2637 rid += 4; /* 82599 maps in higher BAR */
2638 adapter->msix_mem = bus_alloc_resource_any(dev,
2639 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2641 if (adapter->msix_mem == NULL) {
2642 /* May not be enabled */
2643 device_printf(adapter->dev,
2644 "Unable to map MSIX table \n");
2648 /* Figure out a reasonable auto config value */
2649 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2652 /* If we're doing RSS, clamp at the number of RSS buckets */
2653 if (queues > rss_getnumbuckets())
2654 queues = rss_getnumbuckets();
 /* Honor an explicit hw.ix.num_queues tunable if set */
2657 if (ixgbe_num_queues != 0)
2658 queues = ixgbe_num_queues;
2659 /* Set max queues to 8 when autoconfiguring */
2660 else if ((ixgbe_num_queues == 0) && (queues > 8))
2663 /* reflect correct sysctl value */
2664 ixgbe_num_queues = queues;
2667 ** Want one vector (RX/TX pair) per queue
2668 ** plus an additional for Link.
2674 device_printf(adapter->dev,
2675 "MSIX Configuration Problem, "
2676 "%d vectors but %d queues wanted!\n",
2680 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2681 device_printf(adapter->dev,
2682 "Using MSIX interrupts with %d vectors\n", msgs);
2683 adapter->num_queues = queues;
2687 ** If MSIX alloc failed or provided us with
2688 ** less than needed, free and fall through to MSI
2690 pci_release_msi(dev);
 /* Release the MSI-X table mapping before the MSI fallback */
2693 if (adapter->msix_mem != NULL) {
2694 bus_release_resource(dev, SYS_RES_MEMORY,
2695 rid, adapter->msix_mem);
2696 adapter->msix_mem = NULL;
2699 if (pci_alloc_msi(dev, &msgs) == 0) {
2700 device_printf(adapter->dev, "Using an MSI interrupt\n");
2703 device_printf(adapter->dev, "Using a Legacy interrupt\n");
 /*
  * Map BAR(0) register space, record the bus-space tag/handle used by the
  * register READ/WRITE macros, and set up MSI-X/MSI (adapter->msix holds
  * the resulting vector count; 1 for plain MSI).
  */
2709 ixgbe_allocate_pci_resources(struct adapter *adapter)
2712 device_t dev = adapter->dev;
2715 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2718 if (!(adapter->pci_mem)) {
2719 device_printf(dev, "Unable to allocate bus resource: memory\n");
2723 /* Save bus_space values for READ/WRITE_REG macros */
2724 adapter->osdep.mem_bus_space_tag =
2725 rman_get_bustag(adapter->pci_mem);
2726 adapter->osdep.mem_bus_space_handle =
2727 rman_get_bushandle(adapter->pci_mem);
2728 /* Set hw values for shared code */
2729 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2730 adapter->hw.back = adapter;
2732 /* Default to 1 queue if MSI-X setup fails */
2733 adapter->num_queues = 1;
2736 ** Now setup MSI or MSI-X, should
2737 ** return us the number of supported
2738 ** vectors. (Will be 1 for MSI)
2740 adapter->msix = ixgbe_setup_msix(adapter);
 /*
  * Release everything acquired in ixgbe_allocate_pci_resources /
  * ixgbe_allocate_msix: per-queue interrupt handlers and IRQs, the
  * link/legacy interrupt, MSI vectors, the MSI-X table BAR, and BAR(0).
  */
2745 ixgbe_free_pci_resources(struct adapter * adapter)
2747 struct ix_queue *que = adapter->queues;
2748 device_t dev = adapter->dev;
 /* MSI-X table BAR rid differs between 82598 and later MACs */
2751 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2752 memrid = PCIR_BAR(MSIX_82598_BAR);
2754 memrid = PCIR_BAR(MSIX_82599_BAR);
2757 ** There is a slight possibility of a failure mode
2758 ** in attach that will result in entering this function
2759 ** before interrupt resources have been initialized, and
2760 ** in that case we do not want to execute the loops below
2761 ** We can detect this reliably by the state of the adapter
2764 if (adapter->res == NULL)
2768 ** Release all msix queue resources:
2770 for (int i = 0; i < adapter->num_queues; i++, que++) {
2771 rid = que->msix + 1;
2772 if (que->tag != NULL) {
2773 bus_teardown_intr(dev, que->res, que->tag);
2776 if (que->res != NULL)
2777 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2781 /* Clean the Legacy or Link interrupt last */
2782 if (adapter->vector) /* we are doing MSIX */
2783 rid = adapter->vector + 1;
2785 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2787 if (adapter->tag != NULL) {
2788 bus_teardown_intr(dev, adapter->res, adapter->tag);
2789 adapter->tag = NULL;
2791 if (adapter->res != NULL)
2792 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2796 pci_release_msi(dev);
2798 if (adapter->msix_mem != NULL)
2799 bus_release_resource(dev, SYS_RES_MEMORY,
2800 memrid, adapter->msix_mem);
2802 if (adapter->pci_mem != NULL)
2803 bus_release_resource(dev, SYS_RES_MEMORY,
2804 PCIR_BAR(0), adapter->pci_mem);
2809 /*********************************************************************
2811 * Setup networking device structure and register an interface.
 *
 * Allocates the ifnet, wires up init/ioctl/transmit entry points and
 * capability flags, attaches the Ethernet layer, and initializes the
 * ifmedia list of supported media types (autoselect by default).
2813 **********************************************************************/
2815 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2819 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2821 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2823 device_printf(dev, "can not allocate ifnet structure\n");
2826 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2827 ifp->if_baudrate = IF_Gbps(10);
2828 ifp->if_init = ixgbe_init;
2829 ifp->if_softc = adapter;
2830 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2831 ifp->if_ioctl = ixgbe_ioctl;
2832 #if __FreeBSD_version >= 1100036
2833 if_setgetcounterfn(ifp, ixgbe_get_counter);
2835 #if __FreeBSD_version >= 1100045
2836 /* TSO parameters */
2837 ifp->if_hw_tsomax = 65518;
2838 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2839 ifp->if_hw_tsomaxsegsize = 2048;
2841 #ifndef IXGBE_LEGACY_TX
2842 ifp->if_transmit = ixgbe_mq_start;
2843 ifp->if_qflush = ixgbe_qflush;
2845 ifp->if_start = ixgbe_start;
2846 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2847 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2848 IFQ_SET_READY(&ifp->if_snd);
2851 ether_ifattach(ifp, adapter->hw.mac.addr);
2853 adapter->max_frame_size =
2854 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2857 * Tell the upper layer(s) we support long frames.
2859 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2861 /* Set capability flags */
2862 ifp->if_capabilities |= IFCAP_RXCSUM
2869 | IFCAP_VLAN_HWTAGGING
2876 /* Enable the above capabilities by default */
2877 ifp->if_capenable = ifp->if_capabilities;
2880 ** Don't turn this on by default, if vlans are
2881 ** created on another pseudo device (eg. lagg)
2882 ** then vlan events are not passed thru, breaking
2883 ** operation, but with HW FILTER off it works. If
2884 ** using vlans directly on the ixgbe driver you can
2885 ** enable this and get full hardware tag filtering.
2887 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2890 * Specify the media types supported by this adapter and register
2891 * callbacks to update media and link information
2893 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2894 ixgbe_media_status);
2896 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2897 ixgbe_add_media_types(adapter);
2899 /* Set autoselect media by default */
2900 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 /*
  * Populate adapter->media with ifmedia entries for every physical layer
  * the hardware reports. Backplane (KR/KX4/KX) types are added natively
  * when IFM_ETH_XTYPE exists, otherwise mapped to the closest legacy
  * ifmedia type with a console note. Always ends with IFM_AUTO.
  */
2906 ixgbe_add_media_types(struct adapter *adapter)
2908 struct ixgbe_hw *hw = &adapter->hw;
2909 device_t dev = adapter->dev;
2912 layer = adapter->phy_layer;
2914 /* Media types with matching FreeBSD media defines */
2915 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2916 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2917 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2918 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2919 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2920 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2922 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2923 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2924 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2927 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2928 if (hw->phy.multispeed_fiber)
2929 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2931 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2932 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2933 if (hw->phy.multispeed_fiber)
2934 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2935 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2936 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2937 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2938 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2940 #ifdef IFM_ETH_XTYPE
2941 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2942 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2943 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2944 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2945 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2946 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
 /* No IFM_ETH_XTYPE: approximate backplane media with legacy types */
2948 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2949 device_printf(dev, "Media supported: 10GbaseKR\n");
2950 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2951 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2953 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2954 device_printf(dev, "Media supported: 10GbaseKX4\n");
2955 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2956 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2958 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2959 device_printf(dev, "Media supported: 1000baseKX\n");
2960 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2961 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2964 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2965 device_printf(dev, "Media supported: 1000baseBX\n");
2967 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2968 ifmedia_add(&adapter->media,
2969 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2970 ifmedia_add(&adapter->media,
2971 IFM_ETHER | IFM_1000_T, 0, NULL);
2974 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 /*
  * Configure link: for SFP modules defer to the mod taskqueue; otherwise
  * check current link state and program the MAC's link setup with the
  * advertised (or capability-derived) autoneg speeds.
  */
2978 ixgbe_config_link(struct adapter *adapter)
2980 struct ixgbe_hw *hw = &adapter->hw;
2981 u32 autoneg, err = 0;
2982 bool sfp, negotiate;
2984 sfp = ixgbe_is_sfp(hw);
 /* SFP path: module insertion handling runs from the taskqueue */
2987 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2989 if (hw->mac.ops.check_link)
2990 err = ixgbe_check_link(hw, &adapter->link_speed,
2991 &adapter->link_up, FALSE);
2994 autoneg = hw->phy.autoneg_advertised;
2995 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2996 err = hw->mac.ops.get_link_capabilities(hw,
2997 &autoneg, &negotiate);
3000 if (hw->mac.ops.setup_link)
3001 err = hw->mac.ops.setup_link(hw,
3002 autoneg, adapter->link_up);
3009 /*********************************************************************
3011 * Enable transmit units.
 *
 * Programs each TX ring's descriptor base/length and head/tail
 * registers, disables head writeback / relaxed descriptor write
 * ordering, and on non-82598 MACs enables DMA TX and programs MTQC
 * (with the arbiter briefly disabled as required).
3013 **********************************************************************/
3015 ixgbe_initialize_transmit_units(struct adapter *adapter)
3017 struct tx_ring *txr = adapter->tx_rings;
3018 struct ixgbe_hw *hw = &adapter->hw;
3020 /* Setup the Base and Length of the Tx Descriptor Ring */
3021 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3022 u64 tdba = txr->txdma.dma_paddr;
3026 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3027 (tdba & 0x00000000ffffffffULL));
3028 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3029 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3030 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3032 /* Setup the HW Tx Head and Tail descriptor pointers */
3033 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3034 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3036 /* Cache the tail address */
3037 txr->tail = IXGBE_TDT(j);
3039 /* Disable Head Writeback */
3041 * Note: for X550 series devices, these registers are actually
3042 * prefixed with TPH_ instead of DCA_, but the addresses and
3043 * fields remain the same.
3045 switch (hw->mac.type) {
3046 case ixgbe_mac_82598EB:
3047 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3050 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3053 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3054 switch (hw->mac.type) {
3055 case ixgbe_mac_82598EB:
3056 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3059 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3065 if (hw->mac.type != ixgbe_mac_82598EB) {
3066 u32 dmatxctl, rttdcs;
3068 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3070 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3071 dmatxctl |= IXGBE_DMATXCTL_TE;
3072 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3073 /* Disable arbiter to set MTQC */
3074 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3075 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3076 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3078 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3080 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
 /* Re-enable the arbiter */
3082 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3083 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 /*
  * Program the RSS redirection table (RETA/ERETA), hash key (RSSRK), and
  * hash-field selection (MRQC). With the RSS kernel option the key and
  * bucket mapping come from the stack; otherwise a random key and a
  * round-robin queue mapping are used, with UDP hashing disabled.
  */
3090 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3092 struct ixgbe_hw *hw = &adapter->hw;
3093 u32 reta = 0, mrqc, rss_key[10];
3094 int queue_id, table_size, index_mult;
3096 u32 rss_hash_config;
3099 enum ixgbe_iov_mode mode;
3103 /* Fetch the configured RSS key */
3104 rss_getkey((uint8_t *) &rss_key);
3106 /* set up random bits */
3107 arc4rand(&rss_key, sizeof(rss_key), 0);
3110 /* Set multiplier for RETA setup and table size based on MAC */
3113 switch (adapter->hw.mac.type) {
3114 case ixgbe_mac_82598EB:
3117 case ixgbe_mac_X550:
3118 case ixgbe_mac_X550EM_x:
3125 /* Set up the redirection table */
3126 for (int i = 0, j = 0; i < table_size; i++, j++) {
3127 if (j == adapter->num_queues) j = 0;
3130 * Fetch the RSS bucket id for the given indirection entry.
3131 * Cap it at the number of configured buckets (which is
3134 queue_id = rss_get_indirection_to_bucket(i);
3135 queue_id = queue_id % adapter->num_queues;
3137 queue_id = (j * index_mult);
3140 * The low 8 bits are for hash value (n+0);
3141 * The next 8 bits are for hash value (n+1), etc.
3144 reta = reta | ( ((uint32_t) queue_id) << 24);
 /* Flush every 4 entries; entries past 128 go to ERETA (X550) */
3147 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3149 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3154 /* Now fill our hash function seeds */
3155 for (int i = 0; i < 10; i++)
3156 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3158 /* Perform hash on these packet types */
3160 mrqc = IXGBE_MRQC_RSSEN;
3161 rss_hash_config = rss_gethashconfig();
3162 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3163 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3164 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3165 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3166 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3167 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3168 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3169 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3170 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3171 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3172 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3173 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3174 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3175 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3176 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3177 device_printf(adapter->dev,
3178 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3179 "but not supported\n", __func__);
3180 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3181 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3182 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3183 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3186 * Disable UDP - IP fragments aren't currently being handled
3187 * and so we end up with a mix of 2-tuple and 4-tuple
3190 mrqc = IXGBE_MRQC_RSSEN
3191 | IXGBE_MRQC_RSS_FIELD_IPV4
3192 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3193 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3194 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3195 | IXGBE_MRQC_RSS_FIELD_IPV6
3196 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3200 mode = ixgbe_get_iov_mode(adapter);
3201 mrqc |= ixgbe_get_mrqc(mode);
3203 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3207 /*********************************************************************
3209 * Setup receive registers and features.
 *
 * With receives disabled: enables broadcast acceptance, sets jumbo mode
 * from the MTU, programs each RX ring's descriptor base/length, SRRCTL
 * buffer sizing (with DROP_EN when multi-queue and no flow control),
 * head/tail pointers, packet-split type, RSS mapping, and RX checksum
 * offload bits.
3211 **********************************************************************/
3212 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3214 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3217 ixgbe_initialize_receive_units(struct adapter *adapter)
3219 struct rx_ring *rxr = adapter->rx_rings;
3220 struct ixgbe_hw *hw = &adapter->hw;
3221 struct ifnet *ifp = adapter->ifp;
3222 u32 bufsz, fctrl, srrctl, rxcsum;
3226 * Make sure receives are disabled while
3227 * setting up the descriptor ring
3229 ixgbe_disable_rx(hw);
3231 /* Enable broadcasts */
3232 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3233 fctrl |= IXGBE_FCTRL_BAM;
3234 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3235 fctrl |= IXGBE_FCTRL_DPF;
3236 fctrl |= IXGBE_FCTRL_PMCF;
3238 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3240 /* Set for Jumbo Frames? */
3241 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3242 if (ifp->if_mtu > ETHERMTU)
3243 hlreg |= IXGBE_HLREG0_JUMBOEN;
3245 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3247 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3248 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3249 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3251 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3252 #endif /* DEV_NETMAP */
3253 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
 /* Convert the mbuf buffer size into SRRCTL BSIZEPKT units (1KB) */
3255 bufsz = (adapter->rx_mbuf_sz +
3256 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3258 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3259 u64 rdba = rxr->rxdma.dma_paddr;
3262 /* Setup the Base and Length of the Rx Descriptor Ring */
3263 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3264 (rdba & 0x00000000ffffffffULL));
3265 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3266 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3267 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3269 /* Set up the SRRCTL register */
3270 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3271 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3272 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3274 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3277 * Set DROP_EN iff we have no flow control and >1 queue.
3278 * Note that srrctl was cleared shortly before during reset,
3279 * so we do not need to clear the bit, but do it just in case
3280 * this code is moved elsewhere.
3282 if (adapter->num_queues > 1 &&
3283 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3284 srrctl |= IXGBE_SRRCTL_DROP_EN;
3286 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3289 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3291 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3292 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3293 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3295 /* Set the driver rx tail address */
3296 rxr->tail = IXGBE_RDT(rxr->me);
 /* Non-82598: program packet-split header types */
3299 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3300 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3301 IXGBE_PSRTYPE_UDPHDR |
3302 IXGBE_PSRTYPE_IPV4HDR |
3303 IXGBE_PSRTYPE_IPV6HDR;
3304 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3307 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3309 ixgbe_initialize_rss_mapping(adapter);
3311 if (adapter->num_queues > 1) {
3312 /* RSS and RX IPP Checksum are mutually exclusive */
3313 rxcsum |= IXGBE_RXCSUM_PCSD;
3316 if (ifp->if_capenable & IFCAP_RXCSUM)
3317 rxcsum |= IXGBE_RXCSUM_PCSD;
3319 /* This is useful for calculating UDP/IP fragment checksums */
3320 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3321 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3323 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3330 ** This routine is run via an vlan config EVENT,
3331 ** it enables us to use the HW Filter table since
3332 ** we can get the vlan id. This just creates the
3333 ** entry in the soft version of the VFTA, init will
3334 ** repopulate the real table.
3337 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3339 struct adapter *adapter = ifp->if_softc;
3342 if (ifp->if_softc != arg) /* Not our event */
3345 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3348 IXGBE_CORE_LOCK(adapter);
 /* VFTA is 128 words x 32 bits: word index from tag bits 5..11 */
3349 index = (vtag >> 5) & 0x7F;
3351 adapter->shadow_vfta[index] |= (1 << bit);
3352 ++adapter->num_vlans;
 /* Push the updated shadow table to hardware */
3353 ixgbe_setup_vlan_hw_support(adapter);
3354 IXGBE_CORE_UNLOCK(adapter);
3358 ** This routine is run via an vlan
3359 ** unconfig EVENT, remove our entry
3360 ** in the soft vfta.
3363 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3365 struct adapter *adapter = ifp->if_softc;
3368 if (ifp->if_softc != arg)
3371 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3374 IXGBE_CORE_LOCK(adapter);
 /* Clear the bit in the shadow VFTA mirroring ixgbe_register_vlan() */
3375 index = (vtag >> 5) & 0x7F;
3377 adapter->shadow_vfta[index] &= ~(1 << bit);
3378 --adapter->num_vlans;
3379 /* Re-init to load the changes */
3380 ixgbe_setup_vlan_hw_support(adapter);
3381 IXGBE_CORE_UNLOCK(adapter);
 /*
  * Program hardware VLAN support from the shadow VFTA: per-queue (or
  * global, on 82598) tag stripping, VFTA repopulation, and the VLNCTRL
  * filter-enable bit when IFCAP_VLAN_HWFILTER is set.
  */
3385 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3387 struct ifnet *ifp = adapter->ifp;
3388 struct ixgbe_hw *hw = &adapter->hw;
3389 struct rx_ring *rxr;
3394 ** We get here thru init_locked, meaning
3395 ** a soft reset, this has already cleared
3396 ** the VFTA and other state, so if there
3397 ** have been no vlan's registered do nothing.
3399 if (adapter->num_vlans == 0)
3402 /* Setup the queues for vlans */
3403 for (int i = 0; i < adapter->num_queues; i++) {
3404 rxr = &adapter->rx_rings[i];
3405 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3406 if (hw->mac.type != ixgbe_mac_82598EB) {
3407 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3408 ctrl |= IXGBE_RXDCTL_VME;
3409 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3411 rxr->vtag_strip = TRUE;
 /* Without HW filtering there is nothing further to program */
3414 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3417 ** A soft reset zero's out the VFTA, so
3418 ** we need to repopulate it now.
3420 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3421 if (adapter->shadow_vfta[i] != 0)
3422 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3423 adapter->shadow_vfta[i]);
3425 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3426 /* Enable the Filter Table if enabled */
3427 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3428 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3429 ctrl |= IXGBE_VLNCTRL_VFE;
 /* 82598 has a single global VLAN-strip enable in VLNCTRL */
3431 if (hw->mac.type == ixgbe_mac_82598EB)
3432 ctrl |= IXGBE_VLNCTRL_VME;
3433 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
 /*
  * Enable device interrupts: build the EIMS mask per MAC type (fan
  * failure, ECC, thermal sensor, SFP GPIO, flow director, mailbox as
  * applicable), set EIAC auto-clear for MSI-X, then enable every queue
  * vector and flush.
  */
3437 ixgbe_enable_intr(struct adapter *adapter)
3439 struct ixgbe_hw *hw = &adapter->hw;
3440 struct ix_queue *que = adapter->queues;
3443 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3444 /* Enable Fan Failure detection */
3445 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3446 mask |= IXGBE_EIMS_GPI_SDP1;
3448 switch (adapter->hw.mac.type) {
3449 case ixgbe_mac_82599EB:
3450 mask |= IXGBE_EIMS_ECC;
3451 /* Temperature sensor on some adapters */
3452 mask |= IXGBE_EIMS_GPI_SDP0;
3453 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3454 mask |= IXGBE_EIMS_GPI_SDP1;
3455 mask |= IXGBE_EIMS_GPI_SDP2;
3457 mask |= IXGBE_EIMS_FLOW_DIR;
3460 mask |= IXGBE_EIMS_MAILBOX;
3463 case ixgbe_mac_X540:
3464 /* Detect if Thermal Sensor is enabled */
3465 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3466 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3467 mask |= IXGBE_EIMS_TS;
3468 mask |= IXGBE_EIMS_ECC;
3470 mask |= IXGBE_EIMS_FLOW_DIR;
3473 case ixgbe_mac_X550:
3474 case ixgbe_mac_X550EM_x:
3475 /* MAC thermal sensor is automatically enabled */
3476 mask |= IXGBE_EIMS_TS;
3477 /* Some devices use SDP0 for important information */
3478 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3479 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3480 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3481 mask |= IXGBE_EIMS_ECC;
3483 mask |= IXGBE_EIMS_FLOW_DIR;
3486 mask |= IXGBE_EIMS_MAILBOX;
3493 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3495 /* With MSI-X we use auto clear */
3496 if (adapter->msix_mem) {
3497 mask = IXGBE_EIMS_ENABLE_MASK;
3498 /* Don't autoclear Link */
3499 mask &= ~IXGBE_EIMS_OTHER;
3500 mask &= ~IXGBE_EIMS_LSC;
3502 mask &= ~IXGBE_EIMS_MAILBOX;
3504 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3508 ** Now enable all queues, this is done separately to
3509 ** allow for handling the extended (beyond 32) MSIX
3510 ** vectors that can be used by 82599
3512 for (int i = 0; i < adapter->num_queues; i++, que++)
3513 ixgbe_enable_queue(adapter, que->msix);
3515 IXGBE_WRITE_FLUSH(hw);
 /*
  * Mask all device interrupts: clear MSI-X auto-clear (EIAC), then write
  * the EIMC mask register(s) — one register on 82598, EIMC plus the two
  * extended EIMC_EX registers on later MACs — and flush.
  */
3521 ixgbe_disable_intr(struct adapter *adapter)
3523 if (adapter->msix_mem)
3524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3525 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3532 IXGBE_WRITE_FLUSH(&adapter->hw);
3537 ** Get the width and transaction speed of
3538 ** the slot this adapter is plugged into.
/*
** Most devices use the shared-code ixgbe_get_bus_info(); the
** 82599 QSFP quad-port NIC sits behind a bridge, so for it we walk
** up the PCI tree and read the slot's PCIe Link Status register
** directly. Finishes by warning if the slot's bandwidth is below
** what the adapter needs for full throughput.
*/
3541 ixgbe_get_slot_info(struct adapter *adapter)
3543 device_t dev = adapter->dev;
3544 struct ixgbe_hw *hw = &adapter->hw;
3545 struct ixgbe_mac_info *mac = &hw->mac;
3549 /* For most devices simply call the shared code routine */
3550 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3551 ixgbe_get_bus_info(hw);
3552 /* These devices don't use PCI-E */
3553 switch (hw->mac.type) {
3554 case ixgbe_mac_X550EM_x:
3562 ** For the Quad port adapter we need to parse back
3563 ** up the PCI tree to find the speed of the expansion
3564 ** slot into which this adapter is plugged. A bit more work.
/* Each hop skips the intermediate pcib device node, hence the double
   device_get_parent() — first to the internal bridge, then to the slot */
3566 dev = device_get_parent(device_get_parent(dev));
3568 device_printf(dev, "parent pcib = %x,%x,%x\n",
3569 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3571 dev = device_get_parent(device_get_parent(dev));
3573 device_printf(dev, "slot pcib = %x,%x,%x\n",
3574 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3576 /* Now get the PCI Express Capabilities offset */
3577 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3578 /* ...and read the Link Status Register */
3579 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status bits */
3580 switch (link & IXGBE_PCI_LINK_WIDTH) {
3581 case IXGBE_PCI_LINK_WIDTH_1:
3582 hw->bus.width = ixgbe_bus_width_pcie_x1;
3584 case IXGBE_PCI_LINK_WIDTH_2:
3585 hw->bus.width = ixgbe_bus_width_pcie_x2;
3587 case IXGBE_PCI_LINK_WIDTH_4:
3588 hw->bus.width = ixgbe_bus_width_pcie_x4;
3590 case IXGBE_PCI_LINK_WIDTH_8:
3591 hw->bus.width = ixgbe_bus_width_pcie_x8;
3594 hw->bus.width = ixgbe_bus_width_unknown;
/* Decode negotiated link speed (2.5/5.0/8.0 GT/s) */
3598 switch (link & IXGBE_PCI_LINK_SPEED) {
3599 case IXGBE_PCI_LINK_SPEED_2500:
3600 hw->bus.speed = ixgbe_bus_speed_2500;
3602 case IXGBE_PCI_LINK_SPEED_5000:
3603 hw->bus.speed = ixgbe_bus_speed_5000;
3605 case IXGBE_PCI_LINK_SPEED_8000:
3606 hw->bus.speed = ixgbe_bus_speed_8000;
3609 hw->bus.speed = ixgbe_bus_speed_unknown;
3613 mac->ops.set_lan_id(hw);
3616 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3617 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3618 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3619 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3620 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3621 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3622 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn if a standard adapter is in a slot slower than x8 Gen1 / x4 Gen2 */
3625 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3626 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3627 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3628 device_printf(dev, "PCI-Express bandwidth available"
3629 " for this card\n is not sufficient for"
3630 " optimal performance.\n");
3631 device_printf(dev, "For optimal performance a x8 "
3632 "PCIE, or x4 PCIE Gen2 slot is required.\n");
/* The QSFP quad-port part needs a full x8 Gen3 slot */
3634 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3635 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3636 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3637 device_printf(dev, "PCI-Express bandwidth available"
3638 " for this card\n is not sufficient for"
3639 " optimal performance.\n");
3640 device_printf(dev, "For optimal performance a x8 "
3641 "PCIE Gen3 slot is required.\n");
3649 ** Setup the correct IVAR register for a particular MSIX interrupt
3650 ** (yes this is all very magic and confusing :)
3651 ** - entry is the register array entry
3652 ** - vector is the MSIX vector for this queue
3653 ** - type is RX/TX/MISC
/*
** 82598 packs four 8-bit entries per IVAR register and folds RX/TX
** together via entry += type*64; newer MACs (82599/X540/X550/X550EM)
** use two entries per IVAR register plus a dedicated IVAR_MISC
** register when type == -1.
*/
3656 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3658 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the mapping valid so hardware honors it */
3661 vector |= IXGBE_IVAR_ALLOC_VAL;
3663 switch (hw->mac.type) {
3665 case ixgbe_mac_82598EB:
3667 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3669 entry += (type * 64);
/* 4 entries per 32-bit IVAR register: pick register, then byte lane */
3670 index = (entry >> 2) & 0x1F;
3671 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3672 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3673 ivar |= (vector << (8 * (entry & 0x3)));
3674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3677 case ixgbe_mac_82599EB:
3678 case ixgbe_mac_X540:
3679 case ixgbe_mac_X550:
3680 case ixgbe_mac_X550EM_x:
3681 if (type == -1) { /* MISC IVAR */
3682 index = (entry & 1) * 8;
3683 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3684 ivar &= ~(0xFF << index);
3685 ivar |= (vector << index);
3686 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3687 } else { /* RX/TX IVARS */
/* 2 queue entries per IVAR register; RX in low byte, TX in high byte of each half */
3688 index = (16 * (entry & 1)) + (8 * type);
3689 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3690 ivar &= ~(0xFF << index);
3691 ivar |= (vector << index);
3692 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
** ixgbe_configure_ivars - map every queue's RX and TX ring to its
** MSI-X vector via ixgbe_set_ivar(), program an initial EITR
** (interrupt throttle) value per queue, and finally map the link
** interrupt (entry 1, type -1) to the admin vector.
*/
3701 ixgbe_configure_ivars(struct adapter *adapter)
3703 struct ix_queue *que = adapter->queues;
/* Convert max interrupt rate to EITR units: 4 usec steps in bits [11:3] */
3706 if (ixgbe_max_interrupt_rate > 0)
3707 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3710 ** Disable DMA coalescing if interrupt moderation is
3717 for (int i = 0; i < adapter->num_queues; i++, que++) {
3718 struct rx_ring *rxr = &adapter->rx_rings[i];
3719 struct tx_ring *txr = &adapter->tx_rings[i];
3720 /* First the RX queue entry */
3721 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3722 /* ... and the TX */
3723 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3724 /* Set an Initial EITR value */
3725 IXGBE_WRITE_REG(&adapter->hw,
3726 IXGBE_EITR(que->msix), newitr);
3729 /* For the Link interrupt */
3730 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3734 ** ixgbe_sfp_probe - called in the local timer to
3735 ** determine if a port had optics inserted.
/*
** Only probes when the PHY is ixgbe_phy_nl and no SFP is currently
** present. On an unsupported module, logs and stops further probing
** (sfp_probe = FALSE); on success, also clears sfp_probe and calls
** ixgbe_setup_optics() so the media type is reported correctly.
** Returns TRUE/FALSE — presumably TRUE when supported optics were
** found (the return path is elided in this view; confirm in full source).
*/
3738 ixgbe_sfp_probe(struct adapter *adapter)
3740 struct ixgbe_hw *hw = &adapter->hw;
3741 device_t dev = adapter->dev;
3742 bool result = FALSE;
3744 if ((hw->phy.type == ixgbe_phy_nl) &&
3745 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3746 s32 ret = hw->phy.ops.identify_sfp(hw);
3749 ret = hw->phy.ops.reset(hw);
3750 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3751 device_printf(dev, "Unsupported SFP+ module detected!");
3752 device_printf(dev, "Reload driver with supported module.\n");
/* Stop probing: a driver reload is required to pick up a new module */
3753 adapter->sfp_probe = FALSE;
3756 device_printf(dev, "SFP+ module detected!\n");
3757 /* We now have supported optics */
3758 adapter->sfp_probe = FALSE;
3759 /* Set the optics type so system reports correctly */
3760 ixgbe_setup_optics(adapter);
3768 ** Tasklet handler for MSIX Link interrupts
3769 ** - do outside interrupt since it might sleep
/*
** Queries current link state, propagates it to the OS via
** ixgbe_update_link_status(), then re-arms the LSC interrupt
** that was masked when this task was scheduled.
*/
3772 ixgbe_handle_link(void *context, int pending)
3774 struct adapter *adapter = context;
3775 struct ixgbe_hw *hw = &adapter->hw;
3777 ixgbe_check_link(hw,
3778 &adapter->link_speed, &adapter->link_up, 0);
3779 ixgbe_update_link_status(adapter);
3781 /* Re-enable link interrupts */
3782 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3786 ** Tasklet for handling SFP module interrupts
/*
** Runs under the core lock. Re-identifies the PHY (it may have
** changed with the module), retries any PHY init/reset that was
** skipped, identifies and sets up the new SFP, kicks the MSF task
** for multispeed fiber, and refreshes adapter->optics from the
** MAC's reported media type.
*/
3789 ixgbe_handle_mod(void *context, int pending)
3791 struct adapter *adapter = context;
3792 struct ixgbe_hw *hw = &adapter->hw;
/* Remember the PHY type so we can log if module swap changed it */
3793 enum ixgbe_phy_type orig_type = hw->phy.type;
3794 device_t dev = adapter->dev;
3797 IXGBE_CORE_LOCK(adapter);
3799 /* Check to see if the PHY type changed */
3800 if (hw->phy.ops.identify) {
3801 hw->phy.type = ixgbe_phy_unknown;
3802 hw->phy.ops.identify(hw);
3805 if (hw->phy.type != orig_type) {
3806 device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3808 if (hw->phy.type == ixgbe_phy_none) {
3809 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3813 /* Try to do the initialization that was skipped before */
3814 if (hw->phy.ops.init)
3815 hw->phy.ops.init(hw);
3816 if (hw->phy.ops.reset)
3817 hw->phy.ops.reset(hw);
3820 err = hw->phy.ops.identify_sfp(hw);
3821 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3823 "Unsupported SFP+ module type was detected.\n");
3827 err = hw->mac.ops.setup_sfp(hw);
3828 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3830 "Setup failure - unsupported SFP+ module type.\n");
/* Multispeed fiber needs the MSF task to renegotiate link speed */
3833 if (hw->phy.multispeed_fiber)
3834 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3836 /* Update media type */
3837 switch (hw->mac.ops.get_media_type(hw)) {
3838 case ixgbe_media_type_fiber:
3839 adapter->optics = IFM_10G_SR;
3841 case ixgbe_media_type_copper:
3842 adapter->optics = IFM_10G_TWINAX;
3844 case ixgbe_media_type_cx4:
3845 adapter->optics = IFM_10G_CX4;
3848 adapter->optics = 0;
3852 IXGBE_CORE_UNLOCK(adapter);
3858 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
** Under the core lock: refreshes the supported PHY layer, picks the
** advertised speeds (falling back to the MAC's reported link
** capabilities when nothing is advertised), re-runs link setup with
** autonegotiation, and rebuilds the ifmedia list to match the new
** module's capabilities.
*/
3861 ixgbe_handle_msf(void *context, int pending)
3863 struct adapter *adapter = context;
3864 struct ixgbe_hw *hw = &adapter->hw;
3868 IXGBE_CORE_LOCK(adapter);
3869 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3870 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3872 autoneg = hw->phy.autoneg_advertised;
/* No advertised speeds configured: ask hardware what the link supports */
3873 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3874 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3875 if (hw->mac.ops.setup_link)
3876 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3878 /* Adjust media types shown in ifconfig */
3879 ifmedia_removeall(&adapter->media);
3880 ixgbe_add_media_types(adapter);
3881 IXGBE_CORE_UNLOCK(adapter);
3886 ** Tasklet for handling interrupts from an external PHY
/*
** Delegates to the PHY's LASI (Link Alarm Status Interrupt) handler
** and logs the two failure modes: over-temperature (critical — the
** PHY downshifts to a lower power state) or any other non-zero
** error code.
*/
3889 ixgbe_handle_phy(void *context, int pending)
3891 struct adapter *adapter = context;
3892 struct ixgbe_hw *hw = &adapter->hw;
3895 error = hw->phy.ops.handle_lasi(hw);
3896 if (error == IXGBE_ERR_OVERTEMP)
3897 device_printf(adapter->dev,
3898 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3899 " PHY will downshift to lower power state!\n");
3901 device_printf(adapter->dev,
3902 "Error handling LASI interrupt: %d\n",
3909 ** Tasklet for reinitializing the Flow Director filter table
/*
** Guarded by the fdir_reinit flag set when the FDIR table filled:
** rebuilds the 82599 Flow Director tables, clears the flag,
** re-arms the FDIR interrupt, and marks the interface running again.
*/
3912 ixgbe_reinit_fdir(void *context, int pending)
3914 struct adapter *adapter = context;
3915 struct ifnet *ifp = adapter->ifp;
3917 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3919 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3920 adapter->fdir_reinit = 0;
3921 /* re-enable flow director interrupts */
3922 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3923 /* Restart the interface */
3924 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3929 /*********************************************************************
3931 * Configure DMA Coalescing
3933 **********************************************************************/
/*
** Only applies to X550-class MACs that provide a dmac_config op.
** Reprograms the hardware only when the user-requested watchdog
** timer (adapter->dmac) or the current link speed differs from what
** is already configured, avoiding redundant register writes.
*/
3935 ixgbe_config_dmac(struct adapter *adapter)
3937 struct ixgbe_hw *hw = &adapter->hw;
3938 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMA coalescing exists only on X550 and later with a config op */
3940 if (hw->mac.type < ixgbe_mac_X550 ||
3941 !hw->mac.ops.dmac_config)
/* XOR used as "differs" test on the two config-relevant fields */
3944 if (dcfg->watchdog_timer ^ adapter->dmac ||
3945 dcfg->link_speed ^ adapter->link_speed) {
3946 dcfg->watchdog_timer = adapter->dmac;
3947 dcfg->fcoe_en = false;
3948 dcfg->link_speed = adapter->link_speed;
3951 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3952 dcfg->watchdog_timer, dcfg->link_speed);
3954 hw->mac.ops.dmac_config(hw);
3959 * Checks whether the adapter's ports are capable of
3960 * Wake On LAN by reading the adapter's NVM.
3962 * Sets each port's hw->wol_enabled value depending
3963 * on the value read here.
/*
** Reads the NVM device capabilities word: WoL is enabled when all
** ports support it, or when only port 0 supports it and this is
** port 0 (the second half of that test is elided in this view —
** confirm against the full source). Also snapshots the current WUFC
** (wake-up filter) register so it can be restored/used at suspend.
*/
3966 ixgbe_check_wol_support(struct adapter *adapter)
3968 struct ixgbe_hw *hw = &adapter->hw;
3971 /* Find out WoL support for port */
3972 adapter->wol_support = hw->wol_enabled = 0;
3973 ixgbe_get_device_caps(hw, &dev_caps);
3974 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3975 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3977 adapter->wol_support = hw->wol_enabled = 1;
3979 /* Save initial wake up filter configuration */
3980 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3986 * Prepare the adapter/port for LPLU and/or WoL
/*
** Called with the core lock held (asserted below). Powers the PHY
** down when WoL is disabled. The full low-power flow (APM wakeup
** off, WUS/WUFC/WUC programming, stop, then enter_lplu) runs only
** for the X550EM-X 10G-T device with an enter_lplu op; all other
** adapters are simply stopped.
*/
3989 ixgbe_setup_low_power_mode(struct adapter *adapter)
3991 struct ixgbe_hw *hw = &adapter->hw;
3992 device_t dev = adapter->dev;
3995 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* No wake capability: fully power the PHY down */
3997 if (!hw->wol_enabled)
3998 ixgbe_set_phy_power(hw, FALSE);
4000 /* Limit power management flow to X550EM baseT */
4001 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4002 && hw->phy.ops.enter_lplu) {
4003 /* Turn off support for APM wakeup. (Using ACPI instead) */
4004 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4005 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4008 * Clear Wake Up Status register to prevent any previous wakeup
4009 * events from waking us up immediately after we suspend.
4011 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4014 * Program the Wakeup Filter Control register with user filter
4017 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4019 /* Enable wakeups and power management in Wakeup Control */
4020 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4021 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4023 /* X550EM baseT adapters need a special LPLU flow */
/* Hold off PHY resets while stopping so LPLU state is not disturbed */
4024 hw->phy.reset_disable = true;
4025 ixgbe_stop(adapter);
4026 error = hw->phy.ops.enter_lplu(hw);
4029 "Error entering LPLU: %d\n", error);
4030 hw->phy.reset_disable = false;
4032 /* Just stop for other adapters */
4033 ixgbe_stop(adapter);
4039 /**********************************************************************
4041 * Update the board statistics counters.
4043 **********************************************************************/
/*
** Reads the hardware's clear-on-read statistics registers and
** accumulates them into adapter->stats.pf, applying several
** documented hardware workarounds (gprc counts missed packets,
** 82598 mprc counts broadcasts, TX counters include pause frames).
** Finishes by publishing totals to the OS statistics structure.
** NOTE(review): missed_rx/total_missed_rx are initialized to 0 here
** but their accumulation (per-TC MPC reads) is elided in this view;
** confirm against the full source.
*/
4045 ixgbe_update_stats_counters(struct adapter *adapter)
4047 struct ixgbe_hw *hw = &adapter->hw;
4048 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4049 u64 total_missed_rx = 0;
4051 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4052 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4053 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4054 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue packet/byte/drop counters, first 16 queues */
4056 for (int i = 0; i < 16; i++) {
4057 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4058 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4059 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4061 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4062 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4063 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4065 /* Hardware workaround, gprc counts missed packets */
4066 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4067 adapter->stats.pf.gprc -= missed_rx;
/* Non-82598 MACs: 36-bit octet counters split across low/high regs */
4069 if (hw->mac.type != ixgbe_mac_82598EB) {
4070 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4071 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4072 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4073 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4074 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4075 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4076 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4077 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4079 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4080 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4081 /* 82598 only has a counter in the high register */
4082 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4083 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4084 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4088 * Workaround: mprc hardware is incorrectly counting
4089 * broadcasts, so for now we subtract those.
4091 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4092 adapter->stats.pf.bprc += bprc;
4093 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4094 if (hw->mac.type == ixgbe_mac_82598EB)
4095 adapter->stats.pf.mprc -= bprc;
/* RX size-bucket histograms */
4097 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4098 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4099 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4100 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4101 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4102 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4104 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4105 adapter->stats.pf.lxontxc += lxon;
4106 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4107 adapter->stats.pf.lxofftxc += lxoff;
4108 total = lxon + lxoff;
/* TX counters include pause frames; subtract them out */
4110 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4111 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4112 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4113 adapter->stats.pf.gptc -= total;
4114 adapter->stats.pf.mptc -= total;
4115 adapter->stats.pf.ptc64 -= total;
4116 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4118 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4119 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4120 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4121 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4122 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4123 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4124 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4125 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4126 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4127 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4128 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4129 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4130 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4131 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4132 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4133 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4134 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4135 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4136 /* Only read FCOE on 82599 */
4137 if (hw->mac.type != ixgbe_mac_82598EB) {
4138 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4139 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4140 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4141 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4142 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4145 /* Fill out the OS statistics structure */
4146 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4147 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4148 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4149 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4150 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4151 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4152 IXGBE_SET_COLLISIONS(adapter, 0);
4153 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4154 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4155 + adapter->stats.pf.rlec);
4158 #if __FreeBSD_version >= 1100036
/*
** if_get_counter(9) callback: serves per-interface statistics from
** the softc totals maintained by ixgbe_update_stats_counters().
** OQDROPS is computed on demand by summing each TX ring's buf_ring
** drop counter; anything unrecognized falls back to the stack's
** default counter implementation.
*/
4160 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4162 struct adapter *adapter;
4163 struct tx_ring *txr;
4166 adapter = if_getsoftc(ifp);
4169 case IFCOUNTER_IPACKETS:
4170 return (adapter->ipackets);
4171 case IFCOUNTER_OPACKETS:
4172 return (adapter->opackets);
4173 case IFCOUNTER_IBYTES:
4174 return (adapter->ibytes);
4175 case IFCOUNTER_OBYTES:
4176 return (adapter->obytes);
4177 case IFCOUNTER_IMCASTS:
4178 return (adapter->imcasts);
4179 case IFCOUNTER_OMCASTS:
4180 return (adapter->omcasts);
4181 case IFCOUNTER_COLLISIONS:
4183 case IFCOUNTER_IQDROPS:
4184 return (adapter->iqdrops);
4185 case IFCOUNTER_OQDROPS:
/* Sum drops across every TX ring's buf_ring */
4187 txr = adapter->tx_rings;
4188 for (int i = 0; i < adapter->num_queues; i++, txr++)
4189 rv += txr->br->br_drops;
4191 case IFCOUNTER_IERRORS:
4192 return (adapter->ierrors);
4194 return (if_get_counter_default(ifp, cnt));
4199 /** ixgbe_sysctl_tdh_handler - Handler function
4200 * Retrieves the TDH value from the hardware
/* Read-only sysctl: reports the ring's current TX Descriptor Head */
4203 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4207 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4210 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4211 error = sysctl_handle_int(oidp, &val, 0, req);
4212 if (error || !req->newptr)
4217 /** ixgbe_sysctl_tdt_handler - Handler function
4218 * Retrieves the TDT value from the hardware
/* Read-only sysctl: reports the ring's current TX Descriptor Tail */
4221 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4225 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4228 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4229 error = sysctl_handle_int(oidp, &val, 0, req);
4230 if (error || !req->newptr)
4235 /** ixgbe_sysctl_rdh_handler - Handler function
4236 * Retrieves the RDH value from the hardware
/* Read-only sysctl: reports the ring's current RX Descriptor Head */
4239 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4243 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4246 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4247 error = sysctl_handle_int(oidp, &val, 0, req);
4248 if (error || !req->newptr)
4253 /** ixgbe_sysctl_rdt_handler - Handler function
4254 * Retrieves the RDT value from the hardware
/* Read-only sysctl: reports the ring's current RX Descriptor Tail */
4257 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4261 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4264 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4265 error = sysctl_handle_int(oidp, &val, 0, req);
4266 if (error || !req->newptr)
/*
** Read/write sysctl for a queue's interrupt rate. The EITR interval
** field lives in bits [11:3] in units of 2 usec; the conversion
** below (500000 / usec, 4000000 / rate) translates between that
** field and interrupts-per-second. Writing updates both the global
** ixgbe_max_interrupt_rate and this queue's EITR register.
*/
4272 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4275 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4276 unsigned int reg, usec, rate;
4278 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* Extract the interval field (bits 11:3) from EITR */
4279 usec = ((reg & 0x0FF8) >> 3);
4281 rate = 500000 / usec;
4284 error = sysctl_handle_int(oidp, &rate, 0, req);
4285 if (error || !req->newptr)
4287 reg &= ~0xfff; /* default, no limitation */
4288 ixgbe_max_interrupt_rate = 0;
4289 if (rate > 0 && rate < 500000) {
4292 ixgbe_max_interrupt_rate = rate;
/* Re-encode the requested rate into the EITR interval field */
4293 reg |= ((4000000/rate) & 0xff8 );
4295 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
** ixgbe_add_device_sysctls - register the per-device sysctl tree.
** Common knobs (flow control, AIM, advertised speed, thermal test,
** power state, RSS config dump) are added for every adapter;
** feature-specific nodes are gated on MAC/device ID: DMA coalescing
** for X550+, an "eee" subtree for X552 backplane (KR), and WoL plus
** an external-PHY "phy" subtree for the X552/X557-AT 10G-T part.
*/
4300 ixgbe_add_device_sysctls(struct adapter *adapter)
4302 device_t dev = adapter->dev;
4303 struct ixgbe_hw *hw = &adapter->hw;
4304 struct sysctl_oid_list *child;
4305 struct sysctl_ctx_list *ctx;
4307 ctx = device_get_sysctl_ctx(dev);
4308 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4310 /* Sysctls for all devices */
4311 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4312 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4313 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4315 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4317 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4319 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4320 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4321 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4323 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4324 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4325 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4328 /* testing sysctls (for all devices) */
4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4330 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4331 ixgbe_sysctl_power_state, "I", "PCI Power State");
4333 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4334 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4335 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4337 /* for X550 series devices */
4338 if (hw->mac.type >= ixgbe_mac_X550)
4339 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4340 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4341 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4343 /* for X552 backplane devices */
4344 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4345 struct sysctl_oid *eee_node;
4346 struct sysctl_oid_list *eee_list;
/* Energy Efficient Ethernet controls live under their own "eee" node */
4348 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4350 "Energy Efficient Ethernet sysctls");
4351 eee_list = SYSCTL_CHILDREN(eee_node);
4353 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4354 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4355 ixgbe_sysctl_eee_enable, "I",
4356 "Enable or Disable EEE");
4358 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4359 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4360 ixgbe_sysctl_eee_negotiated, "I",
4361 "EEE negotiated on link");
4363 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4364 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4365 ixgbe_sysctl_eee_tx_lpi_status, "I",
4366 "Whether or not TX link is in LPI state");
4368 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4369 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4370 ixgbe_sysctl_eee_rx_lpi_status, "I",
4371 "Whether or not RX link is in LPI state");
4373 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4374 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4375 ixgbe_sysctl_eee_tx_lpi_delay, "I",
4376 "TX LPI entry delay in microseconds");
4379 /* for WoL-capable devices */
4380 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4381 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4382 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4383 ixgbe_sysctl_wol_enable, "I",
4384 "Enable/Disable Wake on LAN");
4386 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4387 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4388 ixgbe_sysctl_wufc, "I",
4389 "Enable/Disable Wake Up Filters");
4392 /* for X552/X557-AT devices */
4393 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4394 struct sysctl_oid *phy_node;
4395 struct sysctl_oid_list *phy_list;
/* External PHY temperature/overtemp readouts under a "phy" node */
4397 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4399 "External PHY sysctls");
4400 phy_list = SYSCTL_CHILDREN(phy_node);
4402 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4403 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4404 ixgbe_sysctl_phy_temp, "I",
4405 "Current External PHY Temperature (Celsius)");
4407 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4408 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4409 ixgbe_sysctl_phy_overtemp_occurred, "I",
4410 "External PHY High Temperature Event Occurred");
4415 * Add sysctl variables, one per statistic, to the system.
4418 ixgbe_add_hw_stats(struct adapter *adapter)
4420 device_t dev = adapter->dev;
4422 struct tx_ring *txr = adapter->tx_rings;
4423 struct rx_ring *rxr = adapter->rx_rings;
4425 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4426 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4427 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4428 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4430 struct sysctl_oid *stat_node, *queue_node;
4431 struct sysctl_oid_list *stat_list, *queue_list;
4433 #define QUEUE_NAME_LEN 32
4434 char namebuf[QUEUE_NAME_LEN];
4436 /* Driver Statistics */
4437 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4438 CTLFLAG_RD, &adapter->dropped_pkts,
4439 "Driver dropped packets");
4440 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4441 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4442 "m_defrag() failed");
4443 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4444 CTLFLAG_RD, &adapter->watchdog_events,
4445 "Watchdog timeouts");
4446 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4447 CTLFLAG_RD, &adapter->link_irq,
4448 "Link MSIX IRQ Handled");
4450 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4451 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4452 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4453 CTLFLAG_RD, NULL, "Queue Name");
4454 queue_list = SYSCTL_CHILDREN(queue_node);
4456 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4457 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4458 sizeof(&adapter->queues[i]),
4459 ixgbe_sysctl_interrupt_rate_handler, "IU",
4461 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4462 CTLFLAG_RD, &(adapter->queues[i].irqs),
4463 "irqs on this queue");
4464 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4465 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4466 ixgbe_sysctl_tdh_handler, "IU",
4467 "Transmit Descriptor Head");
4468 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4469 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4470 ixgbe_sysctl_tdt_handler, "IU",
4471 "Transmit Descriptor Tail");
4472 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4473 CTLFLAG_RD, &txr->tso_tx,
4475 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4476 CTLFLAG_RD, &txr->no_tx_dma_setup,
4477 "Driver tx dma failure in xmit");
4478 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4479 CTLFLAG_RD, &txr->no_desc_avail,
4480 "Queue No Descriptor Available");
4481 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4482 CTLFLAG_RD, &txr->total_packets,
4483 "Queue Packets Transmitted");
4484 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4485 CTLFLAG_RD, &txr->br->br_drops,
4486 "Packets dropped in buf_ring");
4489 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4490 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4491 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4492 CTLFLAG_RD, NULL, "Queue Name");
4493 queue_list = SYSCTL_CHILDREN(queue_node);
4495 struct lro_ctrl *lro = &rxr->lro;
4497 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4498 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4499 CTLFLAG_RD, NULL, "Queue Name");
4500 queue_list = SYSCTL_CHILDREN(queue_node);
4502 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4503 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4504 ixgbe_sysctl_rdh_handler, "IU",
4505 "Receive Descriptor Head");
4506 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4507 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4508 ixgbe_sysctl_rdt_handler, "IU",
4509 "Receive Descriptor Tail");
4510 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4511 CTLFLAG_RD, &rxr->rx_packets,
4512 "Queue Packets Received");
4513 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4514 CTLFLAG_RD, &rxr->rx_bytes,
4515 "Queue Bytes Received");
4516 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4517 CTLFLAG_RD, &rxr->rx_copies,
4518 "Copied RX Frames");
4519 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4520 CTLFLAG_RD, &lro->lro_queued, 0,
4522 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4523 CTLFLAG_RD, &lro->lro_flushed, 0,
4527 /* MAC stats get the own sub node */
4529 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4530 CTLFLAG_RD, NULL, "MAC Statistics");
4531 stat_list = SYSCTL_CHILDREN(stat_node);
4533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4534 CTLFLAG_RD, &stats->crcerrs,
4536 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4537 CTLFLAG_RD, &stats->illerrc,
4538 "Illegal Byte Errors");
4539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4540 CTLFLAG_RD, &stats->errbc,
4542 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4543 CTLFLAG_RD, &stats->mspdc,
4544 "MAC Short Packets Discarded");
4545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4546 CTLFLAG_RD, &stats->mlfc,
4547 "MAC Local Faults");
4548 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4549 CTLFLAG_RD, &stats->mrfc,
4550 "MAC Remote Faults");
4551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4552 CTLFLAG_RD, &stats->rlec,
4553 "Receive Length Errors");
4555 /* Flow Control stats */
4556 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4557 CTLFLAG_RD, &stats->lxontxc,
4558 "Link XON Transmitted");
4559 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4560 CTLFLAG_RD, &stats->lxonrxc,
4561 "Link XON Received");
4562 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4563 CTLFLAG_RD, &stats->lxofftxc,
4564 "Link XOFF Transmitted");
/*
 * Tail of ixgbe_add_hw_stats(): registers the MAC hardware statistics
 * counters as read-only sysctl nodes under the per-adapter stats tree.
 * Each node exposes one u64 field of struct ixgbe_hw_stats directly.
 * (The function head and several lines are elided in this listing.)
 */
4565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4566 CTLFLAG_RD, &stats->lxoffrxc,
4567 "Link XOFF Received");
4569 /* Packet Reception Stats */
4570 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4571 CTLFLAG_RD, &stats->tor,
4572 "Total Octets Received");
4573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4574 CTLFLAG_RD, &stats->gorc,
4575 "Good Octets Received");
4576 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4577 CTLFLAG_RD, &stats->tpr,
4578 "Total Packets Received");
4579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4580 CTLFLAG_RD, &stats->gprc,
4581 "Good Packets Received");
4582 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4583 CTLFLAG_RD, &stats->mprc,
4584 "Multicast Packets Received");
4585 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4586 CTLFLAG_RD, &stats->bprc,
4587 "Broadcast Packets Received");
4588 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4589 CTLFLAG_RD, &stats->prc64,
4590 "64 byte frames received ");
4591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4592 CTLFLAG_RD, &stats->prc127,
4593 "65-127 byte frames received");
4594 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4595 CTLFLAG_RD, &stats->prc255,
4596 "128-255 byte frames received");
4597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4598 CTLFLAG_RD, &stats->prc511,
4599 "256-511 byte frames received");
4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4601 CTLFLAG_RD, &stats->prc1023,
4602 "512-1023 byte frames received");
4603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4604 CTLFLAG_RD, &stats->prc1522,
/* NOTE(review): description says "1023-1522" but the node name and the
 * matching tx node say 1024-1522 — description string looks off by one. */
4605 "1023-1522 byte frames received");
4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4607 CTLFLAG_RD, &stats->ruc,
4608 "Receive Undersized");
4609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4610 CTLFLAG_RD, &stats->rfc,
4611 "Fragmented Packets Received ");
4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4613 CTLFLAG_RD, &stats->roc,
4614 "Oversized Packets Received");
4615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4616 CTLFLAG_RD, &stats->rjc,
4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4619 CTLFLAG_RD, &stats->mngprc,
4620 "Management Packets Received");
4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
/* BUG(review): "management_pkts_drpd" is bound to &stats->mngptc, the
 * management-packets-TRANSMITTED counter (also exported below as
 * "management_pkts_txd").  The dropped counter in ixgbe_hw_stats is
 * mngpdc — this line should read `&stats->mngpdc`. */
4622 CTLFLAG_RD, &stats->mngptc,
4623 "Management Packets Dropped");
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4625 CTLFLAG_RD, &stats->xec,
4628 /* Packet Transmission Stats */
4629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4630 CTLFLAG_RD, &stats->gotc,
4631 "Good Octets Transmitted");
4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4633 CTLFLAG_RD, &stats->tpt,
4634 "Total Packets Transmitted");
4635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4636 CTLFLAG_RD, &stats->gptc,
4637 "Good Packets Transmitted");
4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4639 CTLFLAG_RD, &stats->bptc,
4640 "Broadcast Packets Transmitted");
4641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4642 CTLFLAG_RD, &stats->mptc,
4643 "Multicast Packets Transmitted");
4644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4645 CTLFLAG_RD, &stats->mngptc,
4646 "Management Packets Transmitted");
4647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4648 CTLFLAG_RD, &stats->ptc64,
4649 "64 byte frames transmitted ");
4650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4651 CTLFLAG_RD, &stats->ptc127,
4652 "65-127 byte frames transmitted");
4653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4654 CTLFLAG_RD, &stats->ptc255,
4655 "128-255 byte frames transmitted");
4656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4657 CTLFLAG_RD, &stats->ptc511,
4658 "256-511 byte frames transmitted");
4659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4660 CTLFLAG_RD, &stats->ptc1023,
4661 "512-1023 byte frames transmitted");
4662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4663 CTLFLAG_RD, &stats->ptc1522,
4664 "1024-1522 byte frames transmitted");
/*
 * ixgbe_set_sysctl_value - register a read/write int sysctl under the
 * device's tree.  `limit` is the backing storage; `value` is its initial
 * value.  (Return-type line and braces elided in this listing.)
 */
4668 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4669 const char *description, int *limit, int value)
4672 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4673 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4674 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4678 ** Set flow control using sysctl:
4679 ** Flow control values: 0 - off, 1 - rx pause, 2 - tx pause, 3 - full
/*
 * Sysctl handler for the flow-control mode.  Reads the proposed value,
 * bails on read-only access or no change, otherwise delegates to
 * ixgbe_set_flowcntl().  (Declarations of error/fc, early-return bodies
 * and braces are elided in this listing.)
 */
4686 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4689 struct adapter *adapter;
4691 adapter = (struct adapter *) arg1;
4694 error = sysctl_handle_int(oidp, &fc, 0, req);
4695 if ((error) || (req->newptr == NULL))
4698 /* Don't bother if it's not changed */
4699 if (adapter->fc == fc)
4702 return ixgbe_set_flowcntl(adapter, fc);
/*
 * Program the requested flow-control mode into the MAC.  For rx/tx
 * pause modes, per-queue RX drop is disabled (so pause can backpressure);
 * for "none" it is enabled when multiqueue.  Forcing a value disables
 * flow-control autonegotiation.  (The enclosing switch statement and
 * validation lines are elided in this listing.)
 */
4707 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4711 case ixgbe_fc_rx_pause:
4712 case ixgbe_fc_tx_pause:
4714 adapter->hw.fc.requested_mode = adapter->fc;
4715 if (adapter->num_queues > 1)
4716 ixgbe_disable_rx_drop(adapter);
4719 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4720 if (adapter->num_queues > 1)
4721 ixgbe_enable_rx_drop(adapter);
4727 /* Don't autoneg if forcing a value */
4728 adapter->hw.fc.disable_fc_autoneg = TRUE;
4729 ixgbe_fc_enable(&adapter->hw);
4734 ** Control advertised link speed:
4736 ** 0x1 - advertise 100 Mb
4737 ** 0x2 - advertise 1G
4738 ** 0x4 - advertise 10G
/*
 * Sysctl handler for the advertised-speed bitmask (see comment above:
 * 0x1=100Mb, 0x2=1G, 0x4=10G).  Delegates validation and programming to
 * ixgbe_set_advertise().  (Early-return bodies elided in this listing.)
 */
4741 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4743 int error, advertise;
4744 struct adapter *adapter;
4746 adapter = (struct adapter *) arg1;
4747 advertise = adapter->advertise;
4749 error = sysctl_handle_int(oidp, &advertise, 0, req);
4750 if ((error) || (req->newptr == NULL))
4753 return ixgbe_set_advertise(adapter, advertise);
/*
 * Validate and apply a new advertised-speed bitmask.  Rejects: no-op
 * changes, backplane media, non-copper/non-multispeed-fiber media,
 * out-of-range masks, and 100Mb on MACs other than X540/X550.  On
 * success builds an ixgbe_link_speed mask and restarts link setup.
 * (Variable initializations, returns and braces elided in this listing.)
 */
4757 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4760 struct ixgbe_hw *hw;
4761 ixgbe_link_speed speed;
4763 /* Checks to validate new value */
4764 if (adapter->advertise == advertise) /* no change */
4770 /* No speed changes for backplane media */
4771 if (hw->phy.media_type == ixgbe_media_type_backplane)
4774 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4775 (hw->phy.multispeed_fiber))) {
4777 "Advertised speed can only be set on copper or "
4778 "multispeed fiber media types.\n");
4782 if (advertise < 0x1 || advertise > 0x7) {
4784 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4788 if ((advertise & 0x1)
4789 && (hw->mac.type != ixgbe_mac_X540)
4790 && (hw->mac.type != ixgbe_mac_X550)) {
4791 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4795 /* Set new value and report new advertised mode */
4797 if (advertise & 0x1)
4798 speed |= IXGBE_LINK_SPEED_100_FULL;
4799 if (advertise & 0x2)
4800 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4801 if (advertise & 0x4)
4802 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4803 adapter->advertise = advertise;
4805 hw->mac.autotry_restart = TRUE;
4806 hw->mac.ops.setup_link(hw, speed, TRUE);
4812 * The following two sysctls are for X552/X557-AT devices;
4813 * they deal with the external PHY used in them.
/*
 * Report the external PHY's current temperature (X552/X557-AT only).
 * Reads IXGBE_PHY_CURRENT_TEMP over MDIO and returns the shifted value
 * read-only via sysctl.  (The &reg out-argument line, error returns and
 * the shift expression are elided in this listing.)
 */
4816 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4818 struct adapter *adapter = (struct adapter *) arg1;
4819 struct ixgbe_hw *hw = &adapter->hw;
4822 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4823 device_printf(adapter->dev,
4824 "Device has no supported external thermal sensor.\n");
4828 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4829 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4831 device_printf(adapter->dev,
4832 "Error reading from PHY's current temperature register\n");
4836 /* Shift temp for output */
4839 return (sysctl_handle_int(oidp, NULL, reg, req));
4843 * Reports whether the current PHY temperature is over
4844 * the overtemp threshold.
4845 * - This is reported directly from the PHY
/*
 * Report whether the external PHY (X552/X557-AT only) has flagged an
 * over-temperature condition: reads the PHY overtemp status register
 * and exposes bit 14 (0x4000) as a 0/1 read-only sysctl.
 * (Out-argument and error-return lines elided in this listing.)
 */
4848 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4850 struct adapter *adapter = (struct adapter *) arg1;
4851 struct ixgbe_hw *hw = &adapter->hw;
4854 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4855 device_printf(adapter->dev,
4856 "Device has no supported external thermal sensor.\n");
4860 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4861 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4863 device_printf(adapter->dev,
4864 "Error reading from PHY's temperature status register\n");
4868 /* Get occurrence bit */
4869 reg = !!(reg & 0x4000);
4870 return (sysctl_handle_int(oidp, 0, reg, req));
4874 ** Thermal Shutdown Trigger (internal MAC)
4875 ** - Set this to 1 to cause an overtemp event to occur
/*
 * Debug sysctl: writing a non-zero value injects a thermal-sensor event
 * by setting the TS bit in EICS (software-set interrupt cause), which
 * makes the MAC raise an overtemp interrupt.  (The `if (fire)` wrapper
 * and return are elided in this listing.)
 */
4878 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4880 struct adapter *adapter = (struct adapter *) arg1;
4881 struct ixgbe_hw *hw = &adapter->hw;
4882 int error, fire = 0;
4884 error = sysctl_handle_int(oidp, &fire, 0, req);
4885 if ((error) || (req->newptr == NULL))
4889 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4890 reg |= IXGBE_EICR_TS;
4891 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4898 ** Manage DMA Coalescing.
4900 ** 0/1 - off / on (use default value of 1000)
4902 ** Legal timer values are:
4903 ** 50,100,250,500,1000,2000,5000,10000
4905 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler for DMA coalescing (see header comment above: 0/1 or a
 * legal timer value).  1 maps to the default timer of 1000; listed legal
 * values are stored verbatim; anything else is rejected.  The interface
 * is re-initialized if already running so the new setting takes effect.
 * (The switch skeleton, case labels and returns are elided in this
 * listing.)
 */
4908 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4910 struct adapter *adapter = (struct adapter *) arg1;
4911 struct ifnet *ifp = adapter->ifp;
4915 newval = adapter->dmac;
4916 error = sysctl_handle_int(oidp, &newval, 0, req);
4917 if ((error) || (req->newptr == NULL))
4926 /* Enable and use default */
4927 adapter->dmac = 1000;
4937 /* Legal values - allow */
4938 adapter->dmac = newval;
4941 /* Do nothing, illegal value */
4945 /* Re-initialize hardware if it's already running */
4946 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4947 ixgbe_init(adapter);
4954 * Sysctl to test power states
4956 * 0 - set device to D0
4957 * 3 - set device to D3
4958 * (none) - get current device power state
/*
 * Sysctl to test PCI power states: write 3 to suspend (D0->D3), 0 to
 * resume (D3->D0); reading returns the current state.  Transitions go
 * through the newbus DEVICE_SUSPEND/DEVICE_RESUME methods.  (Error
 * returns and braces elided in this listing.)
 */
4961 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4963 struct adapter *adapter = (struct adapter *) arg1;
4964 device_t dev = adapter->dev;
4965 int curr_ps, new_ps, error = 0;
4967 curr_ps = new_ps = pci_get_powerstate(dev);
4969 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4970 if ((error) || (req->newptr == NULL))
4973 if (new_ps == curr_ps)
4976 if (new_ps == 3 && curr_ps == 0)
4977 error = DEVICE_SUSPEND(dev);
4978 else if (new_ps == 0 && curr_ps == 3)
4979 error = DEVICE_RESUME(dev);
4983 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4989 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Sysctl to toggle Wake-on-LAN.  The value is normalized to 0/1; enabling
 * is refused when the adapter lacks WoL support (adapter->wol_support).
 * (Error-return bodies and the final return are elided in this listing.)
 */
4995 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4997 struct adapter *adapter = (struct adapter *) arg1;
4998 struct ixgbe_hw *hw = &adapter->hw;
4999 int new_wol_enabled;
5002 new_wol_enabled = hw->wol_enabled;
5003 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5004 if ((error) || (req->newptr == NULL))
5006 new_wol_enabled = !!(new_wol_enabled);
5007 if (new_wol_enabled == hw->wol_enabled)
5010 if (new_wol_enabled > 0 && !adapter->wol_support)
5013 hw->wol_enabled = new_wol_enabled;
5019 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5020 * if supported by the adapter.
/*
 * Sysctl to toggle Energy Efficient Ethernet.  Normalized to 0/1;
 * enabling is refused when the MAC provides no setup_eee op.  The
 * interface is re-initialized if running so the EEE change is applied.
 * (Error-return bodies elided in this listing.)
 */
5026 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
5028 struct adapter *adapter = (struct adapter *) arg1;
5029 struct ixgbe_hw *hw = &adapter->hw;
5030 struct ifnet *ifp = adapter->ifp;
5031 int new_eee_enabled, error = 0;
5033 new_eee_enabled = adapter->eee_enabled;
5034 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
5035 if ((error) || (req->newptr == NULL))
5037 new_eee_enabled = !!(new_eee_enabled);
5038 if (new_eee_enabled == adapter->eee_enabled)
5041 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5044 adapter->eee_enabled = new_eee_enabled;
5046 /* Re-initialize hardware if it's already running */
5047 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5048 ixgbe_init(adapter);
5054 * Read-only sysctl indicating whether EEE support was negotiated
/*
 * Read-only sysctl: 1 if EEE was negotiated with the link partner
 * (EEE_STAT.NEG bit), else 0.
 */
5058 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5060 struct adapter *adapter = (struct adapter *) arg1;
5061 struct ixgbe_hw *hw = &adapter->hw;
5064 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5066 return (sysctl_handle_int(oidp, 0, status, req));
5070 * Read-only sysctl indicating whether RX Link is in LPI state.
/*
 * Read-only sysctl: 1 if the RX link is currently in EEE Low Power Idle
 * (EEE_STAT RX LPI status bit), else 0.
 */
5073 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5075 struct adapter *adapter = (struct adapter *) arg1;
5076 struct ixgbe_hw *hw = &adapter->hw;
5079 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5080 IXGBE_EEE_RX_LPI_STATUS);
5082 return (sysctl_handle_int(oidp, 0, status, req));
5086 * Read-only sysctl indicating whether TX Link is in LPI state.
/*
 * Read-only sysctl: 1 if the TX link is currently in EEE Low Power Idle
 * (EEE_STAT TX LPI status bit), else 0.
 */
5089 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5091 struct adapter *adapter = (struct adapter *) arg1;
5092 struct ixgbe_hw *hw = &adapter->hw;
5095 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5096 IXGBE_EEE_TX_LPI_STATUS);
5098 return (sysctl_handle_int(oidp, 0, status, req));
5102 * Read-only sysctl indicating TX Link LPI delay
/*
 * Read-only sysctl: TX LPI delay, extracted from the top 6 bits
 * (>> 26) of the EEE setup register (EEE_SU).
 */
5105 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5107 struct adapter *adapter = (struct adapter *) arg1;
5108 struct ixgbe_hw *hw = &adapter->hw;
5111 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5113 return (sysctl_handle_int(oidp, 0, reg >> 26, req));
5117 * Sysctl to enable/disable the types of packets that the
5118 * adapter will wake up on upon receipt.
5119 * WUFC - Wake Up Filter Control
5121 * 0x1 - Link Status Change
5122 * 0x2 - Magic Packet
5123 * 0x4 - Direct Exact
5124 * 0x8 - Directed Multicast
5126 * 0x20 - ARP/IPv4 Request Packet
5127 * 0x40 - Direct IPv4 Packet
5128 * 0x80 - Direct IPv6 Packet
5130 * Setting another flag will cause the sysctl to return an
/*
 * Sysctl for the Wake Up Filter Control bits (see header comment above
 * for the flag meanings).  Only the low byte may be changed; any bit
 * above 0xff in the new value is rejected, and the preserved upper bits
 * of the existing wufc are OR-ed back in.  (Returns and the else-branch
 * brace are elided in this listing.)
 */
5134 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5136 struct adapter *adapter = (struct adapter *) arg1;
5140 new_wufc = adapter->wufc;
5142 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5143 if ((error) || (req->newptr == NULL))
5145 if (new_wufc == adapter->wufc)
5148 if (new_wufc & 0xffffff00)
5152 new_wufc |= (0xffffff & adapter->wufc);
5153 adapter->wufc = new_wufc;
/*
 * Debug-only (IXGBE_DEBUG) sysctl: dump the RSS redirection table.
 * X550-family MACs have an extended table, read via ERETA for entries
 * >= 32; earlier MACs use RETA only.  Output is accumulated in an sbuf
 * and returned to the caller.  (The reta_size assignments, loop
 * branching and cleanup are elided in this listing.)
 */
5161 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5163 struct adapter *adapter = (struct adapter *)arg1;
5164 struct ixgbe_hw *hw = &adapter->hw;
5165 device_t dev = adapter->dev;
5166 int error = 0, reta_size;
5170 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5172 device_printf(dev, "Could not allocate sbuf for output.\n");
5176 // TODO: use sbufs to make a string to print out
5177 /* Set multiplier for RETA setup and table size based on MAC */
5178 switch (adapter->hw.mac.type) {
5179 case ixgbe_mac_X550:
5180 case ixgbe_mac_X550EM_x:
5188 /* Print out the redirection table */
5189 sbuf_cat(buf, "\n");
5190 for (int i = 0; i < reta_size; i++) {
5192 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5193 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5195 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5196 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5200 // TODO: print more config
5202 error = sbuf_finish(buf);
5204 device_printf(dev, "Error finishing sbuf: %d\n", error);
5212 ** Enable the hardware to drop packets when the buffer is
5213 ** full. This is useful when multiqueue, so that no single
5214 ** queue being full stalls the entire RX engine. We only
5215 ** enable this when Multiqueue AND when Flow Control is
/*
 * Set SRRCTL.DROP_EN on every PF RX queue and (under PCI_IOV) request
 * queue-drop for every VF pool via QDE, so a full ring drops instead of
 * stalling the whole RX engine.  (The #ifdef PCI_IOV wrapper, the QDE
 * drop-enable flag line and closing braces are elided in this listing.)
 */
5219 ixgbe_enable_rx_drop(struct adapter *adapter)
5221 struct ixgbe_hw *hw = &adapter->hw;
5223 for (int i = 0; i < adapter->num_queues; i++) {
5224 struct rx_ring *rxr = &adapter->rx_rings[i];
5225 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5226 srrctl |= IXGBE_SRRCTL_DROP_EN;
5227 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5230 /* enable drop for each vf */
5231 for (int i = 0; i < adapter->num_vfs; i++) {
5232 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5233 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Inverse of ixgbe_enable_rx_drop(): clear SRRCTL.DROP_EN on every PF RX
 * queue and clear the per-VF queue-drop enable via QDE (PCI_IOV), so
 * flow control can backpressure instead of dropping.  (The #ifdef
 * wrapper and closing braces are elided in this listing.)
 */
5242 struct ixgbe_hw *hw = &adapter->hw;
5244 for (int i = 0; i < adapter->num_queues; i++) {
5245 struct rx_ring *rxr = &adapter->rx_rings[i];
5246 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5247 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5248 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5251 /* disable drop for each vf */
5252 for (int i = 0; i < adapter->num_vfs; i++) {
5253 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5254 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Software-trigger interrupts for the queues in `queues` (bitmask) by
 * writing the EICS registers.  82598 has a single 32-bit cause set;
 * 82599 and newer split the 64-bit mask across EICS_EX(0)/EICS_EX(1).
 * (The switch skeleton, breaks and braces are elided in this listing.)
 */
5260 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5264 switch (adapter->hw.mac.type) {
5265 case ixgbe_mac_82598EB:
5266 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5267 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5269 case ixgbe_mac_82599EB:
5270 case ixgbe_mac_X540:
5271 case ixgbe_mac_X550:
5272 case ixgbe_mac_X550EM_x:
5273 mask = (queues & 0xFFFFFFFF);
5274 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5275 mask = (queues >> 32);
5276 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5286 ** Support functions for SRIOV/VF management
/*
 * Send a PF control message to every active VF (e.g. to notify them of a
 * PF state change).  Inactive VFs are skipped.
 */
5290 ixgbe_ping_all_vfs(struct adapter *adapter)
5292 struct ixgbe_vf *vf;
5294 for (int i = 0; i < adapter->num_vfs; i++) {
5295 vf = &adapter->vfs[i];
5296 if (vf->flags & IXGBE_VF_ACTIVE)
5297 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program the VF pool's offload/VLAN behavior in VMOLR/VMVIR.  With no
 * tag, untagged traffic is accepted and no default tag is inserted;
 * with a tag, untagged traffic is rejected (AUPE cleared) and all VF
 * traffic is tagged with `tag` via VMVIR.  (The if/else structure and
 * the no-tag branch lines are elided in this listing.)
 */
5303 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5306 struct ixgbe_hw *hw;
5307 uint32_t vmolr, vmvir;
5313 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5315 /* Do not receive packets that pass inexact filters. */
5316 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5318 /* Disable Multicast Promiscuous Mode. */
5319 vmolr &= ~IXGBE_VMOLR_MPE;
5321 /* Accept broadcasts. */
5322 vmolr |= IXGBE_VMOLR_BAM;
5325 /* Accept non-vlan tagged traffic. */
5326 //vmolr |= IXGBE_VMOLR_AUPE;
5328 /* Allow VM to tag outgoing traffic; no default tag. */
5331 /* Require vlan-tagged traffic. */
5332 vmolr &= ~IXGBE_VMOLR_AUPE;
5334 /* Tag all traffic with provided vlan tag. */
5335 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5337 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5338 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the PF and a VF can coexist given their max frame
 * sizes.  Only 82599 has the restriction; X540+ allow any combination.
 * Legacy (<=1.0) VF API: no jumbo frames on either side.  1.1+: VF
 * without jumbo always works; VF jumbo requires PF jumbo too.  (Return
 * statements and braces are elided in this listing.)
 */
5343 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5347 * Frame size compatibility between PF and VF is only a problem on
5348 * 82599-based cards. X540 and later support any combination of jumbo
5349 * frames on PFs and VFs.
5351 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5354 switch (vf->api_ver) {
5355 case IXGBE_API_VER_1_0:
5356 case IXGBE_API_VER_UNKNOWN:
5358 * On legacy (1.0 and older) VF versions, we don't support jumbo
5359 * frames on either the PF or the VF.
5361 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5362 vf->max_frame_size > ETHER_MAX_LEN)
5368 case IXGBE_API_VER_1_1:
5371 * 1.1 or later VF versions always work if they aren't using
5374 if (vf->max_frame_size <= ETHER_MAX_LEN)
5378 * Jumbo frames only work with VFs if the PF is also using jumbo
5381 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Reset PF-side state for a VF: restore its default VLAN, clear its RAR
 * entry, and forget the negotiated mailbox API version.
 */
5391 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5393 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5395 // XXX clear multicast addresses
5397 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5399 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/*
 * Enable transmit for a VF pool by setting its bit in the appropriate
 * VFTE (VF Transmit Enable) register word.
 */
5404 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5406 struct ixgbe_hw *hw;
5407 uint32_t vf_index, vfte;
5411 vf_index = IXGBE_VF_INDEX(vf->pool);
5412 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5413 vfte |= IXGBE_VF_BIT(vf->pool);
5414 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Enable or disable receive for a VF pool in VFRE: the bit is set only
 * when the VF's frame size is compatible with the PF's (see
 * ixgbe_vf_frame_size_compatible), otherwise cleared.  (The `else` line
 * between the two branches is elided in this listing.)
 */
5419 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5421 struct ixgbe_hw *hw;
5422 uint32_t vf_index, vfre;
5426 vf_index = IXGBE_VF_INDEX(vf->pool);
5427 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5428 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5429 vfre |= IXGBE_VF_BIT(vf->pool);
5431 vfre &= ~IXGBE_VF_BIT(vf->pool);
5432 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Handle a VF_RESET mailbox message: reset PF-side VF state, re-program
 * the VF's RAR if its MAC address is valid (ACK) or not (NACK), re-enable
 * tx/rx, mark the mailbox clear-to-send, and reply with the permanent
 * MAC address and multicast filter type.  (hw initialization and the
 * else line are elided in this listing.)
 */
5437 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5439 struct ixgbe_hw *hw;
5441 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5445 ixgbe_process_vf_reset(adapter, vf);
5447 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5448 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5449 vf->ether_addr, vf->pool, TRUE);
5450 ack = IXGBE_VT_MSGTYPE_ACK;
5452 ack = IXGBE_VT_MSGTYPE_NACK;
5454 ixgbe_vf_enable_transmit(adapter, vf);
5455 ixgbe_vf_enable_receive(adapter, vf);
5457 vf->flags |= IXGBE_VF_CTS;
5459 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5460 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5461 resp[3] = hw->mac.mc_filter_type;
5462 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a SET_MAC_ADDR mailbox message: NACK if the VF lacks the
 * CAP_MAC privilege and tries to change the address, or if the address
 * is invalid; otherwise store it, program the VF's RAR, and ACK.
 * (Returns and braces are elided in this listing.)
 */
5467 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5471 mac = (uint8_t*)&msg[1];
5473 /* Check that the VF has permission to change the MAC address. */
5474 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5475 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5479 if (ixgbe_validate_mac_addr(mac) != 0) {
5480 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5484 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5486 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5489 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5494 ** VF multicast addresses are set by using the appropriate bit in
5495 ** 1 of 128 32 bit addresses (4096 possible).
/*
 * Handle a SET_MULTICAST mailbox message.  The message carries up to
 * IXGBE_MAX_VF_MC 12-bit multicast hash values; each selects one bit in
 * the 128x32-bit MTA (bits [11:5] pick the register, [4:0] the bit).
 * The hashes are cached in the VF state, ROMPE is enabled for the pool,
 * and the request is ACKed.
 */
5498 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5500 u16 *list = (u16*)&msg[1];
5502 u32 vmolr, vec_bit, vec_reg, mta_reg;
5504 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5505 entries = min(entries, IXGBE_MAX_VF_MC);
5507 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5509 vf->num_mc_hashes = entries;
5511 /* Set the appropriate MTA bit */
5512 for (int i = 0; i < entries; i++) {
5513 vf->mc_hash[i] = list[i];
5514 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5515 vec_bit = vf->mc_hash[i] & 0x1F;
5516 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5517 mta_reg |= (1 << vec_bit);
5518 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5521 vmolr |= IXGBE_VMOLR_ROMPE;
5522 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5523 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_VLAN mailbox message: NACK if the VF lacks the CAP_VLAN
 * privilege or tries to enable VLAN 0; otherwise program the VLAN
 * filter table (VFTA) for the VF's pool and ACK.  (hw initialization
 * and returns are elided in this listing.)
 */
5529 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5531 struct ixgbe_hw *hw;
5536 enable = IXGBE_VT_MSGINFO(msg[0]);
5537 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5539 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5540 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5544 /* It is illegal to enable vlan tag 0. */
5545 if (tag == 0 && enable != 0){
5546 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5550 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5551 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_LPE (large packet enable / max frame size) mailbox
 * message.  Out-of-range requests are deliberately ACKed without effect.
 * Otherwise the VF's max frame size is recorded, RX enable is
 * re-evaluated (may disable the VF if now incompatible with the PF),
 * and MHADD.MFS is grown if the PF's own max frame no longer fits.
 * (hw initialization and returns are elided in this listing.)
 */
5556 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5558 struct ixgbe_hw *hw;
5559 uint32_t vf_max_size, pf_max_size, mhadd;
5562 vf_max_size = msg[1];
5564 if (vf_max_size < ETHER_CRC_LEN) {
5565 /* We intentionally ACK invalid LPE requests. */
5566 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5570 vf_max_size -= ETHER_CRC_LEN;
5572 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5573 /* We intentionally ACK invalid LPE requests. */
5574 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5578 vf->max_frame_size = vf_max_size;
5579 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5582 * We might have to disable reception to this VF if the frame size is
5583 * not compatible with the config on the PF.
5585 ixgbe_vf_enable_receive(adapter, vf);
5587 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5588 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5590 if (pf_max_size < adapter->max_frame_size) {
5591 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5592 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5593 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5596 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_MACVLAN mailbox message.  Not implemented: always NACKs
 * the request (see the XXX below).
 */
5601 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5604 //XXX implement this
5605 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle an API_NEGOTIATE mailbox message: accept (and record) mailbox
 * API versions 1.0 and 1.1 with an ACK; anything else resets the VF's
 * API version to UNKNOWN and NACKs.  (The switch skeleton and breaks
 * are elided in this listing.)
 */
5610 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5615 case IXGBE_API_VER_1_0:
5616 case IXGBE_API_VER_1_1:
5617 vf->api_ver = msg[1];
5618 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5621 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5622 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle a GET_QUEUES mailbox message (API 1.1+ only; pre-1.1 is
 * NACKed).  Replies with the per-VF tx/rx queue count for the current
 * IOV mode, whether transparent VLAN tagging is active (non-zero
 * default VLAN), and default queue 0.  (hw initialization and returns
 * are elided in this listing.)
 */
5629 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5632 struct ixgbe_hw *hw;
5633 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5638 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5640 case IXGBE_API_VER_1_0:
5641 case IXGBE_API_VER_UNKNOWN:
5642 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5646 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5647 IXGBE_VT_MSGTYPE_CTS;
5649 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5650 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5651 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5652 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5653 resp[IXGBE_VF_DEF_QUEUE] = 0;
5655 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one message from a VF's mailbox and dispatch it.  VF_RESET is
 * always handled; all other messages are NACKed until the VF has
 * completed a reset handshake (IXGBE_VF_CTS set).  Unknown message
 * types are NACKed.  (The error check after ixgbe_read_mbx, breaks,
 * and default label are elided in this listing.)
 */
5660 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5662 struct ixgbe_hw *hw;
5663 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5668 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5673 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5674 adapter->ifp->if_xname, msg[0], vf->pool);
5675 if (msg[0] == IXGBE_VF_RESET) {
5676 ixgbe_vf_reset_msg(adapter, vf, msg);
5680 if (!(vf->flags & IXGBE_VF_CTS)) {
5681 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5685 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5686 case IXGBE_VF_SET_MAC_ADDR:
5687 ixgbe_vf_set_mac(adapter, vf, msg);
5689 case IXGBE_VF_SET_MULTICAST:
5690 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5692 case IXGBE_VF_SET_VLAN:
5693 ixgbe_vf_set_vlan(adapter, vf, msg);
5695 case IXGBE_VF_SET_LPE:
5696 ixgbe_vf_set_lpe(adapter, vf, msg);
5698 case IXGBE_VF_SET_MACVLAN:
5699 ixgbe_vf_set_macvlan(adapter, vf, msg);
5701 case IXGBE_VF_API_NEGOTIATE:
5702 ixgbe_vf_api_negotiate(adapter, vf, msg);
5704 case IXGBE_VF_GET_QUEUES:
5705 ixgbe_vf_get_queues(adapter, vf, msg);
5708 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5714 * Tasklet for handling VF -> PF mailbox messages.
/*
 * Taskqueue handler for VF->PF mailbox activity.  Under the core lock,
 * polls every active VF for a pending reset, message, or ack and
 * dispatches each to the corresponding handler.  (Initialization of
 * adapter/hw from `context` is elided in this listing.)
 */
5717 ixgbe_handle_mbx(void *context, int pending)
5719 struct adapter *adapter;
5720 struct ixgbe_hw *hw;
5721 struct ixgbe_vf *vf;
5727 IXGBE_CORE_LOCK(adapter);
5728 for (i = 0; i < adapter->num_vfs; i++) {
5729 vf = &adapter->vfs[i];
5731 if (vf->flags & IXGBE_VF_ACTIVE) {
5732 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5733 ixgbe_process_vf_reset(adapter, vf);
5735 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5736 ixgbe_process_vf_msg(adapter, vf)
5738 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5739 ixgbe_process_vf_ack(adapter, vf);
5742 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI SR-IOV entry point: begin IOV operation with `num_vfs` VFs.
 * Validates the count against the selected IOV mode, allocates the VF
 * state array, and re-initializes the adapter under the core lock.
 * On failure num_vfs is reset to 0 and an error returned.  (Error
 * returns and the final return are elided in this listing.)
 */
5747 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5749 struct adapter *adapter;
5750 enum ixgbe_iov_mode mode;
5752 adapter = device_get_softc(dev);
5753 adapter->num_vfs = num_vfs;
5754 mode = ixgbe_get_iov_mode(adapter);
5756 if (num_vfs > ixgbe_max_vfs(mode)) {
5757 adapter->num_vfs = 0;
5761 IXGBE_CORE_LOCK(adapter);
5763 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5766 if (adapter->vfs == NULL) {
5767 adapter->num_vfs = 0;
5768 IXGBE_CORE_UNLOCK(adapter);
5772 ixgbe_init_locked(adapter);
5774 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI SR-IOV teardown: re-enable rx/tx for the PF pool only, clear the
 * VFRE/VFTE enables for the VF register banks, disable virtualization
 * via VT_CTL, and free the VF state array.  (The loop header over the
 * VF register indices and the return are elided in this listing.)
 */
5781 ixgbe_uninit_iov(device_t dev)
5783 struct ixgbe_hw *hw;
5784 struct adapter *adapter;
5785 uint32_t pf_reg, vf_reg;
5787 adapter = device_get_softc(dev);
5790 IXGBE_CORE_LOCK(adapter);
5792 /* Enable rx/tx for the PF and disable it for all VFs. */
5793 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5794 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5795 IXGBE_VF_BIT(adapter->pool));
5796 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5797 IXGBE_VF_BIT(adapter->pool));
5803 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5804 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5806 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5808 free(adapter->vfs, M_IXGBE);
5809 adapter->vfs = NULL;
5810 adapter->num_vfs = 0;
5812 IXGBE_CORE_UNLOCK(adapter);
5817 ixgbe_initialize_iov(struct adapter *adapter)
5819 struct ixgbe_hw *hw = &adapter->hw;
5820 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5821 enum ixgbe_iov_mode mode;
5824 mode = ixgbe_get_iov_mode(adapter);
5825 if (mode == IXGBE_NO_VM)
5828 IXGBE_CORE_LOCK_ASSERT(adapter);
5830 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5831 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5835 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5838 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5841 panic("Unexpected SR-IOV mode %d", mode);
5843 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5845 mtqc = IXGBE_MTQC_VT_ENA;
5848 mtqc |= IXGBE_MTQC_64VF;
5851 mtqc |= IXGBE_MTQC_32VF;
5854 panic("Unexpected SR-IOV mode %d", mode);
5856 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5859 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5860 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5861 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5864 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5867 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5870 panic("Unexpected SR-IOV mode %d", mode);
5872 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5875 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5876 gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
5879 gpie |= IXGBE_GPIE_VTMODE_64;
5882 gpie |= IXGBE_GPIE_VTMODE_32;
5885 panic("Unexpected SR-IOV mode %d", mode);
5887 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5889 /* Enable rx/tx for the PF. */
5890 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5891 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5892 IXGBE_VF_BIT(adapter->pool));
5893 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5894 IXGBE_VF_BIT(adapter->pool));
5896 /* Allow VM-to-VM communication. */
5897 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5899 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5900 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5901 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5903 for (i = 0; i < adapter->num_vfs; i++)
5904 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5909 ** Check the max frame setting of all active VF's
/*
 * Re-derive the adapter-wide max frame size from all active VFs by
 * feeding each active VF's max frame size to ixgbe_update_max_frame().
 * Caller must hold the core lock.
 */
5912 ixgbe_recalculate_max_frame(struct adapter *adapter)
5914 struct ixgbe_vf *vf;
5916 IXGBE_CORE_LOCK_ASSERT(adapter);
5918 for (int i = 0; i < adapter->num_vfs; i++) {
5919 vf = &adapter->vfs[i];
5920 if (vf->flags & IXGBE_VF_ACTIVE)
5921 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * (Re)initialize hardware state for one VF: unmask its mailbox
 * interrupt in PFMBIMR, restore its VLAN config, program its RAR if it
 * has a valid MAC, enable tx/rx, and notify the VF with a PF control
 * message.  Inactive VFs are skipped.  Caller must hold the core lock.
 * (hw initialization and the early return are elided in this listing.)
 */
5927 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5929 struct ixgbe_hw *hw;
5930 uint32_t vf_index, pfmbimr;
5932 IXGBE_CORE_LOCK_ASSERT(adapter);
5936 if (!(vf->flags & IXGBE_VF_ACTIVE))
5939 vf_index = IXGBE_VF_INDEX(vf->pool);
5940 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5941 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5942 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5944 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5946 // XXX multicast addresses
5948 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5949 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5950 vf->ether_addr, vf->pool, TRUE);
5953 ixgbe_vf_enable_transmit(adapter, vf);
5954 ixgbe_vf_enable_receive(adapter, vf);
5956 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5960 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5962 struct adapter *adapter;
5963 struct ixgbe_vf *vf;
5966 adapter = device_get_softc(dev);
5968 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5969 vfnum, adapter->num_vfs));
5971 IXGBE_CORE_LOCK(adapter);
5972 vf = &adapter->vfs[vfnum];
5975 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5976 vf->rar_index = vfnum + 1;
5977 vf->default_vlan = 0;
5978 vf->max_frame_size = ETHER_MAX_LEN;
5979 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5981 if (nvlist_exists_binary(config, "mac-addr")) {
5982 mac = nvlist_get_binary(config, "mac-addr", NULL);
5983 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5984 if (nvlist_get_bool(config, "allow-set-mac"))
5985 vf->flags |= IXGBE_VF_CAP_MAC;
5988 * If the administrator has not specified a MAC address then
5989 * we must allow the VF to choose one.
5991 vf->flags |= IXGBE_VF_CAP_MAC;
5993 vf->flags = IXGBE_VF_ACTIVE;
5995 ixgbe_init_vf(adapter, vf);
5996 IXGBE_CORE_UNLOCK(adapter);
6000 #endif /* PCI_IOV */