1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
44 #include <net/rss_config.h>
45 #include <netinet/in_rss.h>
48 /*********************************************************************
50 *********************************************************************/
/* Human-readable driver version; embedded in the description string
 * that ixgbe_probe passes to device_set_desc_copy(). */
51 char ixgbe_driver_version[] = "3.1.13-k";
54 /*********************************************************************
57 * Used by probe to select devices to load on
58 * Last field stores an index into ixgbe_strings
59 * Last entry must be all 0s
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
/*
 * PCI ID match table walked by ixgbe_probe(): 82598/82599/X540/X550
 * family devices. A subvendor/subdevice of 0 acts as a wildcard in the
 * probe loop. The last field indexes into ixgbe_strings[].
 * NOTE(review): the opening brace, the required all-zeros terminator
 * entry and the closing "};" are not visible in this view -- confirm
 * against the full source before modifying this table.
 */
64 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
98 /* required last entry */
102 /*********************************************************************
103 * Table of branding strings
104 *********************************************************************/
/* Indexed by the last ("String Index") field of ixgbe_vendor_info_array.
 * NOTE(review): the closing "};" is not visible in this view. */
106 static char *ixgbe_strings[] = {
107 "Intel(R) PRO/10GbE PCI-Express Network Driver"
110 /*********************************************************************
111 * Function prototypes
112 *********************************************************************/
/* Device interface entry points (wired into ix_methods below). */
113 static int ixgbe_probe(device_t);
114 static int ixgbe_attach(device_t);
115 static int ixgbe_detach(device_t);
116 static int ixgbe_shutdown(device_t);
117 static int ixgbe_suspend(device_t);
118 static int ixgbe_resume(device_t);
119 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
120 static void ixgbe_init(void *);
121 static void ixgbe_init_locked(struct adapter *);
122 static void ixgbe_stop(void *);
123 #if __FreeBSD_version >= 1100036
124 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* NOTE(review): the matching #endif for the version guard above is not
 * visible in this view -- confirm against the full source. */
126 static void ixgbe_add_media_types(struct adapter *);
127 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
128 static int ixgbe_media_change(struct ifnet *);
129 static void ixgbe_identify_hardware(struct adapter *);
130 static int ixgbe_allocate_pci_resources(struct adapter *);
131 static void ixgbe_get_slot_info(struct adapter *);
132 static int ixgbe_allocate_msix(struct adapter *);
133 static int ixgbe_allocate_legacy(struct adapter *);
134 static int ixgbe_setup_msix(struct adapter *);
135 static void ixgbe_free_pci_resources(struct adapter *);
136 static void ixgbe_local_timer(void *);
137 static int ixgbe_setup_interface(device_t, struct adapter *);
138 static void ixgbe_config_gpie(struct adapter *);
139 static void ixgbe_config_dmac(struct adapter *);
140 static void ixgbe_config_delay_values(struct adapter *);
141 static void ixgbe_config_link(struct adapter *);
142 static void ixgbe_check_wol_support(struct adapter *);
143 static int ixgbe_setup_low_power_mode(struct adapter *);
144 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring bring-up and RSS configuration helpers. */
146 static void ixgbe_initialize_transmit_units(struct adapter *);
147 static void ixgbe_initialize_receive_units(struct adapter *);
148 static void ixgbe_enable_rx_drop(struct adapter *);
149 static void ixgbe_disable_rx_drop(struct adapter *);
150 static void ixgbe_initialize_rss_mapping(struct adapter *);
152 static void ixgbe_enable_intr(struct adapter *);
153 static void ixgbe_disable_intr(struct adapter *);
154 static void ixgbe_update_stats_counters(struct adapter *);
155 static void ixgbe_set_promisc(struct adapter *);
156 static void ixgbe_set_multi(struct adapter *);
157 static void ixgbe_update_link_status(struct adapter *);
158 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
159 static void ixgbe_configure_ivars(struct adapter *);
160 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static void ixgbe_setup_vlan_hw_support(struct adapter *);
163 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
164 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
166 static void ixgbe_add_device_sysctls(struct adapter *);
167 static void ixgbe_add_hw_stats(struct adapter *);
168 static int ixgbe_set_flowcntl(struct adapter *, int);
169 static int ixgbe_set_advertise(struct adapter *, int);
171 /* Sysctl handlers */
172 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
173 const char *, int *, int);
174 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
184 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
188 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
192 /* Support for pluggable optic modules */
193 static bool ixgbe_sfp_probe(struct adapter *);
194 static void ixgbe_setup_optics(struct adapter *);
196 /* Legacy (single vector interrupt handler */
197 static void ixgbe_legacy_irq(void *);
199 /* The MSI/X Interrupt handlers */
200 static void ixgbe_msix_que(void *);
201 static void ixgbe_msix_link(void *);
203 /* Deferred interrupt tasklets */
204 static void ixgbe_handle_que(void *, int);
205 static void ixgbe_handle_link(void *, int);
206 static void ixgbe_handle_msf(void *, int);
207 static void ixgbe_handle_mod(void *, int);
208 static void ixgbe_handle_phy(void *, int);
/* NOTE(review): the following prototypes appear to sit under
 * #ifdef IXGBE_FDIR / PCI_IOV guards in the full source; the guard
 * lines are not visible in this view. */
211 static void ixgbe_reinit_fdir(void *, int);
215 static void ixgbe_ping_all_vfs(struct adapter *);
216 static void ixgbe_handle_mbx(void *, int);
217 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
218 static void ixgbe_uninit_iov(device_t);
219 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
220 static void ixgbe_initialize_iov(struct adapter *);
221 static void ixgbe_recalculate_max_frame(struct adapter *);
222 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
226 /*********************************************************************
227 * FreeBSD Device Interface Entry Points
228 *********************************************************************/
/* newbus method table binding the static functions above to the
 * device_probe/attach/detach/... interface, plus PCI SR-IOV methods.
 * NOTE(review): DEVMETHOD_END and the closing "};" are not visible in
 * this view, nor is the #ifdef PCI_IOV guard the pci_iov_* entries
 * presumably sit under -- confirm against the full source. */
230 static device_method_t ix_methods[] = {
231 /* Device interface */
232 DEVMETHOD(device_probe, ixgbe_probe),
233 DEVMETHOD(device_attach, ixgbe_attach),
234 DEVMETHOD(device_detach, ixgbe_detach),
235 DEVMETHOD(device_shutdown, ixgbe_shutdown),
236 DEVMETHOD(device_suspend, ixgbe_suspend),
237 DEVMETHOD(device_resume, ixgbe_resume),
239 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
240 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
241 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* Driver declaration: softc is the per-port struct adapter. */
246 static driver_t ix_driver = {
247 "ix", ix_methods, sizeof(struct adapter),
250 devclass_t ix_devclass;
251 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
253 MODULE_DEPEND(ix, pci, 1, 1, 1);
254 MODULE_DEPEND(ix, ether, 1, 1, 1);
/* netmap dependency; the opening #ifdef DEV_NETMAP is not visible here. */
256 MODULE_DEPEND(ix, netmap, 1, 1, 1);
257 #endif /* DEV_NETMAP */
260 ** TUNEABLE PARAMETERS:
/* Loader tunables / sysctls under hw.ix.*; defaults applied per-adapter
 * during attach. TUNABLE_INT + SYSCTL_INT pairs export each knob. */
263 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
264 "IXGBE driver parameters");
267 ** AIM: Adaptive Interrupt Moderation
268 ** which means that the interrupt rate
269 ** is varied over time based on the
270 ** traffic for that interrupt vector
272 static int ixgbe_enable_aim = TRUE;
273 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
274 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
275 "Enable adaptive interrupt moderation");
277 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
278 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
279 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
280 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
282 /* How many packets rxeof tries to clean at a time */
283 static int ixgbe_rx_process_limit = 256;
284 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
285 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
286 &ixgbe_rx_process_limit, 0,
287 "Maximum number of received packets to process at a time,"
288 "-1 means unlimited");
290 /* How many packets txeof tries to clean at a time */
291 static int ixgbe_tx_process_limit = 256;
292 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
293 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
294 &ixgbe_tx_process_limit, 0,
295 "Maximum number of sent packets to process at a time,"
296 "-1 means unlimited");
298 /* Flow control setting, default to full */
299 static int ixgbe_flow_control = ixgbe_fc_full;
300 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
301 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
303 /* Advertise Speed, default to 0 (auto) */
304 static int ixgbe_advertise_speed = 0;
305 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
306 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
309 ** Smart speed setting, default to on
310 ** this only works as a compile option
311 ** right now as its during attach, set
312 ** this to 'ixgbe_smart_speed_off' to
315 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
318 * MSIX should be the default for best performance,
319 * but this allows it to be forced off for testing.
321 static int ixgbe_enable_msix = 1;
322 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
323 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
324 "Enable MSI-X interrupts");
327 * Number of Queues, can be set to 0,
328 * it then autoconfigures based on the
329 * number of cpus with a max of 8. This
330 * can be overriden manually here.
332 static int ixgbe_num_queues = 0;
333 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
334 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
335 "Number of queues to configure up to a maximum of 8,"
336 "0 indicates autoconfigure");
339 ** Number of TX descriptors per ring,
340 ** setting higher than RX as this seems
341 ** the better performing choice.
343 static int ixgbe_txd = PERFORM_TXD;
344 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
345 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
346 "Number of transmit descriptors per queue");
348 /* Number of RX descriptors per ring */
349 static int ixgbe_rxd = PERFORM_RXD;
350 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
351 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
352 "Number of receive descriptors per queue");
355 ** Defining this on will allow the use
356 ** of unsupported SFP+ modules, note that
357 ** doing so you are on your own :)
359 static int allow_unsupported_sfp = FALSE;
360 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
362 /* Keep running tab on them for sanity check */
363 static int ixgbe_total_ports;
367 ** Flow Director actually 'steals'
368 ** part of the packet buffer as its
369 ** filter pool, this variable controls
371 ** 0 = 64K, 1 = 128K, 2 = 256K
/* NOTE(review): fdir_pballoc presumably sits under #ifdef IXGBE_FDIR in
 * the full source; the guard lines are not visible in this view. */
373 static int fdir_pballoc = 1;
378 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
379 * be a reference on how to implement netmap support in a driver.
380 * Additional comments are in ixgbe_netmap.h .
382 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
383 * that extend the standard driver.
385 #include <dev/netmap/ixgbe_netmap.h>
386 #endif /* DEV_NETMAP */
388 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
390 /*********************************************************************
391 * Device identification routine
393 * ixgbe_probe determines if the driver should be loaded on
394 * adapter based on PCI vendor/device id of the adapter.
396 * return BUS_PROBE_DEFAULT on success, positive on failure
397 *********************************************************************/
/* NOTE(review): the "static int" return-type line, the early
 * "return (ENXIO)" after the vendor check, the ent++ advance and the
 * closing brace are not visible in this view -- confirm against the
 * full source. Also note adapter_name is built with sprintf() into a
 * fixed 256-byte buffer; snprintf() would be the safer idiom. */
400 ixgbe_probe(device_t dev)
402 ixgbe_vendor_info_t *ent;
404 u16 pci_vendor_id = 0;
405 u16 pci_device_id = 0;
406 u16 pci_subvendor_id = 0;
407 u16 pci_subdevice_id = 0;
408 char adapter_name[256];
410 INIT_DEBUGOUT("ixgbe_probe: begin");
412 pci_vendor_id = pci_get_vendor(dev);
/* Reject non-Intel devices before scanning the ID table. */
413 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
416 pci_device_id = pci_get_device(dev);
417 pci_subvendor_id = pci_get_subvendor(dev);
418 pci_subdevice_id = pci_get_subdevice(dev);
420 ent = ixgbe_vendor_info_array;
/* Linear scan; a subvendor/subdevice of 0 in the table is a wildcard. */
421 while (ent->vendor_id != 0) {
422 if ((pci_vendor_id == ent->vendor_id) &&
423 (pci_device_id == ent->device_id) &&
425 ((pci_subvendor_id == ent->subvendor_id) ||
426 (ent->subvendor_id == 0)) &&
428 ((pci_subdevice_id == ent->subdevice_id) ||
429 (ent->subdevice_id == 0))) {
430 sprintf(adapter_name, "%s, Version - %s",
431 ixgbe_strings[ent->index],
432 ixgbe_driver_version);
433 device_set_desc_copy(dev, adapter_name);
435 return (BUS_PROBE_DEFAULT);
442 /*********************************************************************
443 * Device initialization routine
445 * The attach entry point is called when the driver is being loaded.
446 * This routine identifies the type of hardware, allocates all resources
447 * and initializes the hardware.
449 * return 0 on success, positive on failure
450 *********************************************************************/
/* NOTE(review): several lines of this function are not visible in this
 * view -- among them the declarations of hw/error/csum/ctrl_ext, the
 * "goto err_*" statements, the switch(error) header, the error labels
 * and the final return. Confirm against the full source before editing. */
453 ixgbe_attach(device_t dev)
455 struct adapter *adapter;
461 INIT_DEBUGOUT("ixgbe_attach: begin");
463 /* Allocate, clear, and link in our adapter structure */
464 adapter = device_get_softc(dev);
469 adapter->init_locked = ixgbe_init_locked;
470 adapter->stop_locked = ixgbe_stop;
/* Core mutex guards init/stop and the timer callout below. */
473 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
475 /* Set up the timer callout */
476 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
478 /* Determine hardware revision */
479 ixgbe_identify_hardware(adapter);
481 /* Do base PCI setup - map BAR0 */
482 if (ixgbe_allocate_pci_resources(adapter)) {
483 device_printf(dev, "Allocation of PCI resources failed\n");
488 /* Sysctls for limiting the amount of work done in the taskqueues */
489 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
490 "max number of rx packets to process",
491 &adapter->rx_process_limit, ixgbe_rx_process_limit);
493 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
494 "max number of tx packets to process",
495 &adapter->tx_process_limit, ixgbe_tx_process_limit);
497 /* Do descriptor calc and sanity checks */
/* TX descriptor count must keep the ring DBA_ALIGN-aligned and within
 * [MIN_TXD, MAX_TXD]; otherwise fall back to DEFAULT_TXD. */
498 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
499 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
500 device_printf(dev, "TXD config issue, using default!\n");
501 adapter->num_tx_desc = DEFAULT_TXD;
503 adapter->num_tx_desc = ixgbe_txd;
506 ** With many RX rings it is easy to exceed the
507 ** system mbuf allocation. Tuning nmbclusters
508 ** can alleviate this.
510 if (nmbclusters > 0) {
512 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
513 if (s > nmbclusters) {
514 device_printf(dev, "RX Descriptors exceed "
515 "system mbuf max, using default instead!\n");
516 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range sanity check for the RX ring size. */
520 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
521 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
522 device_printf(dev, "RXD config issue, using default!\n");
523 adapter->num_rx_desc = DEFAULT_RXD;
525 adapter->num_rx_desc = ixgbe_rxd;
527 /* Allocate our TX/RX Queues */
528 if (ixgbe_allocate_queues(adapter)) {
533 /* Allocate multicast array memory. */
534 adapter->mta = malloc(sizeof(*adapter->mta) *
535 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
536 if (adapter->mta == NULL) {
537 device_printf(dev, "Can not allocate multicast setup array\n");
542 /* Initialize the shared code */
543 hw->allow_unsupported_sfp = allow_unsupported_sfp;
544 error = ixgbe_init_shared_code(hw);
545 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
547 ** No optics in this port, set up
548 ** so the timer routine will probe
549 ** for later insertion.
551 adapter->sfp_probe = TRUE;
553 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
554 device_printf(dev, "Unsupported SFP+ module detected!\n");
558 device_printf(dev, "Unable to initialize the shared code\n");
563 /* Make sure we have a good EEPROM before we read from it */
564 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
565 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
570 error = ixgbe_init_hw(hw);
/* NOTE(review): these are case labels of a switch(error) whose header
 * is not visible in this view. */
572 case IXGBE_ERR_EEPROM_VERSION:
573 device_printf(dev, "This device is a pre-production adapter/"
574 "LOM. Please be aware there may be issues associated "
575 "with your hardware.\nIf you are experiencing problems "
576 "please contact your Intel or hardware representative "
577 "who provided you with this hardware.\n");
579 case IXGBE_ERR_SFP_NOT_SUPPORTED:
580 device_printf(dev, "Unsupported SFP+ Module\n");
583 case IXGBE_ERR_SFP_NOT_PRESENT:
584 device_printf(dev, "No SFP+ Module found\n");
590 /* hw.ix defaults init */
591 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
592 ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
593 adapter->enable_aim = ixgbe_enable_aim;
/* Prefer MSI-X when multiple vectors were granted; else legacy/MSI. */
595 if ((adapter->msix > 1) && (ixgbe_enable_msix))
596 error = ixgbe_allocate_msix(adapter);
598 error = ixgbe_allocate_legacy(adapter);
602 /* Enable the optics for 82599 SFP+ fiber */
603 ixgbe_enable_tx_laser(hw);
605 /* Enable power to the phy. */
606 ixgbe_set_phy_power(hw, TRUE);
608 /* Setup OS specific network interface */
609 if (ixgbe_setup_interface(dev, adapter) != 0)
612 /* Initialize statistics */
613 ixgbe_update_stats_counters(adapter);
615 /* Register for VLAN events */
616 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
617 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
618 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
619 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
621 /* Check PCIE slot type/speed/width */
622 ixgbe_get_slot_info(adapter);
624 /* Set an initial default flow control & dmac value */
625 adapter->fc = ixgbe_fc_full;
627 adapter->eee_enabled = 0;
/* SR-IOV schema setup: only for non-82598 parts with MSI-X vectors.
 * NOTE(review): presumably under #ifdef PCI_IOV; guard not visible. */
630 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
631 nvlist_t *pf_schema, *vf_schema;
633 hw->mbx.ops.init_params(hw);
634 pf_schema = pci_iov_schema_alloc_node();
635 vf_schema = pci_iov_schema_alloc_node();
636 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
637 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
638 IOV_SCHEMA_HASDEFAULT, TRUE);
639 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
640 IOV_SCHEMA_HASDEFAULT, FALSE);
641 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
642 IOV_SCHEMA_HASDEFAULT, FALSE);
643 error = pci_iov_attach(dev, pf_schema, vf_schema);
646 "Error %d setting up SR-IOV\n", error);
651 /* Check for certain supported features */
652 ixgbe_check_wol_support(adapter);
655 ixgbe_add_device_sysctls(adapter);
656 ixgbe_add_hw_stats(adapter);
658 /* let hardware know driver is loaded */
659 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
660 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
661 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
664 ixgbe_netmap_attach(adapter);
665 #endif /* DEV_NETMAP */
666 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error unwind: free rings, ifnet, PCI resources and the multicast
 * array. NOTE(review): the err_late/err_out labels themselves are not
 * visible in this view. */
670 ixgbe_free_transmit_structures(adapter);
671 ixgbe_free_receive_structures(adapter);
673 if (adapter->ifp != NULL)
674 if_free(adapter->ifp);
675 ixgbe_free_pci_resources(adapter);
676 free(adapter->mta, M_DEVBUF);
680 /*********************************************************************
681 * Device removal routine
683 * The detach entry point is called when the driver is being removed.
684 * This routine stops the adapter and deallocates all the resources
685 * that were allocated for driver operation.
687 * return 0 on success, positive on failure
688 *********************************************************************/
/* NOTE(review): the "static int" line, the ctrl_ext declaration, the
 * EBUSY returns for the vlan/SR-IOV busy cases, several #ifdef guards
 * (PCI_IOV, IXGBE_FDIR, DEV_NETMAP) and the final "return (0);" are not
 * visible in this view -- confirm against the full source. */
691 ixgbe_detach(device_t dev)
693 struct adapter *adapter = device_get_softc(dev);
694 struct ix_queue *que = adapter->queues;
695 struct tx_ring *txr = adapter->tx_rings;
698 INIT_DEBUGOUT("ixgbe_detach: begin");
700 /* Make sure VLANS are not using driver */
701 if (adapter->ifp->if_vlantrunk != NULL) {
702 device_printf(dev,"Vlan in use, detach first\n");
707 if (pci_iov_detach(dev) != 0) {
708 device_printf(dev, "SR-IOV in use; detach first.\n");
713 ether_ifdetach(adapter->ifp);
714 /* Stop the adapter */
715 IXGBE_CORE_LOCK(adapter);
716 ixgbe_setup_low_power_mode(adapter);
717 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues before teardown. */
719 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
721 #ifndef IXGBE_LEGACY_TX
722 taskqueue_drain(que->tq, &txr->txq_task);
724 taskqueue_drain(que->tq, &que->que_task);
725 taskqueue_free(que->tq);
729 /* Drain the Link queue */
731 taskqueue_drain(adapter->tq, &adapter->link_task);
732 taskqueue_drain(adapter->tq, &adapter->mod_task);
733 taskqueue_drain(adapter->tq, &adapter->msf_task);
735 taskqueue_drain(adapter->tq, &adapter->mbx_task);
737 taskqueue_drain(adapter->tq, &adapter->phy_task);
739 taskqueue_drain(adapter->tq, &adapter->fdir_task);
741 taskqueue_free(adapter->tq);
744 /* let hardware know driver is unloading */
745 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
746 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
747 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
749 /* Unregister VLAN events */
750 if (adapter->vlan_attach != NULL)
751 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
752 if (adapter->vlan_detach != NULL)
753 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
755 callout_drain(&adapter->timer);
757 netmap_detach(adapter->ifp);
758 #endif /* DEV_NETMAP */
759 ixgbe_free_pci_resources(adapter);
760 bus_generic_detach(dev);
761 if_free(adapter->ifp);
763 ixgbe_free_transmit_structures(adapter);
764 ixgbe_free_receive_structures(adapter);
765 free(adapter->mta, M_DEVBUF);
767 IXGBE_CORE_LOCK_DESTROY(adapter);
771 /*********************************************************************
773 * Shutdown entry point
775 **********************************************************************/
/* Puts the adapter into low-power mode on system shutdown, under the
 * core lock. NOTE(review): the "static int" line, the error declaration
 * and the "return (error);" are not visible in this view. */
778 ixgbe_shutdown(device_t dev)
780 struct adapter *adapter = device_get_softc(dev);
783 INIT_DEBUGOUT("ixgbe_shutdown: begin");
785 IXGBE_CORE_LOCK(adapter);
786 error = ixgbe_setup_low_power_mode(adapter);
787 IXGBE_CORE_UNLOCK(adapter);
793 * Methods for going from:
794 * D0 -> D3: ixgbe_suspend
795 * D3 -> D0: ixgbe_resume
/* Suspend: same low-power transition as shutdown, taken on D0->D3.
 * NOTE(review): return type, error declaration and final return are not
 * visible in this view. */
798 ixgbe_suspend(device_t dev)
800 struct adapter *adapter = device_get_softc(dev);
803 INIT_DEBUGOUT("ixgbe_suspend: begin");
805 IXGBE_CORE_LOCK(adapter);
807 error = ixgbe_setup_low_power_mode(adapter);
809 IXGBE_CORE_UNLOCK(adapter);
/* Resume (D3 -> D0): report and clear the Wake-Up Status register,
 * clear WUFC, and re-run init if the interface was up so previously
 * advertised speeds are restored.
 * NOTE(review): the return type, the wus declaration, the "if (wus)"
 * guard around the printf and the final return are not visible in this
 * view -- confirm against the full source. */
815 ixgbe_resume(device_t dev)
817 struct adapter *adapter = device_get_softc(dev);
818 struct ifnet *ifp = adapter->ifp;
819 struct ixgbe_hw *hw = &adapter->hw;
822 INIT_DEBUGOUT("ixgbe_resume: begin");
824 IXGBE_CORE_LOCK(adapter);
826 /* Read & clear WUS register */
827 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
829 device_printf(dev, "Woken up by (WUS): %#010x\n",
830 IXGBE_READ_REG(hw, IXGBE_WUS));
831 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
832 /* And clear WUFC until next low-power transition */
833 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
836 * Required after D3->D0 transition;
837 * will re-advertise all previous advertised speeds
839 if (ifp->if_flags & IFF_UP)
840 ixgbe_init_locked(adapter);
842 IXGBE_CORE_UNLOCK(adapter);
848 /*********************************************************************
851 * ixgbe_ioctl is called when the user wants to configure the
854 * return 0 on success, positive on failure
855 **********************************************************************/
/* NOTE(review): this function's "switch (command)" header, its case
 * labels (SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI,
 * SIOCxIFMEDIA, SIOCSIFCAP, SIOCGI2C, default), break statements,
 * several #ifdef/#endif guards and the final return are not visible in
 * this view -- confirm against the full source before editing. */
858 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
860 struct adapter *adapter = ifp->if_softc;
861 struct ifreq *ifr = (struct ifreq *) data;
862 #if defined(INET) || defined(INET6)
863 struct ifaddr *ifa = (struct ifaddr *)data;
866 bool avoid_reset = FALSE;
/* SIOCSIFADDR path: for INET/INET6 addresses avoid a full reinit. */
872 if (ifa->ifa_addr->sa_family == AF_INET)
876 if (ifa->ifa_addr->sa_family == AF_INET6)
880 ** Calling init results in link renegotiation,
881 ** so we avoid doing it when possible.
884 ifp->if_flags |= IFF_UP;
885 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
888 if (!(ifp->if_flags & IFF_NOARP))
889 arp_ifinit(ifp, ifa);
892 error = ether_ioctl(ifp, command, data);
895 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
896 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
899 IXGBE_CORE_LOCK(adapter);
900 ifp->if_mtu = ifr->ifr_mtu;
901 adapter->max_frame_size =
902 ifp->if_mtu + IXGBE_MTU_HDR;
903 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
904 ixgbe_init_locked(adapter);
906 ixgbe_recalculate_max_frame(adapter);
908 IXGBE_CORE_UNLOCK(adapter);
912 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
913 IXGBE_CORE_LOCK(adapter);
914 if (ifp->if_flags & IFF_UP) {
915 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changed: reprogram filters, skip reinit. */
916 if ((ifp->if_flags ^ adapter->if_flags) &
917 (IFF_PROMISC | IFF_ALLMULTI)) {
918 ixgbe_set_promisc(adapter);
921 ixgbe_init_locked(adapter);
923 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
925 adapter->if_flags = ifp->if_flags;
926 IXGBE_CORE_UNLOCK(adapter);
930 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
931 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
932 IXGBE_CORE_LOCK(adapter);
933 ixgbe_disable_intr(adapter);
934 ixgbe_set_multi(adapter);
935 ixgbe_enable_intr(adapter);
936 IXGBE_CORE_UNLOCK(adapter);
941 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
942 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
946 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
948 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
952 /* HW cannot turn these on/off separately */
953 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
954 ifp->if_capenable ^= IFCAP_RXCSUM;
955 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
957 if (mask & IFCAP_TXCSUM)
958 ifp->if_capenable ^= IFCAP_TXCSUM;
959 if (mask & IFCAP_TXCSUM_IPV6)
960 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
961 if (mask & IFCAP_TSO4)
962 ifp->if_capenable ^= IFCAP_TSO4;
963 if (mask & IFCAP_TSO6)
964 ifp->if_capenable ^= IFCAP_TSO6;
965 if (mask & IFCAP_LRO)
966 ifp->if_capenable ^= IFCAP_LRO;
967 if (mask & IFCAP_VLAN_HWTAGGING)
968 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
969 if (mask & IFCAP_VLAN_HWFILTER)
970 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
971 if (mask & IFCAP_VLAN_HWTSO)
972 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Capability changes require a reinit when the interface is running. */
974 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
975 IXGBE_CORE_LOCK(adapter);
976 ixgbe_init_locked(adapter);
977 IXGBE_CORE_UNLOCK(adapter);
979 VLAN_CAPABILITIES(ifp);
982 #if __FreeBSD_version >= 1002500
/* SIOCGI2C: read SFP module EEPROM/diagnostics over I2C and copy the
 * result back out to userland. */
985 struct ixgbe_hw *hw = &adapter->hw;
988 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
989 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
/* Only the standard SFP EEPROM (0xA0) and DDM (0xA2) addresses. */
992 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
996 if (i2c.len > sizeof(i2c.data)) {
1001 for (i = 0; i < i2c.len; i++)
1002 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
1003 i2c.dev_addr, &i2c.data[i]);
1004 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1009 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1010 error = ether_ioctl(ifp, command, data);
1018 * Set the various hardware offload abilities.
1020 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1021 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1022 * mbuf offload flags the driver will understand.
/* NOTE(review): the "static void" line, opening brace, the #else branch
 * separator for the __FreeBSD_version guard and the closing braces are
 * not visible in this view -- confirm against the full source. */
1025 ixgbe_set_if_hwassist(struct adapter *adapter)
1027 struct ifnet *ifp = adapter->ifp;
1028 struct ixgbe_hw *hw = &adapter->hw;
/* Rebuild if_hwassist from scratch on every call. */
1030 ifp->if_hwassist = 0;
1031 #if __FreeBSD_version >= 1000000
1032 if (ifp->if_capenable & IFCAP_TSO4)
1033 ifp->if_hwassist |= CSUM_IP_TSO;
1034 if (ifp->if_capenable & IFCAP_TSO6)
1035 ifp->if_hwassist |= CSUM_IP6_TSO;
1036 if (ifp->if_capenable & IFCAP_TXCSUM) {
1037 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
/* 82598 lacks SCTP checksum offload; all later MACs have it. */
1038 if (hw->mac.type != ixgbe_mac_82598EB)
1039 ifp->if_hwassist |= CSUM_IP_SCTP;
1041 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1042 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1043 if (hw->mac.type != ixgbe_mac_82598EB)
1044 ifp->if_hwassist |= CSUM_IP6_SCTP;
/* Pre-1000000 flag names (legacy __FreeBSD_version branch). */
1047 if (ifp->if_capenable & IFCAP_TSO)
1048 ifp->if_hwassist |= CSUM_TSO;
1049 if (ifp->if_capenable & IFCAP_TXCSUM) {
1050 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1051 if (hw->mac.type != ixgbe_mac_82598EB)
1052 ifp->if_hwassist |= CSUM_SCTP;
1057 /*********************************************************************
1060 * This routine is used in two ways. It is used by the stack as
1061 * init entry point in network interface structure. It is also used
1062 * by the driver as a hw/sw initialization routine to get to a
1065 * return 0 on success, positive on failure
1066 **********************************************************************/
1067 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * Core (re)initialization path, called with the core mutex held: stops the
 * adapter, reprograms MAC address/RAR, sets offloads, builds TX/RX rings,
 * programs interrupt routing, flow control, VLAN and DMA-coalescing state,
 * then marks the interface IFF_DRV_RUNNING.  Serves both the stack's
 * if_init entry (via ixgbe_init) and internal resets.
 * NOTE(review): many structural lines (braces, else branches, local decls
 * such as txdctl/rxdctl/mhadd/err) are elided in this view; code is left
 * byte-identical.
 */
1070 ixgbe_init_locked(struct adapter *adapter)
1072 struct ifnet *ifp = adapter->ifp;
1073 device_t dev = adapter->dev;
1074 struct ixgbe_hw *hw = &adapter->hw;
1075 struct tx_ring *txr;
1076 struct rx_ring *rxr;
1081 enum ixgbe_iov_mode mode;
1084 mtx_assert(&adapter->core_mtx, MA_OWNED);
1085 INIT_DEBUGOUT("ixgbe_init_locked: begin");
/* Quiesce the hardware before reprogramming it. */
1087 hw->adapter_stopped = FALSE;
1088 ixgbe_stop_adapter(hw);
1089 callout_stop(&adapter->timer);
1092 mode = ixgbe_get_iov_mode(adapter);
1093 adapter->pool = ixgbe_max_vfs(mode);
1094 /* Queue indices may change with IOV mode */
1095 for (int i = 0; i < adapter->num_queues; i++) {
1096 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1097 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1100 /* reprogram the RAR[0] in case user changed it. */
1101 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1103 /* Get the latest mac address, User can use a LAA */
1104 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1105 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1106 hw->addr_ctrl.rar_used_count = 1;
1108 /* Set hardware offload abilities from ifnet flags */
1109 ixgbe_set_if_hwassist(adapter);
1111 /* Prepare transmit descriptors and buffers */
1112 if (ixgbe_setup_transmit_structures(adapter)) {
1113 device_printf(dev, "Could not setup transmit structures\n");
1114 ixgbe_stop(adapter);
1120 ixgbe_initialize_iov(adapter);
1122 ixgbe_initialize_transmit_units(adapter);
1124 /* Setup Multicast table */
1125 ixgbe_set_multi(adapter);
1127 /* Determine the correct mbuf pool, based on frame size */
1128 if (adapter->max_frame_size <= MCLBYTES)
1129 adapter->rx_mbuf_sz = MCLBYTES;
1131 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1133 /* Prepare receive descriptors and buffers */
1134 if (ixgbe_setup_receive_structures(adapter)) {
1135 device_printf(dev, "Could not setup receive structures\n");
1136 ixgbe_stop(adapter);
1140 /* Configure RX settings */
1141 ixgbe_initialize_receive_units(adapter);
1143 /* Enable SDP & MSIX interrupts based on adapter */
1144 ixgbe_config_gpie(adapter);
/* Jumbo frames: program max frame size into MHADD.MFS. */
1147 if (ifp->if_mtu > ETHERMTU) {
1148 /* aka IXGBE_MAXFRS on 82599 and newer */
1149 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1150 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1151 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1152 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1155 /* Now enable all the queues */
1156 for (int i = 0; i < adapter->num_queues; i++) {
1157 txr = &adapter->tx_rings[i];
1158 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1159 txdctl |= IXGBE_TXDCTL_ENABLE;
1160 /* Set WTHRESH to 8, burst writeback */
1161 txdctl |= (8 << 16);
1163 * When the internal queue falls below PTHRESH (32),
1164 * start prefetching as long as there are at least
1165 * HTHRESH (1) buffers ready. The values are taken
1166 * from the Intel linux driver 3.8.21.
1167 * Prefetching enables tx line rate even with 1 queue.
1169 txdctl |= (32 << 0) | (1 << 8);
1170 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX queue and poll (bounded, 10 tries total) for it to latch. */
1173 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1174 rxr = &adapter->rx_rings[i];
1175 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1176 if (hw->mac.type == ixgbe_mac_82598EB) {
1182 rxdctl &= ~0x3FFFFF;
1185 rxdctl |= IXGBE_RXDCTL_ENABLE;
1186 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1187 for (; j < 10; j++) {
1188 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1189 IXGBE_RXDCTL_ENABLE)
1197 * In netmap mode, we must preserve the buffers made
1198 * available to userspace before the if_init()
1199 * (this is true by default on the TX side, because
1200 * init makes all buffers available to userspace).
1202 * netmap_reset() and the device specific routines
1203 * (e.g. ixgbe_setup_receive_rings()) map these
1204 * buffers at the end of the NIC ring, so here we
1205 * must set the RDT (tail) register to make sure
1206 * they are not overwritten.
1208 * In this driver the NIC ring starts at RDH = 0,
1209 * RDT points to the last slot available for reception (?),
1210 * so RDT = num_rx_desc - 1 means the whole ring is available.
1212 if (ifp->if_capenable & IFCAP_NETMAP) {
1213 struct netmap_adapter *na = NA(adapter->ifp);
1214 struct netmap_kring *kring = &na->rx_rings[i];
1215 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1217 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1219 #endif /* DEV_NETMAP */
1220 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1223 /* Enable Receive engine */
1224 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1225 if (hw->mac.type == ixgbe_mac_82598EB)
1226 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1227 rxctrl |= IXGBE_RXCTRL_RXEN;
1228 ixgbe_enable_rx_dma(hw, rxctrl);
/* Restart the watchdog/stats timer now that the device is coming up. */
1230 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1232 /* Set up MSI/X routing */
1233 if (ixgbe_enable_msix) {
1234 ixgbe_configure_ivars(adapter);
1235 /* Set up auto-mask */
1236 if (hw->mac.type == ixgbe_mac_82598EB)
1237 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1240 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1242 } else { /* Simple settings for Legacy/MSI */
1243 ixgbe_set_ivar(adapter, 0, 0, 0);
1244 ixgbe_set_ivar(adapter, 0, 0, 1);
1245 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1249 /* Init Flow director */
1250 if (hw->mac.type != ixgbe_mac_82598EB) {
1251 u32 hdrm = 32 << fdir_pballoc;
1253 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1254 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1259 * Check on any SFP devices that
1260 * need to be kick-started
1262 if (hw->phy.type == ixgbe_phy_none) {
1263 err = hw->phy.ops.identify(hw);
1264 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1266 "Unsupported SFP+ module type was detected.\n");
1271 /* Set moderation on the Link interrupt */
1272 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1274 /* Configure Energy Efficient Ethernet for supported devices */
1275 if (hw->mac.ops.setup_eee) {
1276 err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
1278 device_printf(dev, "Error setting up EEE: %d\n", err);
1281 /* Enable power to the phy. */
1282 ixgbe_set_phy_power(hw, TRUE);
1284 /* Config/Enable Link */
1285 ixgbe_config_link(adapter);
1287 /* Hardware Packet Buffer & Flow Control setup */
1288 ixgbe_config_delay_values(adapter);
1290 /* Initialize the FC settings */
1293 /* Set up VLAN support and filter */
1294 ixgbe_setup_vlan_hw_support(adapter);
1296 /* Setup DMA Coalescing */
1297 ixgbe_config_dmac(adapter);
1299 /* And now turn on interrupts */
1300 ixgbe_enable_intr(adapter);
1303 /* Enable the use of the MBX by the VF's */
1305 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1306 reg |= IXGBE_CTRL_EXT_PFRSTD;
1307 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1311 /* Now inform the stack we're ready */
1312 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * if_init entry point handed to the network stack: takes the core lock
 * and delegates all real work to ixgbe_init_locked().
 */
1318 ixgbe_init(void *arg)
1320 struct adapter *adapter = arg;
1322 IXGBE_CORE_LOCK(adapter);
1323 ixgbe_init_locked(adapter);
1324 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the GPIE register: enable the SDP pin interrupts relevant to
 * this MAC/device (fan failure, SFP module detect, thermal/link detect)
 * and, when more than one MSI-X vector is in use, enable enhanced MSI-X
 * mode with auto-mask and PBA support.
 */
1329 ixgbe_config_gpie(struct adapter *adapter)
1331 struct ixgbe_hw *hw = &adapter->hw;
1334 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1336 /* Fan Failure Interrupt */
1337 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1338 gpie |= IXGBE_SDP1_GPIEN;
1341 * Module detection (SDP2)
1342 * Media ready (SDP1)
1344 if (hw->mac.type == ixgbe_mac_82599EB) {
1345 gpie |= IXGBE_SDP2_GPIEN;
/* QSFP variant routes media-ready differently, so skip SDP1 there. */
1346 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1347 gpie |= IXGBE_SDP1_GPIEN;
1351 * Thermal Failure Detection (X540)
1352 * Link Detection (X552 SFP+, X552/X557-AT)
1354 if (hw->mac.type == ixgbe_mac_X540 ||
1355 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1356 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1357 gpie |= IXGBE_SDP0_GPIEN_X540;
1359 if (adapter->msix > 1) {
1360 /* Enable Enhanced MSIX mode */
1361 gpie |= IXGBE_GPIE_MSIX_MODE;
1362 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1366 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1371 * Requires adapter->max_frame_size to be set.
/*
 * Compute flow-control high/low watermarks from the max frame size using
 * the MAC-generation-specific delay-value macros, and seed the shared
 * code's fc settings (requested mode, pause time, XON).
 * Requires adapter->max_frame_size to be set before the call.
 * NOTE(review): break/default lines of both switches are elided in this
 * view; code is left byte-identical.
 */
1374 ixgbe_config_delay_values(struct adapter *adapter)
1376 struct ixgbe_hw *hw = &adapter->hw;
1377 u32 rxpb, frame, size, tmp;
1379 frame = adapter->max_frame_size;
1381 /* Calculate High Water */
1382 switch (hw->mac.type) {
1383 case ixgbe_mac_X540:
1384 case ixgbe_mac_X550:
1385 case ixgbe_mac_X550EM_x:
1386 tmp = IXGBE_DV_X540(frame, frame);
1389 tmp = IXGBE_DV(frame, frame);
/* High water = RX packet buffer size (KB) minus the delay value (KB). */
1392 size = IXGBE_BT2KB(tmp);
1393 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1394 hw->fc.high_water[0] = rxpb - size;
1396 /* Now calculate Low Water */
1397 switch (hw->mac.type) {
1398 case ixgbe_mac_X540:
1399 case ixgbe_mac_X550:
1400 case ixgbe_mac_X550EM_x:
1401 tmp = IXGBE_LOW_DV_X540(frame);
1404 tmp = IXGBE_LOW_DV(frame);
1407 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1409 hw->fc.requested_mode = adapter->fc;
1410 hw->fc.pause_time = IXGBE_FC_PAUSE;
1411 hw->fc.send_xon = TRUE;
1416 ** MSIX Interrupt Handlers and Tasklets
1421 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1423 struct ixgbe_hw *hw = &adapter->hw;
1424 u64 queue = (u64)(1 << vector);
1427 if (hw->mac.type == ixgbe_mac_82598EB) {
1428 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1429 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1431 mask = (queue & 0xFFFFFFFF);
1433 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1434 mask = (queue >> 32);
1436 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1441 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1443 struct ixgbe_hw *hw = &adapter->hw;
1444 u64 queue = (u64)(1 << vector);
1447 if (hw->mac.type == ixgbe_mac_82598EB) {
1448 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1449 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1451 mask = (queue & 0xFFFFFFFF);
1453 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1454 mask = (queue >> 32);
1456 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Deferred (taskqueue) handler for a queue: when the interface is running,
 * drains pending transmit work (multiqueue or legacy path), then re-enables
 * the queue's MSI-X interrupt (or the shared interrupt when no per-queue
 * resource exists).
 */
1461 ixgbe_handle_que(void *context, int pending)
1463 struct ix_queue *que = context;
1464 struct adapter *adapter = que->adapter;
1465 struct tx_ring *txr = que->txr;
1466 struct ifnet *ifp = adapter->ifp;
1468 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1472 #ifndef IXGBE_LEGACY_TX
1473 if (!drbr_empty(ifp, txr->br))
1474 ixgbe_mq_start_locked(ifp, txr);
1476 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1477 ixgbe_start_locked(txr, ifp);
1479 IXGBE_TX_UNLOCK(txr);
1482 /* Reenable this interrupt */
1483 if (que->res != NULL)
1484 ixgbe_enable_queue(adapter, que->msix);
1486 ixgbe_enable_intr(adapter);
1491 /*********************************************************************
1493 * Legacy Interrupt Service routine
1495 **********************************************************************/
/*
 * Legacy/MSI interrupt handler: reads EICR once, bails out (re-enabling
 * interrupts) on a spurious zero cause, processes RX/TX for queue 0, and
 * dispatches slow-path causes (fan failure, link change, external PHY)
 * to the adapter taskqueue.
 */
1498 ixgbe_legacy_irq(void *arg)
1500 struct ix_queue *que = arg;
1501 struct adapter *adapter = que->adapter;
1502 struct ixgbe_hw *hw = &adapter->hw;
1503 struct ifnet *ifp = adapter->ifp;
1504 struct tx_ring *txr = adapter->tx_rings;
/* Reading EICR clears the cause bits on this path. */
1509 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1512 if (reg_eicr == 0) {
1513 ixgbe_enable_intr(adapter);
1517 more = ixgbe_rxeof(que);
1521 #ifdef IXGBE_LEGACY_TX
1522 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1523 ixgbe_start_locked(txr, ifp);
1525 if (!drbr_empty(ifp, txr->br))
1526 ixgbe_mq_start_locked(ifp, txr);
1528 IXGBE_TX_UNLOCK(txr);
1530 /* Check for fan failure */
1531 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1532 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1533 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1534 "REPLACE IMMEDIATELY!!\n");
1535 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1538 /* Link status change */
1539 if (reg_eicr & IXGBE_EICR_LSC)
1540 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1542 /* External PHY interrupt */
1543 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1544 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1545 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: defer to the queue task instead of spinning here. */
1548 taskqueue_enqueue(que->tq, &que->que_task);
1550 ixgbe_enable_intr(adapter);
1555 /*********************************************************************
1557 * MSIX Queue Interrupt Service routine
1559 **********************************************************************/
/*
 * Per-queue MSI-X interrupt handler: masks this vector, services RX/TX,
 * optionally recomputes adaptive interrupt moderation (AIM) from the
 * average bytes/packet seen in the last interval, then either defers
 * remaining work to the queue task or re-enables the vector.
 */
1561 ixgbe_msix_que(void *arg)
1563 struct ix_queue *que = arg;
1564 struct adapter *adapter = que->adapter;
1565 struct ifnet *ifp = adapter->ifp;
1566 struct tx_ring *txr = que->txr;
1567 struct rx_ring *rxr = que->rxr;
1572 /* Protect against spurious interrupts */
1573 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1576 ixgbe_disable_queue(adapter, que->msix);
1579 more = ixgbe_rxeof(que);
1583 #ifdef IXGBE_LEGACY_TX
/*
 * NOTE(review): looks like a bug — IFQ_DRV_IS_EMPTY takes a pointer; the
 * sibling call sites at lines 1476 and 1522 pass &ifp->if_snd, but this
 * one passes ifp->if_snd (missing '&').  Verify against the ifq macros
 * and fix if confirmed.
 */
1584 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1585 ixgbe_start_locked(txr, ifp);
1587 if (!drbr_empty(ifp, txr->br))
1588 ixgbe_mq_start_locked(ifp, txr);
1590 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip moderation recalculation entirely. */
1594 if (adapter->enable_aim == FALSE)
1597 ** Do Adaptive Interrupt Moderation:
1598 ** - Write out last calculated setting
1599 ** - Calculate based on average size over
1600 ** the last interval.
1602 if (que->eitr_setting)
1603 IXGBE_WRITE_REG(&adapter->hw,
1604 IXGBE_EITR(que->msix), que->eitr_setting);
1606 que->eitr_setting = 0;
1608 /* Idle, do nothing */
1609 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr = average frame size over the interval (TX and RX, take the max). */
1612 if ((txr->bytes) && (txr->packets))
1613 newitr = txr->bytes/txr->packets;
1614 if ((rxr->bytes) && (rxr->packets))
1615 newitr = max(newitr,
1616 (rxr->bytes / rxr->packets));
1617 newitr += 24; /* account for hardware frame, crc */
1619 /* set an upper boundary */
1620 newitr = min(newitr, 3000);
1622 /* Be nice to the mid range */
1623 if ((newitr > 300) && (newitr < 1200))
1624 newitr = (newitr / 3);
1626 newitr = (newitr / 2);
/* 82598 encodes the interval in both halves of EITR. */
1628 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1629 newitr |= newitr << 16;
1631 newitr |= IXGBE_EITR_CNT_WDIS;
1633 /* save for next interrupt */
1634 que->eitr_setting = newitr;
1644 taskqueue_enqueue(que->tq, &que->que_task);
1646 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSI-X "other causes" (link) vector handler: masks OTHER interrupts,
 * reads and acks the cause, then dispatches each slow-path event — link
 * change, flow-director reinit, ECC error, over-temp, VF mailbox, SFP
 * module insert/media change, fan failure, external PHY — to the adapter
 * taskqueue, finally re-enabling OTHER interrupts.
 */
1652 ixgbe_msix_link(void *arg)
1654 struct adapter *adapter = arg;
1655 struct ixgbe_hw *hw = &adapter->hw;
1656 u32 reg_eicr, mod_mask;
1658 ++adapter->link_irq;
1660 /* Pause other interrupts */
1661 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1663 /* First get the cause */
1664 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1665 /* Be sure the queue bits are not cleared */
1666 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1667 /* Clear interrupt with write */
1668 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1670 /* Link status change */
1671 if (reg_eicr & IXGBE_EICR_LSC) {
1672 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1673 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* 82599-and-later-only causes below. */
1676 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1678 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1679 /* This is probably overkill :) */
1680 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1682 /* Disable the interrupt */
1683 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1684 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1687 if (reg_eicr & IXGBE_EICR_ECC) {
1688 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1689 "Please Reboot!!\n");
1690 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1693 /* Check for over temp condition */
1694 if (reg_eicr & IXGBE_EICR_TS) {
1695 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1696 "PHY IS SHUT DOWN!!\n");
1697 device_printf(adapter->dev, "System shutdown required!\n");
1698 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1701 if (reg_eicr & IXGBE_EICR_MAILBOX)
1702 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1706 /* Pluggable optics-related interrupt */
1707 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1708 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1710 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1712 if (ixgbe_is_sfp(hw)) {
/* SDP1 = media/MSF (multi-speed fiber) event; mod_mask = module insert. */
1713 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1714 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1715 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1716 } else if (reg_eicr & mod_mask) {
1717 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1718 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1722 /* Check for fan failure */
1723 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1724 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1725 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1726 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1727 "REPLACE IMMEDIATELY!!\n");
1730 /* External PHY interrupt */
1731 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1732 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1733 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1734 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1737 /* Re-enable other interrupts */
1738 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1742 /*********************************************************************
1744 * Media Ioctl callback
1746 * This routine is called whenever the user queries the status of
1747 * the interface using ifconfig.
1749 **********************************************************************/
/*
 * ifmedia status callback (ifconfig query): refreshes link state under the
 * core lock, then maps the adapter's physical layer + negotiated speed to
 * the corresponding IFM_* media word, and reports the active flow-control
 * pause settings on newer FreeBSD.
 * NOTE(review): 'break' lines inside the switches are elided in this view;
 * upstream each speed case ends with break — code left byte-identical.
 */
1751 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1753 struct adapter *adapter = ifp->if_softc;
1754 struct ixgbe_hw *hw = &adapter->hw;
1757 INIT_DEBUGOUT("ixgbe_media_status: begin");
1758 IXGBE_CORE_LOCK(adapter);
1759 ixgbe_update_link_status(adapter);
1761 ifmr->ifm_status = IFM_AVALID;
1762 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
1764 if (!adapter->link_active) {
1765 IXGBE_CORE_UNLOCK(adapter);
1769 ifmr->ifm_status |= IFM_ACTIVE;
1770 layer = adapter->phy_layer;
1772 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1773 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1774 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1775 switch (adapter->link_speed) {
1776 case IXGBE_LINK_SPEED_10GB_FULL:
1777 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1779 case IXGBE_LINK_SPEED_1GB_FULL:
1780 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1782 case IXGBE_LINK_SPEED_100_FULL:
1783 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1786 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1787 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1788 switch (adapter->link_speed) {
1789 case IXGBE_LINK_SPEED_10GB_FULL:
1790 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1793 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1794 switch (adapter->link_speed) {
1795 case IXGBE_LINK_SPEED_10GB_FULL:
1796 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1798 case IXGBE_LINK_SPEED_1GB_FULL:
1799 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1802 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1803 switch (adapter->link_speed) {
1804 case IXGBE_LINK_SPEED_10GB_FULL:
1805 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1807 case IXGBE_LINK_SPEED_1GB_FULL:
1808 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1811 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1812 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1813 switch (adapter->link_speed) {
1814 case IXGBE_LINK_SPEED_10GB_FULL:
1815 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1817 case IXGBE_LINK_SPEED_1GB_FULL:
1818 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1821 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1822 switch (adapter->link_speed) {
1823 case IXGBE_LINK_SPEED_10GB_FULL:
1824 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1828 ** XXX: These need to use the proper media types once
/* Without IFM_ETH_XTYPE, backplane KR/KX4/KX are approximated by SR/CX4/CX. */
1831 #ifndef IFM_ETH_XTYPE
1832 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1833 switch (adapter->link_speed) {
1834 case IXGBE_LINK_SPEED_10GB_FULL:
1835 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1837 case IXGBE_LINK_SPEED_2_5GB_FULL:
1838 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1840 case IXGBE_LINK_SPEED_1GB_FULL:
1841 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1844 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1845 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1846 switch (adapter->link_speed) {
1847 case IXGBE_LINK_SPEED_10GB_FULL:
1848 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1850 case IXGBE_LINK_SPEED_2_5GB_FULL:
1851 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1853 case IXGBE_LINK_SPEED_1GB_FULL:
1854 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1858 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1859 switch (adapter->link_speed) {
1860 case IXGBE_LINK_SPEED_10GB_FULL:
1861 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1863 case IXGBE_LINK_SPEED_2_5GB_FULL:
1864 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1866 case IXGBE_LINK_SPEED_1GB_FULL:
1867 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1870 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1871 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1872 switch (adapter->link_speed) {
1873 case IXGBE_LINK_SPEED_10GB_FULL:
1874 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1876 case IXGBE_LINK_SPEED_2_5GB_FULL:
1877 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1879 case IXGBE_LINK_SPEED_1GB_FULL:
1880 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1885 /* If nothing is recognized... */
1886 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1887 ifmr->ifm_active |= IFM_UNKNOWN;
1889 #if __FreeBSD_version >= 900025
1890 /* Display current flow control setting used on link */
1891 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1892 hw->fc.current_mode == ixgbe_fc_full)
1893 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1894 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1895 hw->fc.current_mode == ixgbe_fc_full)
1896 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1899 IXGBE_CORE_UNLOCK(adapter);
1904 /*********************************************************************
1906 * Media Ioctl callback
1908 * This routine is called when the user changes speed/duplex using
1909 * media/mediopt option with ifconfig.
1911 **********************************************************************/
/*
 * ifmedia change callback (ifconfig media/mediaopt): converts the requested
 * IFM_* subtype into an ixgbe_link_speed mask, asks the shared code to set
 * up the link, and records the advertised speeds for sysctl consistency.
 * The switch cases fall through deliberately so that a slower media type
 * accumulates all speeds at or above it (e.g. 1G cases also OR in 10G via
 * the TWINAX case) — matches the upstream driver's advertise encoding.
 */
1913 ixgbe_media_change(struct ifnet * ifp)
1915 struct adapter *adapter = ifp->if_softc;
1916 struct ifmedia *ifm = &adapter->media;
1917 struct ixgbe_hw *hw = &adapter->hw;
1918 ixgbe_link_speed speed = 0;
1920 INIT_DEBUGOUT("ixgbe_media_change: begin");
1922 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software; bail out. */
1925 if (hw->phy.media_type == ixgbe_media_type_backplane)
1929 ** We don't actually need to check against the supported
1930 ** media types of the adapter; ifmedia will take care of
1933 #ifndef IFM_ETH_XTYPE
1934 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1937 speed |= IXGBE_LINK_SPEED_100_FULL;
1939 case IFM_10G_SR: /* KR, too */
1941 case IFM_10G_CX4: /* KX4 */
1942 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1943 case IFM_10G_TWINAX:
1944 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1947 speed |= IXGBE_LINK_SPEED_100_FULL;
1950 case IFM_1000_CX: /* KX */
1951 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1954 speed |= IXGBE_LINK_SPEED_100_FULL;
1960 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1963 speed |= IXGBE_LINK_SPEED_100_FULL;
1968 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1969 case IFM_10G_TWINAX:
1970 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1973 speed |= IXGBE_LINK_SPEED_100_FULL;
1977 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1980 speed |= IXGBE_LINK_SPEED_100_FULL;
1987 hw->mac.autotry_restart = TRUE;
1988 hw->mac.ops.setup_link(hw, speed, TRUE);
1989 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1990 adapter->advertise = 0;
/* Encode advertised speeds as a bitmask: bit2=10G, bit1=1G, bit0=100M. */
1992 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
1993 adapter->advertise |= 1 << 2;
1994 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
1995 adapter->advertise |= 1 << 1;
1996 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
1997 adapter->advertise |= 1 << 0;
2003 device_printf(adapter->dev, "Invalid media type!\n");
/*
 * Program FCTRL unicast/multicast-promiscuous bits (UPE/MPE) from the
 * ifnet flags: IFF_PROMISC enables both, IFF_ALLMULTI (or a multicast
 * list overflowing the hardware filter) enables MPE only; otherwise both
 * are cleared so only filtered traffic is accepted.
 */
2008 ixgbe_set_promisc(struct adapter *adapter)
2011 struct ifnet *ifp = adapter->ifp;
2014 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2015 reg_rctl &= (~IXGBE_FCTRL_UPE);
2016 if (ifp->if_flags & IFF_ALLMULTI)
2017 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
/* Otherwise count the multicast list (capped at the hardware limit). */
2019 struct ifmultiaddr *ifma;
2020 #if __FreeBSD_version < 800000
2023 if_maddr_rlock(ifp);
2025 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2026 if (ifma->ifma_addr->sa_family != AF_LINK)
2028 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2032 #if __FreeBSD_version < 800000
2033 IF_ADDR_UNLOCK(ifp);
2035 if_maddr_runlock(ifp);
2038 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2039 reg_rctl &= (~IXGBE_FCTRL_MPE);
2040 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2042 if (ifp->if_flags & IFF_PROMISC) {
2043 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2045 } else if (ifp->if_flags & IFF_ALLMULTI) {
2046 reg_rctl |= IXGBE_FCTRL_MPE;
2047 reg_rctl &= ~IXGBE_FCTRL_UPE;
2048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2054 /*********************************************************************
2057 * This routine is called whenever multicast address list is updated.
2059 **********************************************************************/
2060 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter from the ifnet's multicast list:
 * copies each AF_LINK address (with the adapter's VMDq pool) into the mta
 * array, programs the FCTRL promiscuous bits to match the current flags /
 * overflow state, and hands the list to the shared code when it fits.
 */
2063 ixgbe_set_multi(struct adapter *adapter)
2067 struct ifmultiaddr *ifma;
2068 struct ixgbe_mc_addr *mta;
2070 struct ifnet *ifp = adapter->ifp;
2072 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2075 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2077 #if __FreeBSD_version < 800000
2080 if_maddr_rlock(ifp);
2082 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2083 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Stop collecting once the hardware filter capacity is reached. */
2085 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2087 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2088 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2089 mta[mcnt].vmdq = adapter->pool;
2092 #if __FreeBSD_version < 800000
2093 IF_ADDR_UNLOCK(ifp);
2095 if_maddr_runlock(ifp);
2098 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional set looks redundant with the PROMISC
   branch below, but it is cleared again by the final else — verify against
   upstream before "simplifying". */
2099 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2100 if (ifp->if_flags & IFF_PROMISC)
2101 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2102 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2103 ifp->if_flags & IFF_ALLMULTI) {
2104 fctrl |= IXGBE_FCTRL_MPE;
2105 fctrl &= ~IXGBE_FCTRL_UPE;
2107 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2111 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2112 update_ptr = (u8 *)mta;
2113 ixgbe_update_mc_addr_list(&adapter->hw,
2114 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2121 * This is an iterator function now needed by the multicast
2122 * shared code. It simply feeds the shared code routine the
2123 * addresses in the array of ixgbe_set_multi() one by one.
2126 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2128 struct ixgbe_mc_addr *mta;
2130 mta = (struct ixgbe_mc_addr *)*update_ptr;
2133 *update_ptr = (u8*)(mta + 1);;
2138 /*********************************************************************
2141 * This routine checks for link status,updates statistics,
2142 * and runs the watchdog check.
2144 **********************************************************************/
/*
 * Periodic (1 Hz) timer run with the core lock held: probes pending SFP
 * modules, refreshes link state and statistics, tracks per-queue hang
 * state (rearming queues that still have work), and resets the interface
 * via ixgbe_init_locked() only when every queue reports hung.
 */
2147 ixgbe_local_timer(void *arg)
2149 struct adapter *adapter = arg;
2150 device_t dev = adapter->dev;
2151 struct ix_queue *que = adapter->queues;
2155 mtx_assert(&adapter->core_mtx, MA_OWNED);
2157 /* Check for pluggable optics */
2158 if (adapter->sfp_probe)
2159 if (!ixgbe_sfp_probe(adapter))
2160 goto out; /* Nothing to do */
2162 ixgbe_update_link_status(adapter);
2163 ixgbe_update_stats_counters(adapter);
2166 ** Check the TX queues status
2167 ** - mark hung queues so we don't schedule on them
2168 ** - watchdog only if all queues show hung
2170 for (int i = 0; i < adapter->num_queues; i++, que++) {
2171 /* Keep track of queues with work for soft irq */
2173 queues |= ((u64)1 << que->me);
2175 ** Each time txeof runs without cleaning, but there
2176 ** are uncleaned descriptors it increments busy. If
2177 ** we get to the MAX we declare it hung.
2179 if (que->busy == IXGBE_QUEUE_HUNG) {
2181 /* Mark the queue as inactive */
2182 adapter->active_queues &= ~((u64)1 << que->me);
2185 /* Check if we've come back from hung */
2186 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2187 adapter->active_queues |= ((u64)1 << que->me);
2189 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2190 device_printf(dev,"Warning queue %d "
2191 "appears to be hung!\n", i);
2192 que->txr->busy = IXGBE_QUEUE_HUNG;
2198 /* Only truly watchdog if all queues show hung */
2199 if (hung == adapter->num_queues)
2201 else if (queues != 0) { /* Force an IRQ on queues with work */
2202 ixgbe_rearm_queues(adapter, queues);
2206 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: reset the interface and count the event. */
2210 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2211 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2212 adapter->watchdog_events++;
2213 ixgbe_init_locked(adapter);
2218 ** Note: this routine updates the OS on the link state
2219 ** the real check of the hardware only happens with
2220 ** a link interrupt.
/*
 * Push the cached link state (adapter->link_up/link_speed, set by the link
 * interrupt path) out to the OS: logs transitions, notifies the stack via
 * if_link_state_change(), refreshes flow control and DMA coalescing on
 * link-up, and pings the VFs on every transition.
 */
2223 ixgbe_update_link_status(struct adapter *adapter)
2225 struct ifnet *ifp = adapter->ifp;
2226 device_t dev = adapter->dev;
2228 if (adapter->link_up){
2229 if (adapter->link_active == FALSE) {
/* link_speed 128 is the hardware encoding for 10 Gbps here. */
2231 device_printf(dev,"Link is up %d Gbps %s \n",
2232 ((adapter->link_speed == 128)? 10:1),
2234 adapter->link_active = TRUE;
2235 /* Update any Flow Control changes */
2236 ixgbe_fc_enable(&adapter->hw);
2237 /* Update DMA coalescing config */
2238 ixgbe_config_dmac(adapter);
2239 if_link_state_change(ifp, LINK_STATE_UP);
2241 ixgbe_ping_all_vfs(adapter);
2244 } else { /* Link down */
2245 if (adapter->link_active == TRUE) {
2247 device_printf(dev,"Link is Down\n");
2248 if_link_state_change(ifp, LINK_STATE_DOWN);
2249 adapter->link_active = FALSE;
2251 ixgbe_ping_all_vfs(adapter);
2260 /*********************************************************************
2262 * This routine disables all traffic on the adapter by issuing a
2263 * global reset on the MAC and deallocates TX/RX buffers.
2265 **********************************************************************/
/*
 * Bring the interface down (core lock held): disables interrupts, stops
 * the timer, clears IFF_DRV_RUNNING, resets/stops the MAC, turns off the
 * TX laser, reports link-down to the stack, and restores RAR[0].
 */
2268 ixgbe_stop(void *arg)
2271 struct adapter *adapter = arg;
2272 struct ixgbe_hw *hw = &adapter->hw;
2275 mtx_assert(&adapter->core_mtx, MA_OWNED);
2277 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2278 ixgbe_disable_intr(adapter);
2279 callout_stop(&adapter->timer);
2281 /* Let the stack know...*/
2282 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the stopped flag first so ixgbe_stop_adapter() runs fully. */
2285 hw->adapter_stopped = FALSE;
2286 ixgbe_stop_adapter(hw);
2287 if (hw->mac.type == ixgbe_mac_82599EB)
2288 ixgbe_stop_mac_link_on_d3_82599(hw);
2289 /* Turn off the laser - noop with no optics */
2290 ixgbe_disable_tx_laser(hw);
2292 /* Update the stack */
2293 adapter->link_up = FALSE;
2294 ixgbe_update_link_status(adapter);
2296 /* reprogram the RAR[0] in case user changed it. */
2297 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2303 /*********************************************************************
2305 * Determine hardware revision.
2307 **********************************************************************/
/*
 * Read PCI identification (vendor/device/revision/subsystem IDs) into the
 * shared-code hw struct, enable bus mastering, resolve the MAC type, and
 * pick the scatter-gather segment limit for this generation.
 */
2309 ixgbe_identify_hardware(struct adapter *adapter)
2311 device_t dev = adapter->dev;
2312 struct ixgbe_hw *hw = &adapter->hw;
2314 /* Save off the information about this board */
2315 hw->vendor_id = pci_get_vendor(dev);
2316 hw->device_id = pci_get_device(dev);
2317 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2318 hw->subsystem_vendor_id =
2319 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2320 hw->subsystem_device_id =
2321 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2324 ** Make sure BUSMASTER is set
2326 pci_enable_busmaster(dev);
2328 /* We need this here to set the num_segs below */
2329 ixgbe_set_mac_type(hw);
2331 /* Pick up the 82599 settings */
2332 if (hw->mac.type != ixgbe_mac_82598EB) {
2333 hw->phy.smart_speed = ixgbe_smart_speed;
2334 adapter->num_segs = IXGBE_82599_SCATTER;
2336 adapter->num_segs = IXGBE_82598_SCATTER;
2341 /*********************************************************************
2343 * Determine optic type
2345 **********************************************************************/
/*
 * Map the supported-physical-layer bitmask onto a single ifmedia optic
 * type, caching the layer mask in adapter->phy_layer.  Each branch sets
 * adapter->optics; presumably each also returns (returns elided in this
 * listing), making this a first-match priority chain ending in AUTO.
 */
2347 ixgbe_setup_optics(struct adapter *adapter)
2349 struct ixgbe_hw *hw = &adapter->hw;
2352 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2354 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2355 adapter->optics = IFM_10G_T;
2359 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2360 adapter->optics = IFM_1000_T;
2364 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2365 adapter->optics = IFM_1000_SX;
2369 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2370 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2371 adapter->optics = IFM_10G_LR;
2375 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2376 adapter->optics = IFM_10G_SR;
2380 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2381 adapter->optics = IFM_10G_TWINAX;
2385 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2386 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2387 adapter->optics = IFM_10G_CX4;
2391 /* If we get here just set the default */
2392 adapter->optics = IFM_ETHER | IFM_AUTO;
2396 /*********************************************************************
2398 * Setup the Legacy or MSI Interrupt handler
2400 **********************************************************************/
/*
 * Allocate the single shared IRQ resource, create the fast taskqueues for
 * deferred RX/TX and link/SFP/PHY service work, and install the legacy
 * interrupt handler (ixgbe_legacy_irq).  On handler-registration failure
 * both taskqueues are freed.  Error-return lines are elided in this listing.
 */
2402 ixgbe_allocate_legacy(struct adapter *adapter)
2404 device_t dev = adapter->dev;
2405 struct ix_queue *que = adapter->queues;
2406 #ifndef IXGBE_LEGACY_TX
2407 struct tx_ring *txr = adapter->tx_rings;
/* rid selection when MSI was granted (body elided in this listing) */
2412 if (adapter->msix == 1)
2415 /* We allocate a single interrupt resource */
2416 adapter->res = bus_alloc_resource_any(dev,
2417 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2418 if (adapter->res == NULL) {
2419 device_printf(dev, "Unable to allocate bus resource: "
2425 * Try allocating a fast interrupt and the associated deferred
2426 * processing contexts.
2428 #ifndef IXGBE_LEGACY_TX
2429 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2431 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2432 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2433 taskqueue_thread_enqueue, &que->tq);
2434 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2435 device_get_nameunit(adapter->dev));
2437 /* Tasklets for Link, SFP and Multispeed Fiber */
2438 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2439 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2440 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2441 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2443 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2445 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2446 taskqueue_thread_enqueue, &adapter->tq);
2447 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2448 device_get_nameunit(adapter->dev));
2450 if ((error = bus_setup_intr(dev, adapter->res,
2451 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2452 que, &adapter->tag)) != 0) {
2453 device_printf(dev, "Failed to register fast interrupt "
2454 "handler: %d\n", error);
2455 taskqueue_free(que->tq);
2456 taskqueue_free(adapter->tq);
2461 /* For simplicity in the handlers */
2462 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2468 /*********************************************************************
2470 * Setup MSIX Interrupt resources and handlers
2472 **********************************************************************/
/*
 * Allocate one MSI-X vector per RX/TX queue pair plus one for link/admin
 * events.  For each queue: allocate the IRQ, install ixgbe_msix_que,
 * optionally bind the vector to the RSS-selected CPU, and start a
 * per-queue fast taskqueue.  Finally allocate the link vector, install
 * ixgbe_msix_link, and start the link taskqueue.
 * NOTE(review): error paths and some #ifdef RSS alternates are elided in
 * this listing.
 */
2474 ixgbe_allocate_msix(struct adapter *adapter)
2476 device_t dev = adapter->dev;
2477 struct ix_queue *que = adapter->queues;
2478 struct tx_ring *txr = adapter->tx_rings;
2479 int error, rid, vector = 0;
2487 * If we're doing RSS, the number of queues needs to
2488 * match the number of RSS buckets that are configured.
2490 * + If there's more queues than RSS buckets, we'll end
2491 * up with queues that get no traffic.
2493 * + If there's more RSS buckets than queues, we'll end
2494 * up having multiple RSS buckets map to the same queue,
2495 * so there'll be some contention.
2497 if (adapter->num_queues != rss_getnumbuckets()) {
2499 "%s: number of queues (%d) != number of RSS buckets (%d)"
2500 "; performance will be impacted.\n",
2502 adapter->num_queues,
2503 rss_getnumbuckets());
/* One vector per queue; rid assignment line elided in this listing. */
2507 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2509 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2510 RF_SHAREABLE | RF_ACTIVE);
2511 if (que->res == NULL) {
2512 device_printf(dev,"Unable to allocate"
2513 " bus resource: que interrupt [%d]\n", vector);
2516 /* Set the handler function */
2517 error = bus_setup_intr(dev, que->res,
2518 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2519 ixgbe_msix_que, que, &que->tag);
2522 device_printf(dev, "Failed to register QUE handler");
2525 #if __FreeBSD_version >= 800504
2526 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2529 adapter->active_queues |= (u64)(1 << que->msix);
2532 * The queue ID is used as the RSS layer bucket ID.
2533 * We look up the queue ID -> RSS CPU ID and select
2536 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2539 * Bind the msix vector, and thus the
2540 * rings to the corresponding cpu.
2542 * This just happens to match the default RSS round-robin
2543 * bucket -> queue -> CPU allocation.
2545 if (adapter->num_queues > 1)
2548 if (adapter->num_queues > 1)
2549 bus_bind_intr(dev, que->res, cpu_id);
2553 "Bound RSS bucket %d to CPU %d\n",
2557 "Bound queue %d to cpu %d\n",
2560 #endif /* IXGBE_DEBUG */
2563 #ifndef IXGBE_LEGACY_TX
2564 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2566 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2567 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2568 taskqueue_thread_enqueue, &que->tq);
/* RSS build: pin the taskqueue thread to the same CPU as the vector. */
2570 CPU_SETOF(cpu_id, &cpu_mask);
2571 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2574 device_get_nameunit(adapter->dev),
2577 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2578 device_get_nameunit(adapter->dev), i);
/* ...and now the link/admin vector (rid setup elided). */
2584 adapter->res = bus_alloc_resource_any(dev,
2585 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2586 if (!adapter->res) {
2587 device_printf(dev,"Unable to allocate"
2588 " bus resource: Link interrupt [%d]\n", rid);
2591 /* Set the link handler function */
2592 error = bus_setup_intr(dev, adapter->res,
2593 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2594 ixgbe_msix_link, adapter, &adapter->tag);
2596 adapter->res = NULL;
2597 device_printf(dev, "Failed to register LINK handler");
2600 #if __FreeBSD_version >= 800504
2601 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2603 adapter->vector = vector;
2604 /* Tasklets for Link, SFP and Multispeed Fiber */
2605 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2606 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2607 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2609 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2611 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2613 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2615 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2616 taskqueue_thread_enqueue, &adapter->tq);
2617 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2618 device_get_nameunit(adapter->dev));
2624 * Setup Either MSI/X or MSI
/*
 * Decide the interrupt mode.  Tries MSI-X first: maps the MSI-X table BAR,
 * sizes the queue count from ncpus / RSS buckets / tunables (capped at 8
 * when autoconfiguring), and requests queues+1 vectors (the +1 is the link
 * vector).  Falls back to MSI, then legacy INTx.  Returns the number of
 * vectors granted (return statements elided in this listing).
 */
2627 ixgbe_setup_msix(struct adapter *adapter)
2629 device_t dev = adapter->dev;
2630 int rid, want, queues, msgs;
2632 /* Override by tuneable */
2633 if (ixgbe_enable_msix == 0)
2636 /* First try MSI/X */
2637 msgs = pci_msix_count(dev);
2640 rid = PCIR_BAR(MSIX_82598_BAR);
2641 adapter->msix_mem = bus_alloc_resource_any(dev,
2642 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2643 if (adapter->msix_mem == NULL) {
2644 rid += 4; /* 82599 maps in higher BAR */
2645 adapter->msix_mem = bus_alloc_resource_any(dev,
2646 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2648 if (adapter->msix_mem == NULL) {
2649 /* May not be enabled */
2650 device_printf(adapter->dev,
2651 "Unable to map MSIX table \n");
2655 /* Figure out a reasonable auto config value */
2656 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2659 /* If we're doing RSS, clamp at the number of RSS buckets */
2660 if (queues > rss_getnumbuckets())
2661 queues = rss_getnumbuckets();
/* Explicit tunable always wins over autoconfig. */
2664 if (ixgbe_num_queues != 0)
2665 queues = ixgbe_num_queues;
2666 /* Set max queues to 8 when autoconfiguring */
2667 else if ((ixgbe_num_queues == 0) && (queues > 8))
2670 /* reflect correct sysctl value */
2671 ixgbe_num_queues = queues;
2674 ** Want one vector (RX/TX pair) per queue
2675 ** plus an additional for Link.
2681 device_printf(adapter->dev,
2682 "MSIX Configuration Problem, "
2683 "%d vectors but %d queues wanted!\n",
2687 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2688 device_printf(adapter->dev,
2689 "Using MSIX interrupts with %d vectors\n", msgs);
2690 adapter->num_queues = queues;
2694 ** If MSIX alloc failed or provided us with
2695 ** less than needed, free and fall through to MSI
2697 pci_release_msi(dev);
2700 if (adapter->msix_mem != NULL) {
2701 bus_release_resource(dev, SYS_RES_MEMORY,
2702 rid, adapter->msix_mem);
2703 adapter->msix_mem = NULL;
2706 if (pci_alloc_msi(dev, &msgs) == 0) {
2707 device_printf(adapter->dev, "Using an MSI interrupt\n");
2710 device_printf(adapter->dev, "Using a Legacy interrupt\n");
/*
 * Map BAR0 (device registers) and record the bus-space tag/handle used by
 * the IXGBE_READ/WRITE_REG macros, then hand off to ixgbe_setup_msix() to
 * discover the interrupt configuration.  Returns nonzero on mapping
 * failure (return lines elided in this listing).
 */
2716 ixgbe_allocate_pci_resources(struct adapter *adapter)
2719 device_t dev = adapter->dev;
2722 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2725 if (!(adapter->pci_mem)) {
2726 device_printf(dev, "Unable to allocate bus resource: memory\n");
2730 /* Save bus_space values for READ/WRITE_REG macros */
2731 adapter->osdep.mem_bus_space_tag =
2732 rman_get_bustag(adapter->pci_mem);
2733 adapter->osdep.mem_bus_space_handle =
2734 rman_get_bushandle(adapter->pci_mem);
2735 /* Set hw values for shared code */
2736 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2737 adapter->hw.back = adapter;
2739 /* Default to 1 queue if MSI-X setup fails */
2740 adapter->num_queues = 1;
2743 ** Now setup MSI or MSI-X, should
2744 ** return us the number of supported
2745 ** vectors. (Will be 1 for MSI)
2747 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * Release everything ixgbe_allocate_pci_resources / allocate_msix set up:
 * per-queue IRQs, the link/legacy IRQ, MSI(-X) message state, the MSI-X
 * table BAR, and BAR0.  Safe to call from a partially-failed attach —
 * adapter->res == NULL is used as the sentinel for "no interrupts yet".
 */
2752 ixgbe_free_pci_resources(struct adapter * adapter)
2754 struct ix_queue *que = adapter->queues;
2755 device_t dev = adapter->dev;
2758 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2759 memrid = PCIR_BAR(MSIX_82598_BAR);
2761 memrid = PCIR_BAR(MSIX_82599_BAR);
2764 ** There is a slight possibility of a failure mode
2765 ** in attach that will result in entering this function
2766 ** before interrupt resources have been initialized, and
2767 ** in that case we do not want to execute the loops below
2768 ** We can detect this reliably by the state of the adapter
2771 if (adapter->res == NULL)
2775 ** Release all msix queue resources:
2777 for (int i = 0; i < adapter->num_queues; i++, que++) {
2778 rid = que->msix + 1;
2779 if (que->tag != NULL) {
2780 bus_teardown_intr(dev, que->res, que->tag);
2783 if (que->res != NULL)
2784 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2788 /* Clean the Legacy or Link interrupt last */
2789 if (adapter->vector) /* we are doing MSIX */
2790 rid = adapter->vector + 1;
2792 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2794 if (adapter->tag != NULL) {
2795 bus_teardown_intr(dev, adapter->res, adapter->tag);
2796 adapter->tag = NULL;
2798 if (adapter->res != NULL)
2799 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2803 pci_release_msi(dev);
2805 if (adapter->msix_mem != NULL)
2806 bus_release_resource(dev, SYS_RES_MEMORY,
2807 memrid, adapter->msix_mem);
2809 if (adapter->pci_mem != NULL)
2810 bus_release_resource(dev, SYS_RES_MEMORY,
2811 PCIR_BAR(0), adapter->pci_mem);
2816 /*********************************************************************
2818 * Setup networking device structure and register an interface.
2820 **********************************************************************/
/*
 * Allocate and populate the ifnet: name, baudrate, callbacks (init/ioctl,
 * and either if_transmit/if_qflush or the legacy if_start path), TSO
 * limits, capability flags, and ifmedia registration.  Ends by attaching
 * the Ethernet layer and selecting autoselect media.
 */
2822 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2826 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2828 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2830 device_printf(dev, "can not allocate ifnet structure\n");
2833 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2834 if_initbaudrate(ifp, IF_Gbps(10));
2835 ifp->if_init = ixgbe_init;
2836 ifp->if_softc = adapter;
2837 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2838 ifp->if_ioctl = ixgbe_ioctl;
2839 #if __FreeBSD_version >= 1100036
2840 if_setgetcounterfn(ifp, ixgbe_get_counter);
2842 #if __FreeBSD_version >= 1100045
2843 /* TSO parameters */
2844 ifp->if_hw_tsomax = 65518;
2845 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2846 ifp->if_hw_tsomaxsegsize = 2048;
2848 #ifndef IXGBE_LEGACY_TX
2849 ifp->if_transmit = ixgbe_mq_start;
2850 ifp->if_qflush = ixgbe_qflush;
2852 ifp->if_start = ixgbe_start;
2853 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2854 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2855 IFQ_SET_READY(&ifp->if_snd);
2858 ether_ifattach(ifp, adapter->hw.mac.addr);
2860 adapter->max_frame_size =
2861 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2864 * Tell the upper layer(s) we support long frames.
2866 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2868 /* Set capability flags */
2869 ifp->if_capabilities |= IFCAP_RXCSUM
2876 | IFCAP_VLAN_HWTAGGING
2883 /* Enable the above capabilities by default */
2884 ifp->if_capenable = ifp->if_capabilities;
2887 ** Don't turn this on by default, if vlans are
2888 ** created on another pseudo device (eg. lagg)
2889 ** then vlan events are not passed thru, breaking
2890 ** operation, but with HW FILTER off it works. If
2891 ** using vlans directly on the ixgbe driver you can
2892 ** enable this and get full hardware tag filtering.
2894 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2897 * Specify the media types supported by this adapter and register
2898 * callbacks to update media and link information
2900 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2901 ixgbe_media_status);
2903 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2904 ixgbe_add_media_types(adapter);
2906 /* Set autoselect media by default */
2907 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * Register every ifmedia entry implied by the cached physical-layer mask.
 * Backplane (KR/KX4/KX) types are added natively when IFM_ETH_XTYPE
 * exists; otherwise they are mapped onto the closest SR/CX4/CX media and
 * announced via device_printf.  Always ends with the IFM_AUTO entry.
 */
2913 ixgbe_add_media_types(struct adapter *adapter)
2915 struct ixgbe_hw *hw = &adapter->hw;
2916 device_t dev = adapter->dev;
2919 layer = adapter->phy_layer;
2921 /* Media types with matching FreeBSD media defines */
2922 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2923 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2924 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2925 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2926 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2927 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2929 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2930 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2931 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2933 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2934 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2935 if (hw->phy.multispeed_fiber)
2936 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2938 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2939 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2940 if (hw->phy.multispeed_fiber)
2941 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2942 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2943 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2944 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2945 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2947 #ifdef IFM_ETH_XTYPE
2948 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2949 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2950 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2951 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2952 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2953 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2955 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2956 device_printf(dev, "Media supported: 10GbaseKR\n");
2957 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2958 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2960 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2961 device_printf(dev, "Media supported: 10GbaseKX4\n");
2962 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2963 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2965 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2966 device_printf(dev, "Media supported: 1000baseKX\n");
2967 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2968 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2971 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2972 device_printf(dev, "Media supported: 1000baseBX\n");
/* 82598AT additionally advertises explicit 1000baseT entries. */
2974 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2975 ifmedia_add(&adapter->media,
2976 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2977 ifmedia_add(&adapter->media,
2978 IFM_ETHER | IFM_1000_T, 0, NULL);
2981 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * Kick off link bring-up.  SFP modules are handled asynchronously via the
 * mod_task; otherwise query current link state, resolve the advertised
 * autoneg capabilities, and call the shared-code setup_link op.
 * NOTE(review): the branch structure between the SFP and non-SFP paths is
 * partially elided in this listing.
 */
2985 ixgbe_config_link(struct adapter *adapter)
2987 struct ixgbe_hw *hw = &adapter->hw;
2988 u32 autoneg, err = 0;
2989 bool sfp, negotiate;
2991 sfp = ixgbe_is_sfp(hw);
2994 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2996 if (hw->mac.ops.check_link)
2997 err = ixgbe_check_link(hw, &adapter->link_speed,
2998 &adapter->link_up, FALSE);
3001 autoneg = hw->phy.autoneg_advertised;
3002 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3003 err = hw->mac.ops.get_link_capabilities(hw,
3004 &autoneg, &negotiate);
3007 if (hw->mac.ops.setup_link)
3008 err = hw->mac.ops.setup_link(hw,
3009 autoneg, adapter->link_up);
3016 /*********************************************************************
3018 * Enable transmit units.
3020 **********************************************************************/
/*
 * Program each TX ring's descriptor base/length and head/tail registers,
 * disable head writeback (DCA_TXCTRL, register offset differs on 82598),
 * then on 82599+ enable DMA TX and set MTQC with the arbiter temporarily
 * disabled, as required before changing MTQC.
 */
3022 ixgbe_initialize_transmit_units(struct adapter *adapter)
3024 struct tx_ring *txr = adapter->tx_rings;
3025 struct ixgbe_hw *hw = &adapter->hw;
3027 /* Setup the Base and Length of the Tx Descriptor Ring */
3028 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3029 u64 tdba = txr->txdma.dma_paddr;
3033 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3034 (tdba & 0x00000000ffffffffULL));
3035 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3036 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3037 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3039 /* Setup the HW Tx Head and Tail descriptor pointers */
3040 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3041 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3043 /* Cache the tail address */
3044 txr->tail = IXGBE_TDT(j);
3046 /* Disable Head Writeback */
3048 * Note: for X550 series devices, these registers are actually
3049 * prefixed with TPH_ instead of DCA_, but the addresses and
3050 * fields remain the same.
3052 switch (hw->mac.type) {
3053 case ixgbe_mac_82598EB:
3054 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3057 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3060 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3061 switch (hw->mac.type) {
3062 case ixgbe_mac_82598EB:
3063 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3066 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3072 if (hw->mac.type != ixgbe_mac_82598EB) {
3073 u32 dmatxctl, rttdcs;
/* PCI_IOV build: MTQC depends on the current IOV mode. */
3075 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3077 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3078 dmatxctl |= IXGBE_DMATXCTL_TE;
3079 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3080 /* Disable arbiter to set MTQC */
3081 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3082 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3083 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3085 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3087 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3089 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3090 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * Program the RSS redirection table (RETA/ERETA), hash key (RSSRK), and
 * enabled hash-field set (MRQC).  With the RSS kernel option the key and
 * bucket->queue mapping come from the RSS framework; otherwise a random
 * key and a round-robin queue mapping are used, and UDP hashing is left
 * off because IP fragments would hash inconsistently (2- vs 4-tuple).
 */
3097 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3099 struct ixgbe_hw *hw = &adapter->hw;
3100 u32 reta = 0, mrqc, rss_key[10];
3101 int queue_id, table_size, index_mult;
3103 u32 rss_hash_config;
3106 enum ixgbe_iov_mode mode;
3110 /* Fetch the configured RSS key */
3111 rss_getkey((uint8_t *) &rss_key);
3113 /* set up random bits */
3114 arc4rand(&rss_key, sizeof(rss_key), 0);
3117 /* Set multiplier for RETA setup and table size based on MAC */
3120 switch (adapter->hw.mac.type) {
3121 case ixgbe_mac_82598EB:
3124 case ixgbe_mac_X550:
3125 case ixgbe_mac_X550EM_x:
3132 /* Set up the redirection table */
3133 for (int i = 0, j = 0; i < table_size; i++, j++) {
3134 if (j == adapter->num_queues) j = 0;
3137 * Fetch the RSS bucket id for the given indirection entry.
3138 * Cap it at the number of configured buckets (which is
3141 queue_id = rss_get_indirection_to_bucket(i);
3142 queue_id = queue_id % adapter->num_queues;
3144 queue_id = (j * index_mult);
3147 * The low 8 bits are for hash value (n+0);
3148 * The next 8 bits are for hash value (n+1), etc.
/* Four 8-bit entries are packed per 32-bit RETA register. */
3151 reta = reta | ( ((uint32_t) queue_id) << 24);
3154 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
/* X550 extends the table; entries past 128 go into ERETA. */
3156 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3161 /* Now fill our hash function seeds */
3162 for (int i = 0; i < 10; i++)
3163 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3165 /* Perform hash on these packet types */
3167 mrqc = IXGBE_MRQC_RSSEN;
3168 rss_hash_config = rss_gethashconfig();
3169 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3170 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3171 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3172 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3173 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3174 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3175 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3176 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3177 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3178 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3179 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3180 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3181 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3182 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3183 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3184 device_printf(adapter->dev,
3185 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3186 "but not supported\n", __func__);
3187 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3188 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3189 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3190 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3193 * Disable UDP - IP fragments aren't currently being handled
3194 * and so we end up with a mix of 2-tuple and 4-tuple
3197 mrqc = IXGBE_MRQC_RSSEN
3198 | IXGBE_MRQC_RSS_FIELD_IPV4
3199 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3200 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3201 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3202 | IXGBE_MRQC_RSS_FIELD_IPV6
3203 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3207 mode = ixgbe_get_iov_mode(adapter);
3208 mrqc |= ixgbe_get_mrqc(mode);
3210 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3214 /*********************************************************************
3216 * Setup receive registers and features.
3218 **********************************************************************/
3219 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/* Round rx_mbuf_sz up to the next SRRCTL buffer-size granule. */
3221 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Program the RX path: broadcast/jumbo/CRC-strip policy, each ring's
 * descriptor base/length, SRRCTL buffer sizing (with DROP_EN when flow
 * control is off and multiple queues exist), PSRTYPE on 82599+, the RSS
 * mapping, and finally RXCSUM.  Receives stay disabled throughout.
 */
3224 ixgbe_initialize_receive_units(struct adapter *adapter)
3226 struct rx_ring *rxr = adapter->rx_rings;
3227 struct ixgbe_hw *hw = &adapter->hw;
3228 struct ifnet *ifp = adapter->ifp;
3229 u32 bufsz, fctrl, srrctl, rxcsum;
3233 * Make sure receives are disabled while
3234 * setting up the descriptor ring
3236 ixgbe_disable_rx(hw);
3238 /* Enable broadcasts */
3239 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3240 fctrl |= IXGBE_FCTRL_BAM;
3241 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3242 fctrl |= IXGBE_FCTRL_DPF;
3243 fctrl |= IXGBE_FCTRL_PMCF;
3245 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3247 /* Set for Jumbo Frames? */
3248 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3249 if (ifp->if_mtu > ETHERMTU)
3250 hlreg |= IXGBE_HLREG0_JUMBOEN;
3252 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3254 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3255 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3256 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3258 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3259 #endif /* DEV_NETMAP */
3260 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3262 bufsz = (adapter->rx_mbuf_sz +
3263 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3265 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3266 u64 rdba = rxr->rxdma.dma_paddr;
3269 /* Setup the Base and Length of the Rx Descriptor Ring */
3270 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3271 (rdba & 0x00000000ffffffffULL));
3272 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3273 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3274 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3276 /* Set up the SRRCTL register */
3277 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3278 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3279 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3281 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3284 * Set DROP_EN iff we have no flow control and >1 queue.
3285 * Note that srrctl was cleared shortly before during reset,
3286 * so we do not need to clear the bit, but do it just in case
3287 * this code is moved elsewhere.
3289 if (adapter->num_queues > 1 &&
3290 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3291 srrctl |= IXGBE_SRRCTL_DROP_EN;
3293 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3296 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3298 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3299 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3300 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3302 /* Set the driver rx tail address */
3303 rxr->tail = IXGBE_RDT(rxr->me);
3306 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3307 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3308 IXGBE_PSRTYPE_UDPHDR |
3309 IXGBE_PSRTYPE_IPV4HDR |
3310 IXGBE_PSRTYPE_IPV6HDR;
3311 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3314 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3316 ixgbe_initialize_rss_mapping(adapter);
3318 if (adapter->num_queues > 1) {
3319 /* RSS and RX IPP Checksum are mutually exclusive */
3320 rxcsum |= IXGBE_RXCSUM_PCSD;
3323 if (ifp->if_capenable & IFCAP_RXCSUM)
3324 rxcsum |= IXGBE_RXCSUM_PCSD;
3326 /* This is useful for calculating UDP/IP fragment checksums */
3327 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3328 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3330 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3337 ** This routine is run via an vlan config EVENT,
3338 ** it enables us to use the HW Filter table since
3339 ** we can get the vlan id. This just creates the
3340 ** entry in the soft version of the VFTA, init will
3341 ** repopulate the real table.
/* Event handler: set the VLAN's bit in the shadow VFTA under core lock. */
3344 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3346 struct adapter *adapter = ifp->if_softc;
3349 if (ifp->if_softc != arg) /* Not our event */
3352 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3355 IXGBE_CORE_LOCK(adapter);
/* 128 words x 32 bits cover the 4096 possible VLAN IDs. */
3356 index = (vtag >> 5) & 0x7F;
3358 adapter->shadow_vfta[index] |= (1 << bit);
3359 ++adapter->num_vlans;
3360 ixgbe_setup_vlan_hw_support(adapter);
3361 IXGBE_CORE_UNLOCK(adapter);
3365 ** This routine is run via an vlan
3366 ** unconfig EVENT, remove our entry
3367 ** in the soft vfta.
/* Mirror of ixgbe_register_vlan: clear the bit and re-sync hardware. */
3370 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3372 struct adapter *adapter = ifp->if_softc;
3375 if (ifp->if_softc != arg)
3378 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3381 IXGBE_CORE_LOCK(adapter);
3382 index = (vtag >> 5) & 0x7F;
3384 adapter->shadow_vfta[index] &= ~(1 << bit);
3385 --adapter->num_vlans;
3386 /* Re-init to load the changes */
3387 ixgbe_setup_vlan_hw_support(adapter);
3388 IXGBE_CORE_UNLOCK(adapter);
/*
 * Re-apply VLAN hardware state after a reset: per-queue tag stripping
 * (RXDCTL.VME on 82599+), repopulate the real VFTA from the shadow copy,
 * and enable the filter table (VLNCTRL.VFE) when IFCAP_VLAN_HWFILTER is
 * on.  No-op when no VLANs are registered.
 */
3392 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3394 struct ifnet *ifp = adapter->ifp;
3395 struct ixgbe_hw *hw = &adapter->hw;
3396 struct rx_ring *rxr;
3401 ** We get here thru init_locked, meaning
3402 ** a soft reset, this has already cleared
3403 ** the VFTA and other state, so if there
3404 ** have been no vlan's registered do nothing.
3406 if (adapter->num_vlans == 0)
3409 /* Setup the queues for vlans */
3410 for (int i = 0; i < adapter->num_queues; i++) {
3411 rxr = &adapter->rx_rings[i];
3412 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3413 if (hw->mac.type != ixgbe_mac_82598EB) {
3414 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3415 ctrl |= IXGBE_RXDCTL_VME;
3416 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3418 rxr->vtag_strip = TRUE;
3421 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3424 ** A soft reset zero's out the VFTA, so
3425 ** we need to repopulate it now.
3427 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3428 if (adapter->shadow_vfta[i] != 0)
3429 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3430 adapter->shadow_vfta[i]);
3432 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3433 /* Enable the Filter Table if enabled */
3434 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3435 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3436 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 stripping is global (VLNCTRL.VME), not per-queue. */
3438 if (hw->mac.type == ixgbe_mac_82598EB)
3439 ctrl |= IXGBE_VLNCTRL_VME;
3440 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * Unmask interrupts: build the EIMS "other causes" mask per MAC type
 * (ECC, thermal sensor, SFP GPIs, flow director, SR-IOV mailbox), write
 * it, configure EIAC auto-clear for MSI-X (excluding link/other so those
 * remain level-serviced), then enable each queue vector individually —
 * queue enables are separate because 82599+ can have >32 vectors.
 */
3444 ixgbe_enable_intr(struct adapter *adapter)
3446 struct ixgbe_hw *hw = &adapter->hw;
3447 struct ix_queue *que = adapter->queues;
3450 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3451 /* Enable Fan Failure detection */
3452 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3453 mask |= IXGBE_EIMS_GPI_SDP1;
3455 switch (adapter->hw.mac.type) {
3456 case ixgbe_mac_82599EB:
3457 mask |= IXGBE_EIMS_ECC;
3458 /* Temperature sensor on some adapters */
3459 mask |= IXGBE_EIMS_GPI_SDP0;
3460 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3461 mask |= IXGBE_EIMS_GPI_SDP1;
3462 mask |= IXGBE_EIMS_GPI_SDP2;
3464 mask |= IXGBE_EIMS_FLOW_DIR;
3467 mask |= IXGBE_EIMS_MAILBOX;
3470 case ixgbe_mac_X540:
3471 /* Detect if Thermal Sensor is enabled */
3472 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3473 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3474 mask |= IXGBE_EIMS_TS;
3475 mask |= IXGBE_EIMS_ECC;
3477 mask |= IXGBE_EIMS_FLOW_DIR;
3480 case ixgbe_mac_X550:
3481 case ixgbe_mac_X550EM_x:
3482 /* MAC thermal sensor is automatically enabled */
3483 mask |= IXGBE_EIMS_TS;
3484 /* Some devices use SDP0 for important information */
3485 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3486 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3487 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3488 mask |= IXGBE_EIMS_ECC;
3490 mask |= IXGBE_EIMS_FLOW_DIR;
3493 mask |= IXGBE_EIMS_MAILBOX;
3500 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3502 /* With MSI-X we use auto clear */
3503 if (adapter->msix_mem) {
3504 mask = IXGBE_EIMS_ENABLE_MASK;
3505 /* Don't autoclear Link */
3506 mask &= ~IXGBE_EIMS_OTHER;
3507 mask &= ~IXGBE_EIMS_LSC;
3509 mask &= ~IXGBE_EIMS_MAILBOX;
3511 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3515 ** Now enable all queues, this is done separately to
3516 ** allow for handling the extended (beyond 32) MSIX
3517 ** vectors that can be used by 82599
3519 for (int i = 0; i < adapter->num_queues; i++, que++)
3520 ixgbe_enable_queue(adapter, que->msix);
3522 IXGBE_WRITE_FLUSH(hw);
/*
 * Mask all interrupt causes.  MSI-X auto-clear (EIAC) is turned off
 * first; 82598 needs only EIMC, while newer MACs also require the
 * extended EIMC_EX registers for vectors beyond the first 16.
 */
3528 ixgbe_disable_intr(struct adapter *adapter)
3530 if (adapter->msix_mem)
3531 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3532 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3536 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3539 IXGBE_WRITE_FLUSH(&adapter->hw);
3544 ** Get the width and transaction speed of
3545 ** the slot this adapter is plugged into.
/*
 * Fills in hw->bus.width / hw->bus.speed and warns the user when the
 * slot cannot supply enough PCIe bandwidth for the adapter.
 * NOTE(review): listing elides lines (breaks/defaults/braces); verify
 * against the complete driver source.
 */
3548 ixgbe_get_slot_info(struct adapter *adapter)
3550 device_t dev = adapter->dev;
3551 struct ixgbe_hw *hw = &adapter->hw;
3552 struct ixgbe_mac_info *mac = &hw->mac;
3556 /* For most devices simply call the shared code routine */
3557 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3558 ixgbe_get_bus_info(hw);
3559 /* These devices don't use PCI-E */
3560 switch (hw->mac.type) {
3561 case ixgbe_mac_X550EM_x:
3569 ** For the Quad port adapter we need to parse back
3570 ** up the PCI tree to find the speed of the expansion
3571 ** slot into which this adapter is plugged. A bit more work.
/* Hop two levels up the device tree: first to the parent bridge... */
3573 dev = device_get_parent(device_get_parent(dev));
3575 device_printf(dev, "parent pcib = %x,%x,%x\n",
3576 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
/* ...then up again to the bridge owning the physical expansion slot */
3578 dev = device_get_parent(device_get_parent(dev));
3580 device_printf(dev, "slot pcib = %x,%x,%x\n",
3581 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3583 /* Now get the PCI Express Capabilities offset */
3584 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3585 /* ...and read the Link Status Register */
3586 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status Register */
3587 switch (link & IXGBE_PCI_LINK_WIDTH) {
3588 case IXGBE_PCI_LINK_WIDTH_1:
3589 hw->bus.width = ixgbe_bus_width_pcie_x1;
3591 case IXGBE_PCI_LINK_WIDTH_2:
3592 hw->bus.width = ixgbe_bus_width_pcie_x2;
3594 case IXGBE_PCI_LINK_WIDTH_4:
3595 hw->bus.width = ixgbe_bus_width_pcie_x4;
3597 case IXGBE_PCI_LINK_WIDTH_8:
3598 hw->bus.width = ixgbe_bus_width_pcie_x8;
3601 hw->bus.width = ixgbe_bus_width_unknown;
/* Decode negotiated link speed (2.5/5.0/8.0 GT/s) */
3605 switch (link & IXGBE_PCI_LINK_SPEED) {
3606 case IXGBE_PCI_LINK_SPEED_2500:
3607 hw->bus.speed = ixgbe_bus_speed_2500;
3609 case IXGBE_PCI_LINK_SPEED_5000:
3610 hw->bus.speed = ixgbe_bus_speed_5000;
3612 case IXGBE_PCI_LINK_SPEED_8000:
3613 hw->bus.speed = ixgbe_bus_speed_8000;
3616 hw->bus.speed = ixgbe_bus_speed_unknown;
3620 mac->ops.set_lan_id(hw);
/* Report the detected slot capabilities to the system log */
3623 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3624 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3625 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3626 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3627 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3628 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3629 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Non-QP parts: warn when slot is narrower/slower than x8 / x4 Gen2 */
3632 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3633 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3634 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3635 device_printf(dev, "PCI-Express bandwidth available"
3636 " for this card\n is not sufficient for"
3637 " optimal performance.\n");
3638 device_printf(dev, "For optimal performance a x8 "
3639 "PCIE, or x4 PCIE Gen2 slot is required.\n");
/* Quad-port part: needs a full x8 Gen3 slot to run at line rate */
3641 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3642 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3643 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3644 device_printf(dev, "PCI-Express bandwidth available"
3645 " for this card\n is not sufficient for"
3646 " optimal performance.\n");
3647 device_printf(dev, "For optimal performance a x8 "
3648 "PCIE Gen3 slot is required.\n");
3656 ** Setup the correct IVAR register for a particular MSIX interrupt
3657 ** (yes this is all very magic and confusing :)
3658 ** - entry is the register array entry
3659 ** - vector is the MSIX vector for this queue
3660 ** - type is RX/TX/MISC
/*
 * NOTE(review): listing elides lines (breaks/declarations); verify the
 * full function in the complete driver source.
 */
3663 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3665 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the "allocation valid" bit set in every IVAR entry */
3668 vector |= IXGBE_IVAR_ALLOC_VAL;
3670 switch (hw->mac.type) {
3672 case ixgbe_mac_82598EB:
/* 82598: misc causes live at a fixed index; RX/TX offset by type*64 */
3674 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3676 entry += (type * 64);
/* Each 32-bit IVAR register holds four 8-bit entries */
3677 index = (entry >> 2) & 0x1F;
3678 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3679 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3680 ivar |= (vector << (8 * (entry & 0x3)));
3681 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3684 case ixgbe_mac_82599EB:
3685 case ixgbe_mac_X540:
3686 case ixgbe_mac_X550:
3687 case ixgbe_mac_X550EM_x:
3688 if (type == -1) { /* MISC IVAR */
3689 index = (entry & 1) * 8;
3690 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3691 ivar &= ~(0xFF << index);
3692 ivar |= (vector << index);
3693 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3694 } else { /* RX/TX IVARS */
/* Two queue entries per register; RX in low byte pair, TX in high */
3695 index = (16 * (entry & 1)) + (8 * type);
3696 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3697 ivar &= ~(0xFF << index);
3698 ivar |= (vector << index);
3699 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - map every RX/TX queue and the link interrupt
 * to its MSI-X vector via the IVAR registers, and program an initial
 * EITR (interrupt throttle) value per queue.
 * NOTE(review): listing elides lines (newitr declaration/else branch);
 * verify against the complete driver source.
 */
3708 ixgbe_configure_ivars(struct adapter *adapter)
3710 struct ix_queue *que = adapter->queues;
/* Derive the initial EITR interval from the tunable max interrupt rate */
3713 if (ixgbe_max_interrupt_rate > 0)
3714 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3717 ** Disable DMA coalescing if interrupt moderation is
3724 for (int i = 0; i < adapter->num_queues; i++, que++) {
3725 struct rx_ring *rxr = &adapter->rx_rings[i];
3726 struct tx_ring *txr = &adapter->tx_rings[i];
3727 /* First the RX queue entry */
3728 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3729 /* ... and the TX */
3730 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3731 /* Set an Initial EITR value */
3732 IXGBE_WRITE_REG(&adapter->hw,
3733 IXGBE_EITR(que->msix), newitr);
3736 /* For the Link interrupt */
/* type -1 selects the MISC IVAR; entry 1 carries the link cause */
3737 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3741 ** ixgbe_sfp_probe - called in the local timer to
3742 ** determine if a port had optics inserted.
/*
 * Returns TRUE once supported optics have been detected, FALSE otherwise.
 * NOTE(review): listing elides lines (braces/goto/out label); verify
 * against the complete driver source.
 */
3745 ixgbe_sfp_probe(struct adapter *adapter)
3747 struct ixgbe_hw *hw = &adapter->hw;
3748 device_t dev = adapter->dev;
3749 bool result = FALSE;
/* Only probe NL-PHY ports that currently report no SFP present */
3751 if ((hw->phy.type == ixgbe_phy_nl) &&
3752 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3753 s32 ret = hw->phy.ops.identify_sfp(hw);
3756 ret = hw->phy.ops.reset(hw);
/* Unsupported module: stop probing until the driver is reloaded */
3757 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3758 device_printf(dev, "Unsupported SFP+ module detected!");
3759 device_printf(dev, "Reload driver with supported module.\n");
3760 adapter->sfp_probe = FALSE;
3763 device_printf(dev, "SFP+ module detected!\n");
3764 /* We now have supported optics */
3765 adapter->sfp_probe = FALSE;
3766 /* Set the optics type so system reports correctly */
3767 ixgbe_setup_optics(adapter);
3775 ** Tasklet handler for MSIX Link interrupts
3776 ** - do outside interrupt since it might sleep
/*
 * Re-checks link state, propagates it to the OS, then unmasks the
 * link-status-change (LSC) interrupt cause.
 */
3779 ixgbe_handle_link(void *context, int pending)
3781 struct adapter *adapter = context;
3782 struct ixgbe_hw *hw = &adapter->hw;
3784 ixgbe_check_link(hw,
3785 &adapter->link_speed, &adapter->link_up, 0);
3786 ixgbe_update_link_status(adapter);
3788 /* Re-enable link interrupts */
3789 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3793 ** Tasklet for handling SFP module interrupts
/*
 * Re-identifies the PHY/SFP after a module insertion/removal event,
 * re-runs PHY init, sets up the new module, and refreshes the reported
 * media type. Runs under the core lock since it can sleep.
 * NOTE(review): listing elides lines (error paths/"out" label/breaks);
 * verify against the complete driver source.
 */
3796 ixgbe_handle_mod(void *context, int pending)
3798 struct adapter *adapter = context;
3799 struct ixgbe_hw *hw = &adapter->hw;
3800 enum ixgbe_phy_type orig_type = hw->phy.type;
3801 device_t dev = adapter->dev;
3804 IXGBE_CORE_LOCK(adapter);
3806 /* Check to see if the PHY type changed */
3807 if (hw->phy.ops.identify) {
/* Force re-identification rather than trusting the cached type */
3808 hw->phy.type = ixgbe_phy_unknown;
3809 hw->phy.ops.identify(hw);
3812 if (hw->phy.type != orig_type) {
3813 device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3815 if (hw->phy.type == ixgbe_phy_none) {
3816 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3820 /* Try to do the initialization that was skipped before */
3821 if (hw->phy.ops.init)
3822 hw->phy.ops.init(hw);
3823 if (hw->phy.ops.reset)
3824 hw->phy.ops.reset(hw);
3827 err = hw->phy.ops.identify_sfp(hw);
3828 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3830 "Unsupported SFP+ module type was detected.\n");
3834 err = hw->mac.ops.setup_sfp(hw);
3835 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3837 "Setup failure - unsupported SFP+ module type.\n");
/* Multispeed fiber needs the MSF tasklet to renegotiate link speed */
3840 if (hw->phy.multispeed_fiber)
3841 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3843 /* Update media type */
3844 switch (hw->mac.ops.get_media_type(hw)) {
3845 case ixgbe_media_type_fiber:
3846 adapter->optics = IFM_10G_SR;
3848 case ixgbe_media_type_copper:
3849 adapter->optics = IFM_10G_TWINAX;
3851 case ixgbe_media_type_cx4:
3852 adapter->optics = IFM_10G_CX4;
3855 adapter->optics = 0;
3859 IXGBE_CORE_UNLOCK(adapter);
3865 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
 * Re-negotiates link for multispeed fiber modules and rebuilds the
 * ifmedia list so ifconfig reflects the current module's capabilities.
 * NOTE(review): listing elides lines (autoneg/negotiate declarations);
 * verify against the complete driver source.
 */
3868 ixgbe_handle_msf(void *context, int pending)
3870 struct adapter *adapter = context;
3871 struct ixgbe_hw *hw = &adapter->hw;
3875 IXGBE_CORE_LOCK(adapter);
3876 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3877 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
/* Fall back to hardware link capabilities when nothing is advertised */
3879 autoneg = hw->phy.autoneg_advertised;
3880 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3881 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3882 if (hw->mac.ops.setup_link)
3883 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3885 /* Adjust media types shown in ifconfig */
3886 ifmedia_removeall(&adapter->media);
3887 ixgbe_add_media_types(adapter);
3888 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3889 IXGBE_CORE_UNLOCK(adapter);
3894 ** Tasklet for handling interrupts from an external PHY
/*
 * Services a Link Alarm Status Interrupt (LASI) from the external PHY
 * and logs over-temperature or other LASI handling errors.
 * NOTE(review): listing elides lines (else-if and error declaration);
 * verify against the complete driver source.
 */
3897 ixgbe_handle_phy(void *context, int pending)
3899 struct adapter *adapter = context;
3900 struct ixgbe_hw *hw = &adapter->hw;
3903 error = hw->phy.ops.handle_lasi(hw);
/* Over-temp is critical: the PHY will throttle itself to cool down */
3904 if (error == IXGBE_ERR_OVERTEMP)
3905 device_printf(adapter->dev,
3906 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3907 " PHY will downshift to lower power state!\n");
3909 device_printf(adapter->dev,
3910 "Error handling LASI interrupt: %d\n",
3917 ** Tasklet for reinitializing the Flow Director filter table
/*
 * Rebuilds the 82599 Flow Director tables after the hardware signals a
 * table-full condition, then unmasks the FDIR interrupt and restarts
 * the interface.
 */
3920 ixgbe_reinit_fdir(void *context, int pending)
3922 struct adapter *adapter = context;
3923 struct ifnet *ifp = adapter->ifp;
/* Guard: only run when the interrupt path actually requested a reinit */
3925 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3927 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3928 adapter->fdir_reinit = 0;
3929 /* re-enable flow director interrupts */
3930 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3931 /* Restart the interface */
3932 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3937 /*********************************************************************
3939 * Configure DMA Coalescing
3941 **********************************************************************/
/*
 * Pushes the user-requested DMA coalescing watchdog timer into the
 * shared-code config and applies it. X550-class MACs only.
 * NOTE(review): listing elides lines (returns/braces); verify against
 * the complete driver source.
 */
3943 ixgbe_config_dmac(struct adapter *adapter)
3945 struct ixgbe_hw *hw = &adapter->hw;
3946 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMAC is only present on X550 and later MACs */
3948 if (hw->mac.type < ixgbe_mac_X550 ||
3949 !hw->mac.ops.dmac_config)
/* Reprogram only when the timer or the link speed actually changed */
3952 if (dcfg->watchdog_timer ^ adapter->dmac ||
3953 dcfg->link_speed ^ adapter->link_speed) {
3954 dcfg->watchdog_timer = adapter->dmac;
3955 dcfg->fcoe_en = false;
3956 dcfg->link_speed = adapter->link_speed;
3959 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3960 dcfg->watchdog_timer, dcfg->link_speed);
3962 hw->mac.ops.dmac_config(hw);
3967 * Checks whether the adapter's ports are capable of
3968 * Wake On LAN by reading the adapter's NVM.
3970 * Sets each port's hw->wol_enabled value depending
3971 * on the value read here.
/*
 * Also caches the current Wakeup Filter Control register in
 * adapter->wufc so it can be restored on suspend.
 * NOTE(review): listing elides lines (dev_caps declaration, port check);
 * verify against the complete driver source.
 */
3974 ixgbe_check_wol_support(struct adapter *adapter)
3976 struct ixgbe_hw *hw = &adapter->hw;
3979 /* Find out WoL support for port */
3980 adapter->wol_support = hw->wol_enabled = 0;
3981 ixgbe_get_device_caps(hw, &dev_caps);
3982 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3983 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3985 adapter->wol_support = hw->wol_enabled = 1;
3987 /* Save initial wake up filter configuration */
3988 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3994 * Prepare the adapter/port for LPLU and/or WoL
/*
 * Called with the core lock held (asserted below). For X550EM baseT
 * parts this programs wake-up registers and enters Low Power Link Up;
 * all other parts are simply stopped.
 * NOTE(review): listing elides lines (braces/else/return); verify
 * against the complete driver source.
 */
3997 ixgbe_setup_low_power_mode(struct adapter *adapter)
3999 struct ixgbe_hw *hw = &adapter->hw;
4000 device_t dev = adapter->dev;
4003 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Without WoL there is no reason to keep the PHY powered */
4005 if (!hw->wol_enabled)
4006 ixgbe_set_phy_power(hw, FALSE);
4008 /* Limit power management flow to X550EM baseT */
4009 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4010 && hw->phy.ops.enter_lplu) {
4011 /* Turn off support for APM wakeup. (Using ACPI instead) */
4012 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4013 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4016 * Clear Wake Up Status register to prevent any previous wakeup
4017 * events from waking us up immediately after we suspend.
4019 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4022 * Program the Wakeup Filter Control register with user filter
4025 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4027 /* Enable wakeups and power management in Wakeup Control */
4028 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4029 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4031 /* X550EM baseT adapters need a special LPLU flow */
/* Keep the PHY from being reset by ixgbe_stop() before entering LPLU */
4032 hw->phy.reset_disable = true;
4033 ixgbe_stop(adapter);
4034 error = hw->phy.ops.enter_lplu(hw);
4037 "Error entering LPLU: %d\n", error);
4038 hw->phy.reset_disable = false;
4040 /* Just stop for other adapters */
4041 ixgbe_stop(adapter);
4047 /**********************************************************************
4049 * Update the board statistics counters.
4051 **********************************************************************/
/*
 * Reads the hardware statistics registers (which clear on read) and
 * accumulates them into adapter->stats.pf, then mirrors the totals
 * into the OS ifnet counters.
 * NOTE(review): the loop that accumulates missed_rx from the MPC
 * registers is elided in this listing; missed_rx/total_missed_rx are
 * shown only at their declaration and use sites. Verify against the
 * complete driver source.
 */
4053 ixgbe_update_stats_counters(struct adapter *adapter)
4055 struct ixgbe_hw *hw = &adapter->hw;
4056 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4057 u64 total_missed_rx = 0;
4059 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4060 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4061 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4062 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue packet/drop counters, 16 queue registers */
4064 for (int i = 0; i < 16; i++) {
4065 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4066 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4067 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4069 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4070 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4071 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4073 /* Hardware workaround, gprc counts missed packets */
4074 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4075 adapter->stats.pf.gprc -= missed_rx;
/* 82599+ use 64-bit (L+H) octet counters; 82598 only has the H half */
4077 if (hw->mac.type != ixgbe_mac_82598EB) {
4078 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4079 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4080 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4081 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4082 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4083 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4084 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4085 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4087 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4088 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4089 /* 82598 only has a counter in the high register */
4090 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4091 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4092 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4096 * Workaround: mprc hardware is incorrectly counting
4097 * broadcasts, so for now we subtract those.
4099 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4100 adapter->stats.pf.bprc += bprc;
4101 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4102 if (hw->mac.type == ixgbe_mac_82598EB)
4103 adapter->stats.pf.mprc -= bprc;
/* RX size-histogram counters */
4105 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4106 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4107 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4108 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4109 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4110 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4112 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4113 adapter->stats.pf.lxontxc += lxon;
4114 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4115 adapter->stats.pf.lxofftxc += lxoff;
4116 total = lxon + lxoff;
/* Pause frames are counted by the TX counters; back them out */
4118 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4119 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4120 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4121 adapter->stats.pf.gptc -= total;
4122 adapter->stats.pf.mptc -= total;
4123 adapter->stats.pf.ptc64 -= total;
4124 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4126 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4127 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4128 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4129 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4130 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4131 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4132 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4133 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4134 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4135 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4136 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4137 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4138 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4139 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4140 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4141 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4142 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4143 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4144 /* Only read FCOE on 82599 */
4145 if (hw->mac.type != ixgbe_mac_82598EB) {
4146 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4147 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4148 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4149 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4150 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4153 /* Fill out the OS statistics structure */
4154 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4155 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4156 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4157 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4158 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4159 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4160 IXGBE_SET_COLLISIONS(adapter, 0);
4161 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4162 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4163 + adapter->stats.pf.rlec);
4166 #if __FreeBSD_version >= 1100036
/*
 * ixgbe_get_counter - if_get_counter(9) callback: return a single
 * interface statistic from the driver's cached totals. OQDROPS is
 * computed on the fly by summing per-ring buf_ring drops.
 * NOTE(review): listing elides lines (rv declaration, returns); verify
 * against the complete driver source.
 */
4168 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4170 struct adapter *adapter;
4171 struct tx_ring *txr;
4174 adapter = if_getsoftc(ifp);
4177 case IFCOUNTER_IPACKETS:
4178 return (adapter->ipackets);
4179 case IFCOUNTER_OPACKETS:
4180 return (adapter->opackets);
4181 case IFCOUNTER_IBYTES:
4182 return (adapter->ibytes);
4183 case IFCOUNTER_OBYTES:
4184 return (adapter->obytes);
4185 case IFCOUNTER_IMCASTS:
4186 return (adapter->imcasts);
4187 case IFCOUNTER_OMCASTS:
4188 return (adapter->omcasts);
4189 case IFCOUNTER_COLLISIONS:
4191 case IFCOUNTER_IQDROPS:
4192 return (adapter->iqdrops);
4193 case IFCOUNTER_OQDROPS:
/* No single hardware counter: sum software drops across all TX rings */
4195 txr = adapter->tx_rings;
4196 for (int i = 0; i < adapter->num_queues; i++, txr++)
4197 rv += txr->br->br_drops;
4199 case IFCOUNTER_IERRORS:
4200 return (adapter->ierrors);
/* Anything we don't track ourselves falls back to the stack default */
4202 return (if_get_counter_default(ifp, cnt));
4207 /** ixgbe_sysctl_tdh_handler - Handler function
4208 * Retrieves the TDH value from the hardware
/* Read-only sysctl: reports the TX descriptor head for one ring. */
4211 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4215 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4218 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4219 error = sysctl_handle_int(oidp, &val, 0, req);
4220 if (error || !req->newptr)
4225 /** ixgbe_sysctl_tdt_handler - Handler function
4226 * Retrieves the TDT value from the hardware
/* Read-only sysctl: reports the TX descriptor tail for one ring. */
4229 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4233 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4236 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4237 error = sysctl_handle_int(oidp, &val, 0, req);
4238 if (error || !req->newptr)
4243 /** ixgbe_sysctl_rdh_handler - Handler function
4244 * Retrieves the RDH value from the hardware
/* Read-only sysctl: reports the RX descriptor head for one ring. */
4247 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4251 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4254 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4255 error = sysctl_handle_int(oidp, &val, 0, req);
4256 if (error || !req->newptr)
4261 /** ixgbe_sysctl_rdt_handler - Handler function
4262 * Retrieves the RDT value from the hardware
/* Read-only sysctl: reports the RX descriptor tail for one ring. */
4265 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4269 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4272 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4273 error = sysctl_handle_int(oidp, &val, 0, req);
4274 if (error || !req->newptr)
/*
 * Read/write sysctl for a queue's interrupt rate. Reading converts the
 * current EITR interval back to interrupts/second; writing reprograms
 * EITR and records the new global default in ixgbe_max_interrupt_rate.
 * NOTE(review): listing elides lines (error declaration, returns);
 * verify against the complete driver source.
 */
4280 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4283 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4284 unsigned int reg, usec, rate;
4286 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* EITR interval lives in bits 3..11 of the register */
4287 usec = ((reg & 0x0FF8) >> 3);
4289 rate = 500000 / usec;
4292 error = sysctl_handle_int(oidp, &rate, 0, req);
4293 if (error || !req->newptr)
4295 reg &= ~0xfff; /* default, no limitation */
4296 ixgbe_max_interrupt_rate = 0;
4297 if (rate > 0 && rate < 500000) {
4300 ixgbe_max_interrupt_rate = rate;
/* Convert interrupts/sec back to the 8-usec-granularity EITR field */
4301 reg |= ((4000000/rate) & 0xff8 );
4303 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * ixgbe_add_device_sysctls - register the per-device sysctl tree:
 * common knobs for all parts plus conditional nodes for X550 DMAC,
 * X552 backplane EEE, and X550EM-baseT WoL/external-PHY support.
 * NOTE(review): listing elides some lines (flag arguments, braces);
 * verify against the complete driver source.
 */
4308 ixgbe_add_device_sysctls(struct adapter *adapter)
4310 device_t dev = adapter->dev;
4311 struct ixgbe_hw *hw = &adapter->hw;
4312 struct sysctl_oid_list *child;
4313 struct sysctl_ctx_list *ctx;
4315 ctx = device_get_sysctl_ctx(dev);
4316 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4318 /* Sysctls for all devices */
4319 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4320 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4321 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4323 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4325 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4327 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4328 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4329 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4331 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4332 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4333 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4336 /* testing sysctls (for all devices) */
4337 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4338 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4339 ixgbe_sysctl_power_state, "I", "PCI Power State");
4341 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4342 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4343 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4345 /* for X550 series devices */
4346 if (hw->mac.type >= ixgbe_mac_X550)
4347 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4348 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4349 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4351 /* for X552 backplane devices */
4352 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4353 struct sysctl_oid *eee_node;
4354 struct sysctl_oid_list *eee_list;
4356 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4358 "Energy Efficient Ethernet sysctls");
4359 eee_list = SYSCTL_CHILDREN(eee_node);
4361 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4362 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4363 ixgbe_sysctl_eee_enable, "I",
4364 "Enable or Disable EEE");
4366 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4367 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4368 ixgbe_sysctl_eee_negotiated, "I",
4369 "EEE negotiated on link");
4371 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4372 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4373 ixgbe_sysctl_eee_tx_lpi_status, "I",
4374 "Whether or not TX link is in LPI state");
4376 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4377 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4378 ixgbe_sysctl_eee_rx_lpi_status, "I",
4379 "Whether or not RX link is in LPI state");
4381 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4382 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4383 ixgbe_sysctl_eee_tx_lpi_delay, "I",
4384 "TX LPI entry delay in microseconds");
4387 /* for WoL-capable devices */
4388 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4389 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4390 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4391 ixgbe_sysctl_wol_enable, "I",
4392 "Enable/Disable Wake on LAN");
4394 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4395 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4396 ixgbe_sysctl_wufc, "I",
4397 "Enable/Disable Wake Up Filters");
4400 /* for X552/X557-AT devices */
4401 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4402 struct sysctl_oid *phy_node;
4403 struct sysctl_oid_list *phy_list;
4405 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4407 "External PHY sysctls");
4408 phy_list = SYSCTL_CHILDREN(phy_node);
4410 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4411 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4412 ixgbe_sysctl_phy_temp, "I",
4413 "Current External PHY Temperature (Celsius)");
4415 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4416 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4417 ixgbe_sysctl_phy_overtemp_occurred, "I",
4418 "External PHY High Temperature Event Occurred")
4423 * Add sysctl variables, one per statistic, to the system.
4426 ixgbe_add_hw_stats(struct adapter *adapter)
4428 device_t dev = adapter->dev;
4430 struct tx_ring *txr = adapter->tx_rings;
4431 struct rx_ring *rxr = adapter->rx_rings;
4433 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4434 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4435 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4436 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4438 struct sysctl_oid *stat_node, *queue_node;
4439 struct sysctl_oid_list *stat_list, *queue_list;
4441 #define QUEUE_NAME_LEN 32
4442 char namebuf[QUEUE_NAME_LEN];
4444 /* Driver Statistics */
4445 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4446 CTLFLAG_RD, &adapter->dropped_pkts,
4447 "Driver dropped packets");
4448 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4449 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4450 "m_defrag() failed");
4451 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4452 CTLFLAG_RD, &adapter->watchdog_events,
4453 "Watchdog timeouts");
4454 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4455 CTLFLAG_RD, &adapter->link_irq,
4456 "Link MSIX IRQ Handled");
4458 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4459 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4460 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4461 CTLFLAG_RD, NULL, "Queue Name");
4462 queue_list = SYSCTL_CHILDREN(queue_node);
4464 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4465 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4466 sizeof(&adapter->queues[i]),
4467 ixgbe_sysctl_interrupt_rate_handler, "IU",
4469 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4470 CTLFLAG_RD, &(adapter->queues[i].irqs),
4471 "irqs on this queue");
4472 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4473 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4474 ixgbe_sysctl_tdh_handler, "IU",
4475 "Transmit Descriptor Head");
4476 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4477 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4478 ixgbe_sysctl_tdt_handler, "IU",
4479 "Transmit Descriptor Tail");
4480 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4481 CTLFLAG_RD, &txr->tso_tx,
4483 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4484 CTLFLAG_RD, &txr->no_tx_dma_setup,
4485 "Driver tx dma failure in xmit");
4486 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4487 CTLFLAG_RD, &txr->no_desc_avail,
4488 "Queue No Descriptor Available");
4489 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4490 CTLFLAG_RD, &txr->total_packets,
4491 "Queue Packets Transmitted");
4492 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4493 CTLFLAG_RD, &txr->br->br_drops,
4494 "Packets dropped in buf_ring");
4497 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4498 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4499 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4500 CTLFLAG_RD, NULL, "Queue Name");
4501 queue_list = SYSCTL_CHILDREN(queue_node);
4503 struct lro_ctrl *lro = &rxr->lro;
4505 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4506 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4507 CTLFLAG_RD, NULL, "Queue Name");
4508 queue_list = SYSCTL_CHILDREN(queue_node);
4510 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4511 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4512 ixgbe_sysctl_rdh_handler, "IU",
4513 "Receive Descriptor Head");
4514 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4515 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4516 ixgbe_sysctl_rdt_handler, "IU",
4517 "Receive Descriptor Tail");
4518 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4519 CTLFLAG_RD, &rxr->rx_packets,
4520 "Queue Packets Received");
4521 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4522 CTLFLAG_RD, &rxr->rx_bytes,
4523 "Queue Bytes Received");
4524 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4525 CTLFLAG_RD, &rxr->rx_copies,
4526 "Copied RX Frames");
4527 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4528 CTLFLAG_RD, &lro->lro_queued, 0,
4530 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4531 CTLFLAG_RD, &lro->lro_flushed, 0,
4535 /* MAC stats get the own sub node */
4537 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4538 CTLFLAG_RD, NULL, "MAC Statistics");
4539 stat_list = SYSCTL_CHILDREN(stat_node);
4541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4542 CTLFLAG_RD, &stats->crcerrs,
4544 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4545 CTLFLAG_RD, &stats->illerrc,
4546 "Illegal Byte Errors");
4547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4548 CTLFLAG_RD, &stats->errbc,
4550 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4551 CTLFLAG_RD, &stats->mspdc,
4552 "MAC Short Packets Discarded");
4553 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4554 CTLFLAG_RD, &stats->mlfc,
4555 "MAC Local Faults");
4556 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4557 CTLFLAG_RD, &stats->mrfc,
4558 "MAC Remote Faults");
4559 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4560 CTLFLAG_RD, &stats->rlec,
4561 "Receive Length Errors");
4563 /* Flow Control stats */
4564 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4565 CTLFLAG_RD, &stats->lxontxc,
4566 "Link XON Transmitted");
4567 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4568 CTLFLAG_RD, &stats->lxonrxc,
4569 "Link XON Received");
4570 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4571 CTLFLAG_RD, &stats->lxofftxc,
4572 "Link XOFF Transmitted");
4573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4574 CTLFLAG_RD, &stats->lxoffrxc,
4575 "Link XOFF Received");
4577 /* Packet Reception Stats */
4578 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4579 CTLFLAG_RD, &stats->tor,
4580 "Total Octets Received");
4581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4582 CTLFLAG_RD, &stats->gorc,
4583 "Good Octets Received");
4584 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4585 CTLFLAG_RD, &stats->tpr,
4586 "Total Packets Received");
4587 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4588 CTLFLAG_RD, &stats->gprc,
4589 "Good Packets Received");
4590 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4591 CTLFLAG_RD, &stats->mprc,
4592 "Multicast Packets Received");
4593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4594 CTLFLAG_RD, &stats->bprc,
4595 "Broadcast Packets Received");
4596 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4597 CTLFLAG_RD, &stats->prc64,
4598 "64 byte frames received ");
4599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4600 CTLFLAG_RD, &stats->prc127,
4601 "65-127 byte frames received");
4602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4603 CTLFLAG_RD, &stats->prc255,
4604 "128-255 byte frames received");
4605 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4606 CTLFLAG_RD, &stats->prc511,
4607 "256-511 byte frames received");
4608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4609 CTLFLAG_RD, &stats->prc1023,
4610 "512-1023 byte frames received");
4611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4612 CTLFLAG_RD, &stats->prc1522,
4613 "1023-1522 byte frames received");
4614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4615 CTLFLAG_RD, &stats->ruc,
4616 "Receive Undersized");
4617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4618 CTLFLAG_RD, &stats->rfc,
4619 "Fragmented Packets Received ");
4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4621 CTLFLAG_RD, &stats->roc,
4622 "Oversized Packets Received");
4623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4624 CTLFLAG_RD, &stats->rjc,
4626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4627 CTLFLAG_RD, &stats->mngprc,
4628 "Management Packets Received");
4629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4630 CTLFLAG_RD, &stats->mngptc,
4631 "Management Packets Dropped");
4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4633 CTLFLAG_RD, &stats->xec,
4636 /* Packet Transmission Stats */
4637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4638 CTLFLAG_RD, &stats->gotc,
4639 "Good Octets Transmitted");
4640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4641 CTLFLAG_RD, &stats->tpt,
4642 "Total Packets Transmitted");
4643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4644 CTLFLAG_RD, &stats->gptc,
4645 "Good Packets Transmitted");
4646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4647 CTLFLAG_RD, &stats->bptc,
4648 "Broadcast Packets Transmitted");
4649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4650 CTLFLAG_RD, &stats->mptc,
4651 "Multicast Packets Transmitted");
4652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4653 CTLFLAG_RD, &stats->mngptc,
4654 "Management Packets Transmitted");
4655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4656 CTLFLAG_RD, &stats->ptc64,
4657 "64 byte frames transmitted ");
4658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4659 CTLFLAG_RD, &stats->ptc127,
4660 "65-127 byte frames transmitted");
4661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4662 CTLFLAG_RD, &stats->ptc255,
4663 "128-255 byte frames transmitted");
4664 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4665 CTLFLAG_RD, &stats->ptc511,
4666 "256-511 byte frames transmitted");
4667 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4668 CTLFLAG_RD, &stats->ptc1023,
4669 "512-1023 byte frames transmitted");
4670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4671 CTLFLAG_RD, &stats->ptc1522,
4672 "1024-1522 byte frames transmitted");
/*
 * ixgbe_set_sysctl_value - register a read/write integer sysctl under the
 * device's sysctl tree, backed by *limit with initial value 'value'.
 * NOTE(review): excerpt is sampled; the storage class/return type line and
 * closing brace are not visible here.
 */
4676 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4677 const char *description, int *limit, int value)
4680 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4681 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4682 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
/*
 * Sysctl handler for flow-control mode. Reads the requested value from
 * userland, short-circuits if unchanged, and delegates to
 * ixgbe_set_flowcntl() to program the hardware.
 */
4686 ** Set flow control using sysctl:
4687 ** Flow control values:
4694 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4697 struct adapter *adapter;
4699 adapter = (struct adapter *) arg1;
4702 error = sysctl_handle_int(oidp, &fc, 0, req);
/* No new value supplied (read-only access) or copyin failed: bail out. */
4703 if ((error) || (req->newptr == NULL))
4706 /* Don't bother if it's not changed */
4707 if (adapter->fc == fc)
4710 return ixgbe_set_flowcntl(adapter, fc);
/*
 * Apply a flow-control mode to the hardware. When pause frames are in use,
 * per-queue RX drop is disabled (and re-enabled for fc_none) so that flow
 * control, not packet drop, handles buffer exhaustion on multiqueue.
 * NOTE(review): requested_mode is read from adapter->fc, not the fc
 * argument — a line missing from this excerpt presumably assigns
 * adapter->fc = fc first; confirm against the full source.
 */
4715 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4719 case ixgbe_fc_rx_pause:
4720 case ixgbe_fc_tx_pause:
4722 adapter->hw.fc.requested_mode = adapter->fc;
4723 if (adapter->num_queues > 1)
4724 ixgbe_disable_rx_drop(adapter);
4727 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4728 if (adapter->num_queues > 1)
4729 ixgbe_enable_rx_drop(adapter);
4735 /* Don't autoneg if forcing a value */
4736 adapter->hw.fc.disable_fc_autoneg = TRUE;
4737 ixgbe_fc_enable(&adapter->hw);
/*
 * Sysctl handler for the advertised link-speed bitmask (see bit meanings
 * below). Validation and hardware programming happen in
 * ixgbe_set_advertise().
 */
4742 ** Control advertised link speed:
4744 ** 0x1 - advertise 100 Mb
4745 ** 0x2 - advertise 1G
4746 ** 0x4 - advertise 10G
4749 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4751 int error, advertise;
4752 struct adapter *adapter;
4754 adapter = (struct adapter *) arg1;
4755 advertise = adapter->advertise;
4757 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* Read-only access or copy error: do not touch hardware. */
4758 if ((error) || (req->newptr == NULL))
4761 return ixgbe_set_advertise(adapter, advertise);
/*
 * Validate and apply a new advertised-speed bitmask (0x1=100M, 0x2=1G,
 * 0x4=10G), then restart link setup. Rejected for backplane media, for
 * non-copper/non-multispeed-fiber PHYs, for out-of-range masks, and for
 * 100Mb on MACs other than X540/X550.
 */
4765 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4768 struct ixgbe_hw *hw;
4769 ixgbe_link_speed speed;
4771 /* Checks to validate new value */
4772 if (adapter->advertise == advertise) /* no change */
4778 /* No speed changes for backplane media */
4779 if (hw->phy.media_type == ixgbe_media_type_backplane)
4782 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4783 (hw->phy.multispeed_fiber))) {
4785 "Advertised speed can only be set on copper or "
4786 "multispeed fiber media types.\n");
4790 if (advertise < 0x1 || advertise > 0x7) {
4792 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
/* 100Mb advertisement is only supported by the X540/X550 MACs. */
4796 if ((advertise & 0x1)
4797 && (hw->mac.type != ixgbe_mac_X540)
4798 && (hw->mac.type != ixgbe_mac_X550)) {
4799 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4803 /* Set new value and report new advertised mode */
4805 if (advertise & 0x1)
4806 speed |= IXGBE_LINK_SPEED_100_FULL;
4807 if (advertise & 0x2)
4808 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4809 if (advertise & 0x4)
4810 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4811 adapter->advertise = advertise;
4813 hw->mac.autotry_restart = TRUE;
4814 hw->mac.ops.setup_link(hw, speed, TRUE);
/*
 * Report the external PHY's current temperature via sysctl. Only the
 * X552/X557-AT (device id X550EM_X_10G_T) has this sensor; everything
 * else gets an error message.
 */
4820 * The following two sysctls are for X552/X557-AT devices;
4821 * they deal with the external PHY used in them.
4824 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4826 struct adapter *adapter = (struct adapter *) arg1;
4827 struct ixgbe_hw *hw = &adapter->hw;
4830 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4831 device_printf(adapter->dev,
4832 "Device has no supported external thermal sensor.\n");
/* MDIO read of the PHY's current-temperature register. */
4836 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4837 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4839 device_printf(adapter->dev,
4840 "Error reading from PHY's current temperature register\n");
4844 /* Shift temp for output */
4847 return (sysctl_handle_int(oidp, NULL, reg, req));
4851 * Reports whether the current PHY temperature is over
4852 * the overtemp threshold.
4853 * - This is reported directly from the PHY
4856 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4858 struct adapter *adapter = (struct adapter *) arg1;
4859 struct ixgbe_hw *hw = &adapter->hw;
/* Sensor exists only on X552/X557-AT parts. */
4862 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4863 device_printf(adapter->dev,
4864 "Device has no supported external thermal sensor.\n");
4868 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4869 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4871 device_printf(adapter->dev,
4872 "Error reading from PHY's temperature status register\n");
4876 /* Get occurrence bit */
/* Bit 14 (0x4000) of the status register is the overtemp flag. */
4877 reg = !!(reg & 0x4000);
4878 return (sysctl_handle_int(oidp, 0, reg, req));
4882 ** Thermal Shutdown Trigger (internal MAC)
4883 ** - Set this to 1 to cause an overtemp event to occur
4886 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4888 struct adapter *adapter = (struct adapter *) arg1;
4889 struct ixgbe_hw *hw = &adapter->hw;
4890 int error, fire = 0;
4892 error = sysctl_handle_int(oidp, &fire, 0, req);
4893 if ((error) || (req->newptr == NULL))
/* Set the thermal-sensor bit in EICS to fire a fake overtemp interrupt. */
4897 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4898 reg |= IXGBE_EICR_TS;
4899 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4906 ** Manage DMA Coalescing.
4908 ** 0/1 - off / on (use default value of 1000)
4910 ** Legal timer values are:
4911 ** 50,100,250,500,1000,2000,5000,10000
4913 ** Turning off interrupt moderation will also turn this off.
4916 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4918 struct adapter *adapter = (struct adapter *) arg1;
4919 struct ifnet *ifp = adapter->ifp;
4923 newval = adapter->dmac;
4924 error = sysctl_handle_int(oidp, &newval, 0, req);
4925 if ((error) || (req->newptr == NULL))
4934 /* Enable and use default */
4935 adapter->dmac = 1000;
4945 /* Legal values - allow */
4946 adapter->dmac = newval;
4949 /* Do nothing, illegal value */
4953 /* Re-initialize hardware if it's already running */
4954 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4955 ixgbe_init(adapter);
4962 * Sysctl to test power states
4964 * 0 - set device to D0
4965 * 3 - set device to D3
4966 * (none) - get current device power state
4969 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4971 struct adapter *adapter = (struct adapter *) arg1;
4972 device_t dev = adapter->dev;
4973 int curr_ps, new_ps, error = 0;
4975 curr_ps = new_ps = pci_get_powerstate(dev);
4977 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4978 if ((error) || (req->newptr == NULL))
4981 if (new_ps == curr_ps)
/* D0 <-> D3 transitions are done via the bus suspend/resume methods. */
4984 if (new_ps == 3 && curr_ps == 0)
4985 error = DEVICE_SUSPEND(dev);
4986 else if (new_ps == 0 && curr_ps == 3)
4987 error = DEVICE_RESUME(dev);
4991 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4997 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
5003 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5005 struct adapter *adapter = (struct adapter *) arg1;
5006 struct ixgbe_hw *hw = &adapter->hw;
5007 int new_wol_enabled;
5010 new_wol_enabled = hw->wol_enabled;
5011 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5012 if ((error) || (req->newptr == NULL))
/* Normalize any nonzero input to 1 before comparing/storing. */
5014 new_wol_enabled = !!(new_wol_enabled);
5015 if (new_wol_enabled == hw->wol_enabled)
/* Refuse to enable WoL on adapters without WoL support. */
5018 if (new_wol_enabled > 0 && !adapter->wol_support)
5021 hw->wol_enabled = new_wol_enabled;
5027 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5028 * if supported by the adapter.
5034 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
5036 struct adapter *adapter = (struct adapter *) arg1;
5037 struct ixgbe_hw *hw = &adapter->hw;
5038 struct ifnet *ifp = adapter->ifp;
5039 int new_eee_enabled, error = 0;
5041 new_eee_enabled = adapter->eee_enabled;
5042 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
5043 if ((error) || (req->newptr == NULL))
5045 new_eee_enabled = !!(new_eee_enabled);
5046 if (new_eee_enabled == adapter->eee_enabled)
/* EEE requires a setup_eee MAC op; refuse to enable without one. */
5049 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5052 adapter->eee_enabled = new_eee_enabled;
5054 /* Re-initialize hardware if it's already running */
5055 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5056 ixgbe_init(adapter);
5062 * Read-only sysctl indicating whether EEE support was negotiated
5066 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5068 struct adapter *adapter = (struct adapter *) arg1;
5069 struct ixgbe_hw *hw = &adapter->hw;
/* Boolean: EEE negotiation bit of the EEE status register. */
5072 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5074 return (sysctl_handle_int(oidp, 0, status, req));
5078 * Read-only sysctl indicating whether RX Link is in LPI state.
5081 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5083 struct adapter *adapter = (struct adapter *) arg1;
5084 struct ixgbe_hw *hw = &adapter->hw;
/* Boolean: RX LPI status bit of the EEE status register. */
5087 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5088 IXGBE_EEE_RX_LPI_STATUS);
5090 return (sysctl_handle_int(oidp, 0, status, req));
5094 * Read-only sysctl indicating whether TX Link is in LPI state.
5097 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5099 struct adapter *adapter = (struct adapter *) arg1;
5100 struct ixgbe_hw *hw = &adapter->hw;
/* Boolean: TX LPI status bit of the EEE status register. */
5103 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5104 IXGBE_EEE_TX_LPI_STATUS);
5106 return (sysctl_handle_int(oidp, 0, status, req));
5110 * Read-only sysctl indicating TX Link LPI delay
5113 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5115 struct adapter *adapter = (struct adapter *) arg1;
5116 struct ixgbe_hw *hw = &adapter->hw;
5119 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
/* The delay field lives in the top bits of EEE_SU; shift it down. */
5121 return (sysctl_handle_int(oidp, 0, reg >> 26, req));
5125 * Sysctl to enable/disable the types of packets that the
5126 * adapter will wake up on upon receipt.
5127 * WUFC - Wake Up Filter Control
5129 * 0x1 - Link Status Change
5130 * 0x2 - Magic Packet
5131 * 0x4 - Direct Exact
5132 * 0x8 - Directed Multicast
5134 * 0x20 - ARP/IPv4 Request Packet
5135 * 0x40 - Direct IPv4 Packet
5136 * 0x80 - Direct IPv6 Packet
5138 * Setting another flag will cause the sysctl to return an
5142 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5144 struct adapter *adapter = (struct adapter *) arg1;
5148 new_wufc = adapter->wufc;
5150 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5151 if ((error) || (req->newptr == NULL))
5153 if (new_wufc == adapter->wufc)
/* Only the low 8 bits are valid wake-up filter flags. */
5156 if (new_wufc & 0xffffff00)
/* Preserve any upper bits already stored before committing. */
5160 new_wufc |= (0xffffff & adapter->wufc);
5161 adapter->wufc = new_wufc;
/*
 * Debug-only sysctl (IXGBE_DEBUG): dump the RSS redirection table (RETA,
 * plus the extended ERETA entries on X550-class MACs) into an sbuf for
 * userland consumption.
 */
5169 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5171 struct adapter *adapter = (struct adapter *)arg1;
5172 struct ixgbe_hw *hw = &adapter->hw;
5173 device_t dev = adapter->dev;
5174 int error = 0, reta_size;
5178 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5180 device_printf(dev, "Could not allocate sbuf for output.\n");
5184 // TODO: use sbufs to make a string to print out
5185 /* Set multiplier for RETA setup and table size based on MAC */
5186 switch (adapter->hw.mac.type) {
5187 case ixgbe_mac_X550:
5188 case ixgbe_mac_X550EM_x:
5196 /* Print out the redirection table */
5197 sbuf_cat(buf, "\n");
5198 for (int i = 0; i < reta_size; i++) {
/* Entries 0-31 come from RETA; 32+ from the X550 ERETA registers. */
5200 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5201 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5203 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5204 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5208 // TODO: print more config
5210 error = sbuf_finish(buf);
5212 device_printf(dev, "Error finishing sbuf: %d\n", error);
5217 #endif /* IXGBE_DEBUG */
5220 ** Enable the hardware to drop packets when the buffer is
5221 ** full. This is useful when multiqueue,so that no single
5222 ** queue being full stalls the entire RX engine. We only
5223 ** enable this when Multiqueue AND when Flow Control is
5227 ixgbe_enable_rx_drop(struct adapter *adapter)
5229 struct ixgbe_hw *hw = &adapter->hw;
/* Set the DROP_EN bit in SRRCTL for every PF receive ring. */
5231 for (int i = 0; i < adapter->num_queues; i++) {
5232 struct rx_ring *rxr = &adapter->rx_rings[i];
5233 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5234 srrctl |= IXGBE_SRRCTL_DROP_EN;
5235 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5238 /* enable drop for each vf */
5239 for (int i = 0; i < adapter->num_vfs; i++) {
5240 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5241 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Inverse of ixgbe_enable_rx_drop(): clear DROP_EN on every PF ring's
 * SRRCTL and clear the per-VF queue-drop-enable (QDE) bits, so flow
 * control rather than packet drop handles full buffers.
 */
5248 ixgbe_disable_rx_drop(struct adapter *adapter)
5250 struct ixgbe_hw *hw = &adapter->hw;
5252 for (int i = 0; i < adapter->num_queues; i++) {
5253 struct rx_ring *rxr = &adapter->rx_rings[i];
5254 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5255 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5256 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5259 /* disable drop for each vf */
5260 for (int i = 0; i < adapter->num_vfs; i++) {
5261 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5262 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Re-trigger interrupts for the queues in the 64-bit 'queues' mask by
 * writing EICS. 82598 has a single cause-set register; later MACs split
 * the 64 queue bits across EICS_EX(0)/EICS_EX(1).
 */
5268 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5272 switch (adapter->hw.mac.type) {
5273 case ixgbe_mac_82598EB:
5274 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5277 case ixgbe_mac_82599EB:
5278 case ixgbe_mac_X540:
5279 case ixgbe_mac_X550:
5280 case ixgbe_mac_X550EM_x:
5281 mask = (queues & 0xFFFFFFFF);
5282 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5283 mask = (queues >> 32);
5284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5294 ** Support functions for SRIOV/VF management
5298 ixgbe_ping_all_vfs(struct adapter *adapter)
5300 struct ixgbe_vf *vf;
/* Send a PF control message to every active VF via the mailbox. */
5302 for (int i = 0; i < adapter->num_vfs; i++) {
5303 vf = &adapter->vfs[i];
5304 if (vf->flags & IXGBE_VF_ACTIVE)
5305 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program a VF pool's default VLAN behavior. With a nonzero tag the pool
 * is forced to tag all TX traffic and accept only tagged RX; the exact
 * tag==0 branch is partially missing from this excerpt — confirm against
 * the full source.
 */
5311 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5314 struct ixgbe_hw *hw;
5315 uint32_t vmolr, vmvir;
5321 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5323 /* Do not receive packets that pass inexact filters. */
5324 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5326 /* Disable Multicast Promicuous Mode. */
5327 vmolr &= ~IXGBE_VMOLR_MPE;
5329 /* Accept broadcasts. */
5330 vmolr |= IXGBE_VMOLR_BAM;
5333 /* Accept non-vlan tagged traffic. */
5334 //vmolr |= IXGBE_VMOLR_AUPE;
5336 /* Allow VM to tag outgoing traffic; no default tag. */
5339 /* Require vlan-tagged traffic. */
5340 vmolr &= ~IXGBE_VMOLR_AUPE;
5342 /* Tag all traffic with provided vlan tag. */
5343 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5345 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5346 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the PF's and this VF's max frame sizes can coexist.
 * Only 82599 has the restriction; the rules depend on the mailbox API
 * version the VF negotiated.
 */
5351 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5355 * Frame size compatibility between PF and VF is only a problem on
5356 * 82599-based cards. X540 and later support any combination of jumbo
5357 * frames on PFs and VFs.
5359 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5362 switch (vf->api_ver) {
5363 case IXGBE_API_VER_1_0:
5364 case IXGBE_API_VER_UNKNOWN:
5366 * On legacy (1.0 and older) VF versions, we don't support jumbo
5367 * frames on either the PF or the VF.
5369 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5370 vf->max_frame_size > ETHER_MAX_LEN)
5376 case IXGBE_API_VER_1_1:
5379 * 1.1 or later VF versions always work if they aren't using
5382 if (vf->max_frame_size <= ETHER_MAX_LEN)
5386 * Jumbo frames only work with VFs if the PF is also using jumbo
5389 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Handle a VF reset: restore the VF's default VLAN, clear its RAR entry,
 * and drop it back to an unknown mailbox API version (multicast-address
 * clearing is still a TODO per the original note).
 */
5399 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5401 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5403 // XXX clear multicast addresses
5405 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5407 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/*
 * Enable transmit for this VF by setting its bit in the VFTE register
 * bank (one bit per pool, 32 pools per register).
 */
5412 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5414 struct ixgbe_hw *hw;
5415 uint32_t vf_index, vfte;
5419 vf_index = IXGBE_VF_INDEX(vf->pool);
5420 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5421 vfte |= IXGBE_VF_BIT(vf->pool);
5422 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Enable (or, on frame-size mismatch, disable) receive for this VF by
 * toggling its bit in VFRE. Incompatible frame sizes leave RX off so the
 * VF cannot receive frames the PF cannot handle.
 */
5427 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5429 struct ixgbe_hw *hw;
5430 uint32_t vf_index, vfre;
5434 vf_index = IXGBE_VF_INDEX(vf->pool);
5435 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5436 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5437 vfre |= IXGBE_VF_BIT(vf->pool);
5439 vfre &= ~IXGBE_VF_BIT(vf->pool);
5440 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Respond to a VF_RESET mailbox message: reset the VF's state, restore
 * its RAR entry if it has a valid MAC (ACK) or not (NACK), re-enable
 * TX/RX, mark the VF clear-to-send, and reply with the permanent MAC
 * address and multicast filter type.
 */
5445 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5447 struct ixgbe_hw *hw;
5449 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5453 ixgbe_process_vf_reset(adapter, vf);
5455 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5456 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5457 vf->ether_addr, vf->pool, TRUE);
5458 ack = IXGBE_VT_MSGTYPE_ACK;
5460 ack = IXGBE_VT_MSGTYPE_NACK;
5462 ixgbe_vf_enable_transmit(adapter, vf);
5463 ixgbe_vf_enable_receive(adapter, vf);
5465 vf->flags |= IXGBE_VF_CTS;
/* Reply layout: word 0 = type|ack|CTS, words 1-2 = MAC, word 3 = mc filter. */
5467 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5468 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5469 resp[3] = hw->mac.mc_filter_type;
5470 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a SET_MAC_ADDR mailbox message. NACKs if the VF lacks the
 * CAP_MAC privilege and is actually changing the address, or if the
 * proposed address is invalid; otherwise stores it, programs the RAR,
 * and ACKs.
 */
5475 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5479 mac = (uint8_t*)&msg[1];
5481 /* Check that the VF has permission to change the MAC address. */
5482 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5483 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5487 if (ixgbe_validate_mac_addr(mac) != 0) {
5488 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5492 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5494 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5497 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5502 ** VF multicast addresses are set by using the appropriate bit in
5503 ** 1 of 128 32 bit addresses (4096 possible).
5506 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5508 u16 *list = (u16*)&msg[1];
5510 u32 vmolr, vec_bit, vec_reg, mta_reg;
/* Entry count is packed in the message-info field; clamp to the cap. */
5512 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5513 entries = min(entries, IXGBE_MAX_VF_MC);
5515 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5517 vf->num_mc_hashes = entries;
5519 /* Set the appropriate MTA bit */
/* Each 12-bit hash selects one of 128 MTA registers (bits 5-11) and a
 * bit within it (bits 0-4). */
5520 for (int i = 0; i < entries; i++) {
5521 vf->mc_hash[i] = list[i];
5522 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5523 vec_bit = vf->mc_hash[i] & 0x1F;
5524 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5525 mta_reg |= (1 << vec_bit);
5526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5529 vmolr |= IXGBE_VMOLR_ROMPE;
5530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5531 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_VLAN mailbox message: add/remove the VF pool from a VLAN
 * filter entry. NACKs VFs without the VLAN capability and attempts to
 * enable VLAN id 0.
 */
5537 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5539 struct ixgbe_hw *hw;
5544 enable = IXGBE_VT_MSGINFO(msg[0]);
5545 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5547 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5548 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5552 /* It is illegal to enable vlan tag 0. */
5553 if (tag == 0 && enable != 0){
5554 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5558 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5559 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_LPE (large packet enable / max frame size) message.
 * Out-of-range requests are deliberately ACKed without effect; valid
 * ones update the VF's max frame size, re-evaluate whether RX must be
 * disabled for compatibility, and grow the PF's MHADD.MFS if needed.
 */
5564 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5566 struct ixgbe_hw *hw;
5567 uint32_t vf_max_size, pf_max_size, mhadd;
5570 vf_max_size = msg[1];
5572 if (vf_max_size < ETHER_CRC_LEN) {
5573 /* We intentionally ACK invalid LPE requests. */
5574 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* Requested size includes CRC; strip it before range-checking. */
5578 vf_max_size -= ETHER_CRC_LEN;
5580 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5581 /* We intentionally ACK invalid LPE requests. */
5582 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5586 vf->max_frame_size = vf_max_size;
5587 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5590 * We might have to disable reception to this VF if the frame size is
5591 * not compatible with the config on the PF.
5593 ixgbe_vf_enable_receive(adapter, vf);
5595 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5596 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5598 if (pf_max_size < adapter->max_frame_size) {
5599 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5600 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5601 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5604 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* SET_MACVLAN is not implemented yet; always NACK the request. */
5609 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5612 //XXX implement this
5613 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle API_NEGOTIATE: accept mailbox API versions 1.0 and 1.1 (ACK and
 * record), NACK anything else and mark the version unknown.
 */
5618 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5623 case IXGBE_API_VER_1_0:
5624 case IXGBE_API_VER_1_1:
5625 vf->api_ver = msg[1];
5626 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5629 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5630 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle GET_QUEUES: report per-VF TX/RX queue counts derived from the
 * current IOV mode, whether a transparent (default) VLAN is in effect,
 * and the default queue. NACKed for pre-1.1 API VFs.
 */
5637 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5640 struct ixgbe_hw *hw;
5641 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5646 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5648 case IXGBE_API_VER_1_0:
5649 case IXGBE_API_VER_UNKNOWN:
5650 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5654 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5655 IXGBE_VT_MSGTYPE_CTS;
5657 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5658 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5659 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5660 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5661 resp[IXGBE_VF_DEF_QUEUE] = 0;
5663 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one mailbox message from a VF and dispatch it. VF_RESET is always
 * honored; all other messages are NACKed unless the VF has completed a
 * reset handshake (IXGBE_VF_CTS). Unknown message types are NACKed.
 */
5668 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5670 struct ixgbe_hw *hw;
5671 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5676 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5681 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5682 adapter->ifp->if_xname, msg[0], vf->pool);
5683 if (msg[0] == IXGBE_VF_RESET) {
5684 ixgbe_vf_reset_msg(adapter, vf, msg);
5688 if (!(vf->flags & IXGBE_VF_CTS)) {
5689 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5693 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5694 case IXGBE_VF_SET_MAC_ADDR:
5695 ixgbe_vf_set_mac(adapter, vf, msg);
5697 case IXGBE_VF_SET_MULTICAST:
5698 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5700 case IXGBE_VF_SET_VLAN:
5701 ixgbe_vf_set_vlan(adapter, vf, msg);
5703 case IXGBE_VF_SET_LPE:
5704 ixgbe_vf_set_lpe(adapter, vf, msg);
5706 case IXGBE_VF_SET_MACVLAN:
5707 ixgbe_vf_set_macvlan(adapter, vf, msg);
5709 case IXGBE_VF_API_NEGOTIATE:
5710 ixgbe_vf_api_negotiate(adapter, vf, msg);
5712 case IXGBE_VF_GET_QUEUES:
5713 ixgbe_vf_get_queues(adapter, vf, msg);
5716 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5722 * Tasklet for handling VF -> PF mailbox messages.
5725 ixgbe_handle_mbx(void *context, int pending)
5727 struct adapter *adapter;
5728 struct ixgbe_hw *hw;
5729 struct ixgbe_vf *vf;
/* Under the core lock, poll every active VF for reset, message, and
 * ack events, handling each that is pending. */
5735 IXGBE_CORE_LOCK(adapter);
5736 for (i = 0; i < adapter->num_vfs; i++) {
5737 vf = &adapter->vfs[i];
5739 if (vf->flags & IXGBE_VF_ACTIVE) {
5740 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5741 ixgbe_process_vf_reset(adapter, vf);
5743 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5744 ixgbe_process_vf_msg(adapter, vf);
5746 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5747 ixgbe_process_vf_ack(adapter, vf);
5750 IXGBE_CORE_UNLOCK(adapter);
/*
 * PCI SR-IOV entry point: allocate per-VF state for num_vfs virtual
 * functions and reinitialize the adapter in IOV mode. Fails (resetting
 * num_vfs to 0) if the requested count exceeds the mode's maximum or
 * the VF array cannot be allocated.
 */
5755 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5757 struct adapter *adapter;
5758 enum ixgbe_iov_mode mode;
5760 adapter = device_get_softc(dev);
5761 adapter->num_vfs = num_vfs;
5762 mode = ixgbe_get_iov_mode(adapter);
5764 if (num_vfs > ixgbe_max_vfs(mode)) {
5765 adapter->num_vfs = 0;
5769 IXGBE_CORE_LOCK(adapter);
5771 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5774 if (adapter->vfs == NULL) {
5775 adapter->num_vfs = 0;
5776 IXGBE_CORE_UNLOCK(adapter);
5780 ixgbe_init_locked(adapter);
5782 IXGBE_CORE_UNLOCK(adapter);
/*
 * Tear down SR-IOV: re-enable RX/TX for the PF pool only, clear the
 * VFRE/VFTE enables for the VF register banks, disable virtualization
 * (VT_CTL = 0), and free the per-VF state array.
 */
5789 ixgbe_uninit_iov(device_t dev)
5791 struct ixgbe_hw *hw;
5792 struct adapter *adapter;
5793 uint32_t pf_reg, vf_reg;
5795 adapter = device_get_softc(dev);
5798 IXGBE_CORE_LOCK(adapter);
5800 /* Enable rx/tx for the PF and disable it for all VFs. */
5801 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5802 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5803 IXGBE_VF_BIT(adapter->pool));
5804 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5805 IXGBE_VF_BIT(adapter->pool));
5811 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5812 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5814 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5816 free(adapter->vfs, M_IXGBE);
5817 adapter->vfs = NULL;
5818 adapter->num_vfs = 0;
5820 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the MAC for the selected SR-IOV mode: VMDq/RSS queueing
 * (MRQC/MTQC), MSI-X VT layout (GCR_EXT), interrupt pool mode (GPIE),
 * PF RX/TX enables, loopback for VM-to-VM traffic, the VT_CTL default
 * pool, and finally per-VF init.
 */
5825 ixgbe_initialize_iov(struct adapter *adapter)
5827 struct ixgbe_hw *hw = &adapter->hw;
5828 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5829 enum ixgbe_iov_mode mode;
5832 mode = ixgbe_get_iov_mode(adapter);
5833 if (mode == IXGBE_NO_VM)
5836 IXGBE_CORE_LOCK_ASSERT(adapter);
5838 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5839 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5843 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5846 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5849 panic("Unexpected SR-IOV mode %d", mode);
5851 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5853 mtqc = IXGBE_MTQC_VT_ENA;
5856 mtqc |= IXGBE_MTQC_64VF;
5859 mtqc |= IXGBE_MTQC_32VF;
5862 panic("Unexpected SR-IOV mode %d", mode);
5864 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5867 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5868 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5869 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5872 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5875 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5878 panic("Unexpected SR-IOV mode %d", mode);
5880 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5883 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* NOTE(review): likely bug — this masks gcr_ext with a GPIE constant
 * while configuring gpie; it should almost certainly read
 * "gpie &= ~IXGBE_GPIE_VTMODE_MASK;". As written, stale VTMODE bits in
 * GPIE are never cleared and GCR_EXT (already written above) is
 * corrupted in its local copy. Confirm against the datasheet/upstream. */
5884 gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
5887 gpie |= IXGBE_GPIE_VTMODE_64;
5890 gpie |= IXGBE_GPIE_VTMODE_32;
5893 panic("Unexpected SR-IOV mode %d", mode);
5895 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5897 /* Enable rx/tx for the PF. */
5898 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5899 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5900 IXGBE_VF_BIT(adapter->pool));
5901 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5902 IXGBE_VF_BIT(adapter->pool));
5904 /* Allow VM-to-VM communication. */
5905 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5907 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5908 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5909 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5911 for (i = 0; i < adapter->num_vfs; i++)
5912 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5917 ** Check the max frame setting of all active VF's
5920 ixgbe_recalculate_max_frame(struct adapter *adapter)
5922 struct ixgbe_vf *vf;
5924 IXGBE_CORE_LOCK_ASSERT(adapter);
/* Fold every active VF's max frame size into the adapter-wide maximum. */
5926 for (int i = 0; i < adapter->num_vfs; i++) {
5927 vf = &adapter->vfs[i];
5928 if (vf->flags & IXGBE_VF_ACTIVE)
5929 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * (Re)initialize one VF: unmask its mailbox interrupt, restore its
 * default VLAN and RAR entry (if the MAC is valid), enable TX/RX, and
 * ping it with a PF control message. No-op for inactive VFs.
 */
5935 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5937 struct ixgbe_hw *hw;
5938 uint32_t vf_index, pfmbimr;
5940 IXGBE_CORE_LOCK_ASSERT(adapter);
5944 if (!(vf->flags & IXGBE_VF_ACTIVE))
5947 vf_index = IXGBE_VF_INDEX(vf->pool);
5948 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5949 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5950 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5952 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5954 // XXX multicast addresses
5956 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5957 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5958 vf->ether_addr, vf->pool, TRUE);
5961 ixgbe_vf_enable_transmit(adapter, vf);
5962 ixgbe_vf_enable_receive(adapter, vf);
5964 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * PCI SR-IOV add-VF entry point: populate the VF's slot from the nvlist
 * config (optional fixed MAC and allow-set-mac capability), assign its
 * RAR index, default frame size, and ACTIVE flag, then initialize it.
 */
5968 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5970 struct adapter *adapter;
5971 struct ixgbe_vf *vf;
5974 adapter = device_get_softc(dev);
5976 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5977 vfnum, adapter->num_vfs));
5979 IXGBE_CORE_LOCK(adapter);
5980 vf = &adapter->vfs[vfnum];
5983 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5984 vf->rar_index = vfnum + 1;
5985 vf->default_vlan = 0;
5986 vf->max_frame_size = ETHER_MAX_LEN;
5987 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5989 if (nvlist_exists_binary(config, "mac-addr")) {
5990 mac = nvlist_get_binary(config, "mac-addr", NULL);
5991 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5992 if (nvlist_get_bool(config, "allow-set-mac"))
5993 vf->flags |= IXGBE_VF_CAP_MAC;
5996 * If the administrator has not specified a MAC address then
5997 * we must allow the VF to choose one.
5999 vf->flags |= IXGBE_VF_CAP_MAC;
/* NOTE(review): '=' here overwrites any CAP_MAC flag set above; with
 * lines missing from this excerpt it is unclear whether this is
 * intentional ordering — confirm against the full source (upstream
 * uses |=-style accumulation around this point). */
6001 vf->flags = IXGBE_VF_ACTIVE;
6003 ixgbe_init_vf(adapter, vf);
6004 IXGBE_CORE_UNLOCK(adapter);
6008 #endif /* PCI_IOV */