1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
44 #include <net/rss_config.h>
45 #include <netinet/in_rss.h>
48 /*********************************************************************
50 *********************************************************************/
51 char ixgbe_driver_version[] = "3.1.13-k";
54 /*********************************************************************
57 * Used by probe to select devices to load on
58 * Last field stores an index into ixgbe_strings
59 * Last entry must be all 0s
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
64 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
/* 82598 family (first-generation 10GbE MAC) */
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
/* 82599 family */
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
/* X540 family */
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
/* X550 / X550EM family */
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
98 /* required last entry */
102 /*********************************************************************
103 * Table of branding strings
104 *********************************************************************/
/* Indexed by the string-index field of ixgbe_vendor_info_array (all 0 here) */
106 static char *ixgbe_strings[] = {
107 "Intel(R) PRO/10GbE PCI-Express Network Driver"
110 /*********************************************************************
111 * Function prototypes
112 *********************************************************************/
/* newbus device interface entry points */
113 static int ixgbe_probe(device_t);
114 static int ixgbe_attach(device_t);
115 static int ixgbe_detach(device_t);
116 static int ixgbe_shutdown(device_t);
117 static int ixgbe_suspend(device_t);
118 static int ixgbe_resume(device_t);
119 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
120 static void ixgbe_init(void *);
121 static void ixgbe_init_locked(struct adapter *);
122 static void ixgbe_stop(void *);
123 #if __FreeBSD_version >= 1100036
124 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* Media, hardware identification and resource allocation */
126 static void ixgbe_add_media_types(struct adapter *);
127 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
128 static int ixgbe_media_change(struct ifnet *);
129 static void ixgbe_identify_hardware(struct adapter *);
130 static int ixgbe_allocate_pci_resources(struct adapter *);
131 static void ixgbe_get_slot_info(struct adapter *);
132 static int ixgbe_allocate_msix(struct adapter *);
133 static int ixgbe_allocate_legacy(struct adapter *);
134 static int ixgbe_setup_msix(struct adapter *);
135 static void ixgbe_free_pci_resources(struct adapter *);
136 static void ixgbe_local_timer(void *);
137 static int ixgbe_setup_interface(device_t, struct adapter *);
138 static void ixgbe_config_gpie(struct adapter *);
139 static void ixgbe_config_dmac(struct adapter *);
140 static void ixgbe_config_delay_values(struct adapter *);
141 static void ixgbe_config_link(struct adapter *);
142 static void ixgbe_check_wol_support(struct adapter *);
143 static int ixgbe_setup_low_power_mode(struct adapter *);
144 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring initialization and RSS setup */
146 static void ixgbe_initialize_transmit_units(struct adapter *);
147 static void ixgbe_initialize_receive_units(struct adapter *);
148 static void ixgbe_enable_rx_drop(struct adapter *);
149 static void ixgbe_disable_rx_drop(struct adapter *);
150 static void ixgbe_initialize_rss_mapping(struct adapter *);
/* Interrupt control, stats, filtering */
152 static void ixgbe_enable_intr(struct adapter *);
153 static void ixgbe_disable_intr(struct adapter *);
154 static void ixgbe_update_stats_counters(struct adapter *);
155 static void ixgbe_set_promisc(struct adapter *);
156 static void ixgbe_set_multi(struct adapter *);
157 static void ixgbe_update_link_status(struct adapter *);
158 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
159 static void ixgbe_configure_ivars(struct adapter *);
160 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static void ixgbe_setup_vlan_hw_support(struct adapter *);
163 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
164 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
166 static void ixgbe_add_device_sysctls(struct adapter *);
167 static void ixgbe_add_hw_stats(struct adapter *);
168 static int ixgbe_set_flowcntl(struct adapter *, int);
169 static int ixgbe_set_advertise(struct adapter *, int);
171 /* Sysctl handlers */
172 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
173 const char *, int *, int);
174 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
175 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
176 static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
177 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
178 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
179 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
/* Wake-on-LAN and Energy Efficient Ethernet sysctls */
184 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
188 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
192 /* Support for pluggable optic modules */
193 static bool ixgbe_sfp_probe(struct adapter *);
194 static void ixgbe_setup_optics(struct adapter *);
196 /* Legacy (single vector interrupt handler */
197 static void ixgbe_legacy_irq(void *);
199 /* The MSI/X Interrupt handlers */
200 static void ixgbe_msix_que(void *);
201 static void ixgbe_msix_link(void *);
203 /* Deferred interrupt tasklets */
204 static void ixgbe_handle_que(void *, int);
205 static void ixgbe_handle_link(void *, int);
206 static void ixgbe_handle_msf(void *, int);
207 static void ixgbe_handle_mod(void *, int);
208 static void ixgbe_handle_phy(void *, int);
/* Flow Director re-init task */
211 static void ixgbe_reinit_fdir(void *, int);
/* SR-IOV (PF/VF) support -- presumably guarded by PCI_IOV elsewhere; confirm */
215 static void ixgbe_ping_all_vfs(struct adapter *);
216 static void ixgbe_handle_mbx(void *, int);
217 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
218 static void ixgbe_uninit_iov(device_t);
219 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
220 static void ixgbe_initialize_iov(struct adapter *);
221 static void ixgbe_recalculate_max_frame(struct adapter *);
222 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
226 /*********************************************************************
227 * FreeBSD Device Interface Entry Points
228 *********************************************************************/
/* newbus method table mapping generic device ops to ixgbe_* handlers */
230 static device_method_t ix_methods[] = {
231 /* Device interface */
232 DEVMETHOD(device_probe, ixgbe_probe),
233 DEVMETHOD(device_attach, ixgbe_attach),
234 DEVMETHOD(device_detach, ixgbe_detach),
235 DEVMETHOD(device_shutdown, ixgbe_shutdown),
236 DEVMETHOD(device_suspend, ixgbe_suspend),
237 DEVMETHOD(device_resume, ixgbe_resume),
/* SR-IOV methods invoked via the pci_iov framework */
239 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
240 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
241 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* softc is the per-device struct adapter */
246 static driver_t ix_driver = {
247 "ix", ix_methods, sizeof(struct adapter),
250 devclass_t ix_devclass;
251 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
253 MODULE_DEPEND(ix, pci, 1, 1, 1);
254 MODULE_DEPEND(ix, ether, 1, 1, 1);
/* netmap dependency -- appears conditional (see #endif below) */
256 MODULE_DEPEND(ix, netmap, 1, 1, 1);
257 #endif /* DEV_NETMAP */
260 ** TUNEABLE PARAMETERS:
/* Parent sysctl node: hw.ix.* */
263 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
264 "IXGBE driver parameters");
267 ** AIM: Adaptive Interrupt Moderation
268 ** which means that the interrupt rate
269 ** is varied over time based on the
270 ** traffic for that interrupt vector
/* hw.ix.enable_aim: runtime-writable (RWTUN) */
272 static int ixgbe_enable_aim = TRUE;
273 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
274 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
275 "Enable adaptive interrupt moderation");
/* Interrupt throttling ceiling (interrupts/sec); boot-time tunable only */
277 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
278 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
279 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
280 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
282 /* How many packets rxeof tries to clean at a time */
283 static int ixgbe_rx_process_limit = 256;
284 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
285 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
286 &ixgbe_rx_process_limit, 0,
/* Adjacent literals concatenate; trailing space keeps the description
 * reading "at a time, -1 means unlimited" rather than "at a time,-1". */
287 "Maximum number of received packets to process at a time, "
288 "-1 means unlimited");
290 /* How many packets txeof tries to clean at a time */
291 static int ixgbe_tx_process_limit = 256;
292 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
293 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
294 &ixgbe_tx_process_limit, 0,
/* Adjacent literals concatenate; trailing space keeps the description
 * reading "at a time, -1 means unlimited" rather than "at a time,-1". */
295 "Maximum number of sent packets to process at a time, "
296 "-1 means unlimited");
298 /* Flow control setting, default to full */
299 static int ixgbe_flow_control = ixgbe_fc_full;
300 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
301 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
303 /* Advertise Speed, default to 0 (auto) */
304 static int ixgbe_advertise_speed = 0;
305 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
306 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
309 ** Smart speed setting, default to on
310 ** this only works as a compile option
311 ** right now as its during attach, set
312 ** this to 'ixgbe_smart_speed_off' to
/* Compile-time only; no tunable/sysctl is registered for this knob */
315 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
318 * MSIX should be the default for best performance,
319 * but this allows it to be forced off for testing.
321 static int ixgbe_enable_msix = 1;
322 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
323 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
324 "Enable MSI-X interrupts");
327 * Number of Queues, can be set to 0,
328 * it then autoconfigures based on the
329 * number of cpus with a max of 8. This
330 * can be overridden manually here.
332 static int ixgbe_num_queues = 0;
333 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
334 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
/* Fixed typo "mximum" and added the space the literal concatenation dropped */
335 "Number of queues to configure up to a maximum of 8, "
336 "0 indicates autoconfigure");
339 ** Number of TX descriptors per ring,
340 ** setting higher than RX as this seems
341 ** the better performing choice.
343 static int ixgbe_txd = PERFORM_TXD;
344 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
345 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
346 "Number of transmit descriptors per queue");
348 /* Number of RX descriptors per ring */
349 static int ixgbe_rxd = PERFORM_RXD;
350 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
351 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
352 "Number of receive descriptors per queue");
355 ** Defining this on will allow the use
356 ** of unsupported SFP+ modules, note that
357 ** doing so you are on your own :)
/* Boot-time tunable only; no matching sysctl registered for this knob */
359 static int allow_unsupported_sfp = FALSE;
360 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
362 /* Keep running tab on them for sanity check */
363 static int ixgbe_total_ports;
367 ** Flow Director actually 'steals'
368 ** part of the packet buffer as its
369 ** filter pool, this variable controls
371 ** 0 = 64K, 1 = 128K, 2 = 256K
373 static int fdir_pballoc = 1;
378 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
379 * be a reference on how to implement netmap support in a driver.
380 * Additional comments are in ixgbe_netmap.h .
382 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
383 * that extend the standard driver.
385 #include <dev/netmap/ixgbe_netmap.h>
386 #endif /* DEV_NETMAP */
/* malloc(9) type used for this driver's dynamic allocations */
388 static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
390 /*********************************************************************
391 * Device identification routine
393 * ixgbe_probe determines if the driver should be loaded on
394 * adapter based on PCI vendor/device id of the adapter.
396 * return BUS_PROBE_DEFAULT on success, positive on failure
397 *********************************************************************/
400 ixgbe_probe(device_t dev)
402 ixgbe_vendor_info_t *ent;
404 u16 pci_vendor_id = 0;
405 u16 pci_device_id = 0;
406 u16 pci_subvendor_id = 0;
407 u16 pci_subdevice_id = 0;
408 char adapter_name[256];
410 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Cheap reject: anything not Intel cannot be ours */
412 pci_vendor_id = pci_get_vendor(dev);
413 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
416 pci_device_id = pci_get_device(dev);
417 pci_subvendor_id = pci_get_subvendor(dev);
418 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the table until the all-zero sentinel entry; a subvendor or
 * subdevice of 0 in the table acts as a wildcard match. */
420 ent = ixgbe_vendor_info_array;
421 while (ent->vendor_id != 0) {
422 if ((pci_vendor_id == ent->vendor_id) &&
423 (pci_device_id == ent->device_id) &&
425 ((pci_subvendor_id == ent->subvendor_id) ||
426 (ent->subvendor_id == 0)) &&
428 ((pci_subdevice_id == ent->subdevice_id) ||
429 (ent->subdevice_id == 0))) {
/* Branding string is looked up via the table's string index */
430 sprintf(adapter_name, "%s, Version - %s",
431 ixgbe_strings[ent->index],
432 ixgbe_driver_version);
433 device_set_desc_copy(dev, adapter_name);
435 return (BUS_PROBE_DEFAULT);
442 /*********************************************************************
443 * Device initialization routine
445 * The attach entry point is called when the driver is being loaded.
446 * This routine identifies the type of hardware, allocates all resources
447 * and initializes the hardware.
449 * return 0 on success, positive on failure
450 *********************************************************************/
453 ixgbe_attach(device_t dev)
455 struct adapter *adapter;
461 INIT_DEBUGOUT("ixgbe_attach: begin");
463 /* Allocate, clear, and link in our adapter structure */
464 adapter = device_get_softc(dev);
469 adapter->init_locked = ixgbe_init_locked;
470 adapter->stop_locked = ixgbe_stop;
/* Core mutex must exist before the callout, which is tied to it */
473 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
475 /* Set up the timer callout */
476 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
478 /* Determine hardware revision */
479 ixgbe_identify_hardware(adapter);
481 /* Do base PCI setup - map BAR0 */
482 if (ixgbe_allocate_pci_resources(adapter)) {
483 device_printf(dev, "Allocation of PCI resources failed\n");
488 /* Sysctls for limiting the amount of work done in the taskqueues */
489 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
490 "max number of rx packets to process",
491 &adapter->rx_process_limit, ixgbe_rx_process_limit);
493 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
494 "max number of tx packets to process",
495 &adapter->tx_process_limit, ixgbe_tx_process_limit);
497 /* Do descriptor calc and sanity checks */
/* Ring byte size must be DBA_ALIGN-aligned and count within [MIN,MAX] */
498 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
499 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
500 device_printf(dev, "TXD config issue, using default!\n");
501 adapter->num_tx_desc = DEFAULT_TXD;
503 adapter->num_tx_desc = ixgbe_txd;
506 ** With many RX rings it is easy to exceed the
507 ** system mbuf allocation. Tuning nmbclusters
508 ** can alleviate this.
510 if (nmbclusters > 0) {
512 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
513 if (s > nmbclusters) {
514 device_printf(dev, "RX Descriptors exceed "
515 "system mbuf max, using default instead!\n");
516 ixgbe_rxd = DEFAULT_RXD;
520 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
521 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
522 device_printf(dev, "RXD config issue, using default!\n");
523 adapter->num_rx_desc = DEFAULT_RXD;
525 adapter->num_rx_desc = ixgbe_rxd;
527 /* Allocate our TX/RX Queues */
528 if (ixgbe_allocate_queues(adapter)) {
533 /* Allocate multicast array memory. */
534 adapter->mta = malloc(sizeof(*adapter->mta) *
535 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
536 if (adapter->mta == NULL) {
537 device_printf(dev, "Can not allocate multicast setup array\n");
542 /* Initialize the shared code */
543 hw->allow_unsupported_sfp = allow_unsupported_sfp;
544 error = ixgbe_init_shared_code(hw);
545 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
547 ** No optics in this port, set up
548 ** so the timer routine will probe
549 ** for later insertion.
551 adapter->sfp_probe = TRUE;
553 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
554 device_printf(dev, "Unsupported SFP+ module detected!\n");
558 device_printf(dev, "Unable to initialize the shared code\n");
563 /* Make sure we have a good EEPROM before we read from it */
564 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
565 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
/* init_hw failures are reported but some are non-fatal (see cases) */
570 error = ixgbe_init_hw(hw);
572 case IXGBE_ERR_EEPROM_VERSION:
573 device_printf(dev, "This device is a pre-production adapter/"
574 "LOM. Please be aware there may be issues associated "
575 "with your hardware.\nIf you are experiencing problems "
576 "please contact your Intel or hardware representative "
577 "who provided you with this hardware.\n");
579 case IXGBE_ERR_SFP_NOT_SUPPORTED:
580 device_printf(dev, "Unsupported SFP+ Module\n");
583 case IXGBE_ERR_SFP_NOT_PRESENT:
584 device_printf(dev, "No SFP+ Module found\n");
590 /* hw.ix defaults init */
591 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
592 ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
593 adapter->enable_aim = ixgbe_enable_aim;
/* Prefer MSI-X when more than one vector was granted and not disabled */
595 if ((adapter->msix > 1) && (ixgbe_enable_msix))
596 error = ixgbe_allocate_msix(adapter);
598 error = ixgbe_allocate_legacy(adapter);
602 /* Enable the optics for 82599 SFP+ fiber */
603 ixgbe_enable_tx_laser(hw);
605 /* Enable power to the phy. */
606 ixgbe_set_phy_power(hw, TRUE);
608 /* Setup OS specific network interface */
609 if (ixgbe_setup_interface(dev, adapter) != 0)
612 /* Initialize statistics */
613 ixgbe_update_stats_counters(adapter);
615 /* Register for VLAN events */
616 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
617 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
618 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
619 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
621 /* Check PCIE slot type/speed/width */
622 ixgbe_get_slot_info(adapter);
624 /* Set an initial default flow control & dmac value */
625 adapter->fc = ixgbe_fc_full;
627 adapter->eee_enabled = 0;
/* SR-IOV schemas: 82598 has no VF support, and MSI-X is required */
630 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
631 nvlist_t *pf_schema, *vf_schema;
633 hw->mbx.ops.init_params(hw);
634 pf_schema = pci_iov_schema_alloc_node();
635 vf_schema = pci_iov_schema_alloc_node();
636 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
637 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
638 IOV_SCHEMA_HASDEFAULT, TRUE);
639 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
640 IOV_SCHEMA_HASDEFAULT, FALSE);
641 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
642 IOV_SCHEMA_HASDEFAULT, FALSE);
643 error = pci_iov_attach(dev, pf_schema, vf_schema);
646 "Error %d setting up SR-IOV\n", error);
651 /* Check for certain supported features */
652 ixgbe_check_wol_support(adapter);
655 ixgbe_add_device_sysctls(adapter);
656 ixgbe_add_hw_stats(adapter);
658 /* let hardware know driver is loaded */
659 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
660 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
661 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
664 ixgbe_netmap_attach(adapter);
665 #endif /* DEV_NETMAP */
666 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-unwind path: release everything acquired above */
670 ixgbe_free_transmit_structures(adapter);
671 ixgbe_free_receive_structures(adapter);
673 if (adapter->ifp != NULL)
674 if_free(adapter->ifp);
675 ixgbe_free_pci_resources(adapter);
676 free(adapter->mta, M_DEVBUF);
680 /*********************************************************************
681 * Device removal routine
683 * The detach entry point is called when the driver is being removed.
684 * This routine stops the adapter and deallocates all the resources
685 * that were allocated for driver operation.
687 * return 0 on success, positive on failure
688 *********************************************************************/
691 ixgbe_detach(device_t dev)
693 struct adapter *adapter = device_get_softc(dev);
694 struct ix_queue *que = adapter->queues;
695 struct tx_ring *txr = adapter->tx_rings;
698 INIT_DEBUGOUT("ixgbe_detach: begin");
700 /* Make sure VLANS are not using driver */
701 if (adapter->ifp->if_vlantrunk != NULL) {
702 device_printf(dev,"Vlan in use, detach first\n");
/* Refuse to detach while VFs are attached to this PF */
707 if (pci_iov_detach(dev) != 0) {
708 device_printf(dev, "SR-IOV in use; detach first.\n");
713 ether_ifdetach(adapter->ifp);
714 /* Stop the adapter */
715 IXGBE_CORE_LOCK(adapter);
716 ixgbe_setup_low_power_mode(adapter);
717 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free per-queue taskqueues before tearing down rings */
719 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
721 #ifndef IXGBE_LEGACY_TX
722 taskqueue_drain(que->tq, &txr->txq_task);
724 taskqueue_drain(que->tq, &que->que_task);
725 taskqueue_free(que->tq);
729 /* Drain the Link queue */
731 taskqueue_drain(adapter->tq, &adapter->link_task);
732 taskqueue_drain(adapter->tq, &adapter->mod_task);
733 taskqueue_drain(adapter->tq, &adapter->msf_task);
735 taskqueue_drain(adapter->tq, &adapter->mbx_task);
737 taskqueue_drain(adapter->tq, &adapter->phy_task);
739 taskqueue_drain(adapter->tq, &adapter->fdir_task);
741 taskqueue_free(adapter->tq);
744 /* let hardware know driver is unloading */
745 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
746 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
747 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
749 /* Unregister VLAN events */
750 if (adapter->vlan_attach != NULL)
751 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
752 if (adapter->vlan_detach != NULL)
753 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
755 callout_drain(&adapter->timer);
757 netmap_detach(adapter->ifp);
758 #endif /* DEV_NETMAP */
759 ixgbe_free_pci_resources(adapter);
760 bus_generic_detach(dev);
761 if_free(adapter->ifp);
763 ixgbe_free_transmit_structures(adapter);
764 ixgbe_free_receive_structures(adapter);
765 free(adapter->mta, M_DEVBUF);
/* Lock destroyed last: nothing above may still need it */
767 IXGBE_CORE_LOCK_DESTROY(adapter);
771 /*********************************************************************
773 * Shutdown entry point
775 **********************************************************************/
778 ixgbe_shutdown(device_t dev)
780 struct adapter *adapter = device_get_softc(dev);
783 INIT_DEBUGOUT("ixgbe_shutdown: begin");
/* Quiesce the device into its low-power state under the core lock */
785 IXGBE_CORE_LOCK(adapter);
786 error = ixgbe_setup_low_power_mode(adapter);
787 IXGBE_CORE_UNLOCK(adapter);
793 * Methods for going from:
794 * D0 -> D3: ixgbe_suspend
795 * D3 -> D0: ixgbe_resume
798 ixgbe_suspend(device_t dev)
800 struct adapter *adapter = device_get_softc(dev);
803 INIT_DEBUGOUT("ixgbe_suspend: begin");
/* Same low-power transition as shutdown, taken under the core lock */
805 IXGBE_CORE_LOCK(adapter);
807 error = ixgbe_setup_low_power_mode(adapter);
809 IXGBE_CORE_UNLOCK(adapter);
815 ixgbe_resume(device_t dev)
817 struct adapter *adapter = device_get_softc(dev);
818 struct ifnet *ifp = adapter->ifp;
819 struct ixgbe_hw *hw = &adapter->hw;
822 INIT_DEBUGOUT("ixgbe_resume: begin");
824 IXGBE_CORE_LOCK(adapter);
826 /* Read & clear WUS register */
827 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
/* Report what woke us, then write-1-to-clear all WUS bits */
829 device_printf(dev, "Woken up by (WUS): %#010x\n",
830 IXGBE_READ_REG(hw, IXGBE_WUS));
831 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
832 /* And clear WUFC until next low-power transition */
833 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
836 * Required after D3->D0 transition;
837 * will re-advertise all previous advertised speeds
839 if (ifp->if_flags & IFF_UP)
840 ixgbe_init_locked(adapter);
842 IXGBE_CORE_UNLOCK(adapter);
848 /*********************************************************************
851 * ixgbe_ioctl is called when the user wants to configure the
854 * return 0 on success, positive on failure
855 **********************************************************************/
858 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
860 struct adapter *adapter = ifp->if_softc;
861 struct ifreq *ifr = (struct ifreq *) data;
862 #if defined(INET) || defined(INET6)
863 struct ifaddr *ifa = (struct ifaddr *)data;
866 bool avoid_reset = FALSE;
/* SIOCSIFADDR handling: only IPv4/IPv6 set the avoid-reset path */
872 if (ifa->ifa_addr->sa_family == AF_INET)
876 if (ifa->ifa_addr->sa_family == AF_INET6)
880 ** Calling init results in link renegotiation,
881 ** so we avoid doing it when possible.
884 ifp->if_flags |= IFF_UP;
885 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
888 if (!(ifp->if_flags & IFF_NOARP))
889 arp_ifinit(ifp, ifa);
892 error = ether_ioctl(ifp, command, data);
895 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
896 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
899 IXGBE_CORE_LOCK(adapter);
900 ifp->if_mtu = ifr->ifr_mtu;
901 adapter->max_frame_size =
902 ifp->if_mtu + IXGBE_MTU_HDR;
903 ixgbe_init_locked(adapter);
905 ixgbe_recalculate_max_frame(adapter);
907 IXGBE_CORE_UNLOCK(adapter);
911 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
912 IXGBE_CORE_LOCK(adapter);
913 if (ifp->if_flags & IFF_UP) {
914 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only PROMISC/ALLMULTI toggles need action while running */
915 if ((ifp->if_flags ^ adapter->if_flags) &
916 (IFF_PROMISC | IFF_ALLMULTI)) {
917 ixgbe_set_promisc(adapter);
920 ixgbe_init_locked(adapter);
922 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
924 adapter->if_flags = ifp->if_flags;
925 IXGBE_CORE_UNLOCK(adapter);
929 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
/* Reprogram multicast filters with interrupts masked */
930 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
931 IXGBE_CORE_LOCK(adapter);
932 ixgbe_disable_intr(adapter);
933 ixgbe_set_multi(adapter);
934 ixgbe_enable_intr(adapter);
935 IXGBE_CORE_UNLOCK(adapter);
940 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
941 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
945 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
947 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
951 /* HW cannot turn these on/off separately */
952 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
953 ifp->if_capenable ^= IFCAP_RXCSUM;
954 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
956 if (mask & IFCAP_TXCSUM)
957 ifp->if_capenable ^= IFCAP_TXCSUM;
958 if (mask & IFCAP_TXCSUM_IPV6)
959 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
960 if (mask & IFCAP_TSO4)
961 ifp->if_capenable ^= IFCAP_TSO4;
962 if (mask & IFCAP_TSO6)
963 ifp->if_capenable ^= IFCAP_TSO6;
964 if (mask & IFCAP_LRO)
965 ifp->if_capenable ^= IFCAP_LRO;
966 if (mask & IFCAP_VLAN_HWTAGGING)
967 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
968 if (mask & IFCAP_VLAN_HWFILTER)
969 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
970 if (mask & IFCAP_VLAN_HWTSO)
971 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Re-init so the new capability set takes effect in hardware */
973 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
974 IXGBE_CORE_LOCK(adapter);
975 ixgbe_init_locked(adapter);
976 IXGBE_CORE_UNLOCK(adapter);
978 VLAN_CAPABILITIES(ifp);
981 #if __FreeBSD_version >= 1002500
/* SIOCGI2C: read SFP module EEPROM over I2C for userland (e.g. ifconfig) */
984 struct ixgbe_hw *hw = &adapter->hw;
987 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
988 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
/* Only the standard SFP EEPROM addresses (0xA0/0xA2) are allowed */
991 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
995 if (i2c.len > sizeof(i2c.data)) {
1000 for (i = 0; i < i2c.len; i++)
1001 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
1002 i2c.dev_addr, &i2c.data[i]);
1003 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1008 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1009 error = ether_ioctl(ifp, command, data);
1017 * Set the various hardware offload abilities.
1019 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1020 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1021 * mbuf offload flags the driver will understand.
1024 ixgbe_set_if_hwassist(struct adapter *adapter)
1026 struct ifnet *ifp = adapter->ifp;
1027 struct ixgbe_hw *hw = &adapter->hw;
1029 ifp->if_hwassist = 0;
/* Newer stacks (>= 1000000) have per-family CSUM_IP*_ flags */
1030 #if __FreeBSD_version >= 1000000
1031 if (ifp->if_capenable & IFCAP_TSO4)
1032 ifp->if_hwassist |= CSUM_IP_TSO;
1033 if (ifp->if_capenable & IFCAP_TSO6)
1034 ifp->if_hwassist |= CSUM_IP6_TSO;
1035 if (ifp->if_capenable & IFCAP_TXCSUM) {
1036 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
/* SCTP checksum offload is not supported on 82598 */
1037 if (hw->mac.type != ixgbe_mac_82598EB)
1038 ifp->if_hwassist |= CSUM_IP_SCTP;
1040 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1041 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1042 if (hw->mac.type != ixgbe_mac_82598EB)
1043 ifp->if_hwassist |= CSUM_IP6_SCTP;
/* Legacy-stack branch: combined CSUM_* flags */
1046 if (ifp->if_capenable & IFCAP_TSO)
1047 ifp->if_hwassist |= CSUM_TSO;
1048 if (ifp->if_capenable & IFCAP_TXCSUM) {
1049 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1050 if (hw->mac.type != ixgbe_mac_82598EB)
1051 ifp->if_hwassist |= CSUM_SCTP;
1056 /*********************************************************************
1059 * This routine is used in two ways. It is used by the stack as
1060 * init entry point in network interface structure. It is also used
1061 * by the driver as a hw/sw initialization routine to get to a
1064 * return 0 on success, positive on failure
1065 **********************************************************************/
1066 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * Full hardware/software (re)initialization. Caller must hold the core
 * mutex (asserted below). Brings the adapter from stopped to running:
 * resets the MAC, programs addressing, TX/RX rings, interrupts, flow
 * control, and finally marks the interface IFF_DRV_RUNNING.
 * NOTE(review): the listing elides many lines (braces, #ifdef PCI_IOV /
 * DEV_NETMAP guards, error returns); comments cover visible code only.
 */
1069 ixgbe_init_locked(struct adapter *adapter)
1071 struct ifnet *ifp = adapter->ifp;
1072 device_t dev = adapter->dev;
1073 struct ixgbe_hw *hw = &adapter->hw;
1074 struct tx_ring *txr;
1075 struct rx_ring *rxr;
1080 enum ixgbe_iov_mode mode;
1083 mtx_assert(&adapter->core_mtx, MA_OWNED);
1084 INIT_DEBUGOUT("ixgbe_init_locked: begin");
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs. */
1086 hw->adapter_stopped = FALSE;
1087 ixgbe_stop_adapter(hw);
1088 callout_stop(&adapter->timer);
/* SR-IOV: pool count and queue indices depend on the current IOV mode. */
1091 mode = ixgbe_get_iov_mode(adapter);
1092 adapter->pool = ixgbe_max_vfs(mode);
1093 /* Queue indices may change with IOV mode */
1094 for (int i = 0; i < adapter->num_queues; i++) {
1095 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1096 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1099 /* reprogram the RAR[0] in case user changed it. */
1100 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1102 /* Get the latest mac address, User can use a LAA */
1103 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1104 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1105 hw->addr_ctrl.rar_used_count = 1;
1107 /* Set hardware offload abilities from ifnet flags */
1108 ixgbe_set_if_hwassist(adapter);
1110 /* Prepare transmit descriptors and buffers */
1111 if (ixgbe_setup_transmit_structures(adapter)) {
1112 device_printf(dev, "Could not setup transmit structures\n");
1113 ixgbe_stop(adapter);
1119 ixgbe_initialize_iov(adapter);
1121 ixgbe_initialize_transmit_units(adapter);
1123 /* Setup Multicast table */
1124 ixgbe_set_multi(adapter);
1126 /* Determine the correct mbuf pool, based on frame size */
1127 if (adapter->max_frame_size <= MCLBYTES)
1128 adapter->rx_mbuf_sz = MCLBYTES;
1130 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1132 /* Prepare receive descriptors and buffers */
1133 if (ixgbe_setup_receive_structures(adapter)) {
1134 device_printf(dev, "Could not setup receive structures\n");
1135 ixgbe_stop(adapter);
1139 /* Configure RX settings */
1140 ixgbe_initialize_receive_units(adapter);
1142 /* Enable SDP & MSIX interrupts based on adapter */
1143 ixgbe_config_gpie(adapter);
/* Jumbo frames: program the max frame size register (MHADD/MAXFRS). */
1146 if (ifp->if_mtu > ETHERMTU) {
1147 /* aka IXGBE_MAXFRS on 82599 and newer */
1148 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1149 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1150 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1151 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1154 /* Now enable all the queues */
1155 for (int i = 0; i < adapter->num_queues; i++) {
1156 txr = &adapter->tx_rings[i];
1157 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1158 txdctl |= IXGBE_TXDCTL_ENABLE;
1159 /* Set WTHRESH to 8, burst writeback */
1160 txdctl |= (8 << 16);
1162 * When the internal queue falls below PTHRESH (32),
1163 * start prefetching as long as there are at least
1164 * HTHRESH (1) buffers ready. The values are taken
1165 * from the Intel linux driver 3.8.21.
1166 * Prefetching enables tx line rate even with 1 queue.
1168 txdctl |= (32 << 0) | (1 << 8);
1169 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX queue and poll (bounded, shared budget j) for it to latch. */
1172 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1173 rxr = &adapter->rx_rings[i];
1174 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1175 if (hw->mac.type == ixgbe_mac_82598EB) {
1181 rxdctl &= ~0x3FFFFF;
1184 rxdctl |= IXGBE_RXDCTL_ENABLE;
1185 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1186 for (; j < 10; j++) {
1187 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1188 IXGBE_RXDCTL_ENABLE)
1196 * In netmap mode, we must preserve the buffers made
1197 * available to userspace before the if_init()
1198 * (this is true by default on the TX side, because
1199 * init makes all buffers available to userspace).
1201 * netmap_reset() and the device specific routines
1202 * (e.g. ixgbe_setup_receive_rings()) map these
1203 * buffers at the end of the NIC ring, so here we
1204 * must set the RDT (tail) register to make sure
1205 * they are not overwritten.
1207 * In this driver the NIC ring starts at RDH = 0,
1208 * RDT points to the last slot available for reception (?),
1209 * so RDT = num_rx_desc - 1 means the whole ring is available.
1211 if (ifp->if_capenable & IFCAP_NETMAP) {
1212 struct netmap_adapter *na = NA(adapter->ifp);
1213 struct netmap_kring *kring = &na->rx_rings[i];
1214 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1216 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1218 #endif /* DEV_NETMAP */
1219 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1222 /* Enable Receive engine */
1223 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1224 if (hw->mac.type == ixgbe_mac_82598EB)
1225 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1226 rxctrl |= IXGBE_RXCTRL_RXEN;
1227 ixgbe_enable_rx_dma(hw, rxctrl);
/* Restart the 1 Hz watchdog/statistics timer. */
1229 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1231 /* Set up MSI/X routing */
1232 if (ixgbe_enable_msix) {
1233 ixgbe_configure_ivars(adapter);
1234 /* Set up auto-mask */
1235 if (hw->mac.type == ixgbe_mac_82598EB)
1236 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1238 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1241 } else { /* Simple settings for Legacy/MSI */
1242 ixgbe_set_ivar(adapter, 0, 0, 0);
1243 ixgbe_set_ivar(adapter, 0, 0, 1);
1244 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1248 /* Init Flow director */
1249 if (hw->mac.type != ixgbe_mac_82598EB) {
1250 u32 hdrm = 32 << fdir_pballoc;
1252 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1253 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1258 * Check on any SFP devices that
1259 * need to be kick-started
1261 if (hw->phy.type == ixgbe_phy_none) {
1262 err = hw->phy.ops.identify(hw);
1263 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1265 "Unsupported SFP+ module type was detected.\n");
1270 /* Set moderation on the Link interrupt */
1271 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1273 /* Configure Energy Efficient Ethernet for supported devices */
1274 if (hw->mac.ops.setup_eee) {
1275 err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
1277 device_printf(dev, "Error setting up EEE: %d\n", err);
1280 /* Enable power to the phy. */
1281 ixgbe_set_phy_power(hw, TRUE);
1283 /* Config/Enable Link */
1284 ixgbe_config_link(adapter);
1286 /* Hardware Packet Buffer & Flow Control setup */
1287 ixgbe_config_delay_values(adapter);
1289 /* Initialize the FC settings */
1292 /* Set up VLAN support and filter */
1293 ixgbe_setup_vlan_hw_support(adapter);
1295 /* Setup DMA Coalescing */
1296 ixgbe_config_dmac(adapter);
1298 /* And now turn on interrupts */
1299 ixgbe_enable_intr(adapter);
1302 /* Enable the use of the MBX by the VF's */
1304 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1305 reg |= IXGBE_CTRL_EXT_PFRSTD;
1306 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1310 /* Now inform the stack we're ready */
1311 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * if_init entry point registered with the stack: acquires the core lock
 * and delegates to ixgbe_init_locked(). arg is the softc (struct adapter).
 */
1317 ixgbe_init(void *arg)
1319 struct adapter *adapter = arg;
1321 IXGBE_CORE_LOCK(adapter);
1322 ixgbe_init_locked(adapter);
1323 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the GPIE register: per-MAC SDP (general purpose) interrupt
 * enables plus Enhanced MSIX mode when more than one vector is in use.
 */
1328 ixgbe_config_gpie(struct adapter *adapter)
1330 struct ixgbe_hw *hw = &adapter->hw;
/* Read-modify-write: preserve bits we do not manage here. */
1333 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1335 /* Fan Failure Interrupt */
1336 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1337 gpie |= IXGBE_SDP1_GPIEN;
1340 * Module detection (SDP2)
1341 * Media ready (SDP1)
1343 if (hw->mac.type == ixgbe_mac_82599EB) {
1344 gpie |= IXGBE_SDP2_GPIEN;
/* QSFP variant uses SDP1 differently, so skip it there. */
1345 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1346 gpie |= IXGBE_SDP1_GPIEN;
1350 * Thermal Failure Detection (X540)
1351 * Link Detection (X552 SFP+, X552/X557-AT)
1353 if (hw->mac.type == ixgbe_mac_X540 ||
1354 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1355 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1356 gpie |= IXGBE_SDP0_GPIEN_X540;
1358 if (adapter->msix > 1) {
1359 /* Enable Enhanced MSIX mode */
1360 gpie |= IXGBE_GPIE_MSIX_MODE;
1361 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1365 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1370 * Requires adapter->max_frame_size to be set.
/*
 * Compute flow-control high/low watermarks from the max frame size and
 * store them (with pause time and XON policy) into hw->fc for the shared
 * code to program. Requires adapter->max_frame_size to be set first.
 * NOTE(review): break/default lines of both switches are elided in this
 * listing; X540/X550 family uses the *_X540 delay formulas, others the
 * base IXGBE_DV formulas.
 */
1373 ixgbe_config_delay_values(struct adapter *adapter)
1375 struct ixgbe_hw *hw = &adapter->hw;
1376 u32 rxpb, frame, size, tmp;
1378 frame = adapter->max_frame_size;
1380 /* Calculate High Water */
1381 switch (hw->mac.type) {
1382 case ixgbe_mac_X540:
1383 case ixgbe_mac_X550:
1384 case ixgbe_mac_X550EM_x:
1385 tmp = IXGBE_DV_X540(frame, frame);
1388 tmp = IXGBE_DV(frame, frame);
/* High water = RX packet buffer size (KB) minus the delay value (KB). */
1391 size = IXGBE_BT2KB(tmp);
1392 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1393 hw->fc.high_water[0] = rxpb - size;
1395 /* Now calculate Low Water */
1396 switch (hw->mac.type) {
1397 case ixgbe_mac_X540:
1398 case ixgbe_mac_X550:
1399 case ixgbe_mac_X550EM_x:
1400 tmp = IXGBE_LOW_DV_X540(frame);
1403 tmp = IXGBE_LOW_DV(frame);
1406 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1408 hw->fc.requested_mode = adapter->fc;
1409 hw->fc.pause_time = IXGBE_FC_PAUSE;
1410 hw->fc.send_xon = TRUE;
1415 ** MSIX Interrupt Handlers and Tasklets
/*
 * Unmask the MSIX interrupt for a single queue vector. 82598 has one
 * 32-bit EIMS register; later MACs split the 64-bit queue mask across
 * EIMS_EX(0) (low 32 bits) and EIMS_EX(1) (high 32 bits).
 */
1420 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1422 struct ixgbe_hw *hw = &adapter->hw;
/* Shift as u64: (1 << vector) is an int shift and overflows (UB) for
 * vector >= 31, losing every bit destined for EIMS_EX(1). */
1423 u64 queue = ((u64)1 << vector);
1426 if (hw->mac.type == ixgbe_mac_82598EB) {
1427 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1428 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1430 mask = (queue & 0xFFFFFFFF);
1432 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1433 mask = (queue >> 32);
1435 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * Mask (disable) the MSIX interrupt for a single queue vector; mirror of
 * ixgbe_enable_queue() but writing the EIMC clear registers instead.
 */
1440 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1442 struct ixgbe_hw *hw = &adapter->hw;
/* Shift as u64: (1 << vector) is an int shift and overflows (UB) for
 * vector >= 31, losing every bit destined for EIMC_EX(1). */
1443 u64 queue = ((u64)1 << vector);
1446 if (hw->mac.type == ixgbe_mac_82598EB) {
1447 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1448 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1450 mask = (queue & 0xFFFFFFFF);
1452 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1453 mask = (queue >> 32);
1455 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Deferred (taskqueue) per-queue service routine: while running, drain
 * RX and restart TX, then re-enable the queue's interrupt. Scheduled
 * from the interrupt handlers when more work remains.
 */
1460 ixgbe_handle_que(void *context, int pending)
1462 struct ix_queue *que = context;
1463 struct adapter *adapter = que->adapter;
1464 struct tx_ring *txr = que->txr;
1465 struct ifnet *ifp = adapter->ifp;
1467 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1471 #ifndef IXGBE_LEGACY_TX
/* Multiqueue path: restart the buf_ring-backed TX queue if non-empty. */
1472 if (!drbr_empty(ifp, txr->br))
1473 ixgbe_mq_start_locked(ifp, txr);
/* Legacy path: drain the ifnet send queue. */
1475 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1476 ixgbe_start_locked(txr, ifp);
1478 IXGBE_TX_UNLOCK(txr);
1481 /* Reenable this interrupt */
/* que->res == NULL means MSI/legacy; then the shared intr is re-enabled. */
1482 if (que->res != NULL)
1483 ixgbe_enable_queue(adapter, que->msix);
1485 ixgbe_enable_intr(adapter);
1490 /*********************************************************************
1492 * Legacy Interrupt Service routine
1494 **********************************************************************/
/*
 * Legacy/MSI interrupt service routine: reads and classifies EICR, drains
 * RX/TX for queue 0, and dispatches link / fan-failure / external-PHY
 * events to the admin taskqueue. Spurious interrupts (EICR == 0) just
 * re-enable and return.
 */
1497 ixgbe_legacy_irq(void *arg)
1499 struct ix_queue *que = arg;
1500 struct adapter *adapter = que->adapter;
1501 struct ixgbe_hw *hw = &adapter->hw;
1502 struct ifnet *ifp = adapter->ifp;
1503 struct tx_ring *txr = adapter->tx_rings;
/* Reading EICR also clears the asserted cause bits. */
1508 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1511 if (reg_eicr == 0) {
1512 ixgbe_enable_intr(adapter);
1516 more = ixgbe_rxeof(que);
1520 #ifdef IXGBE_LEGACY_TX
1521 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1522 ixgbe_start_locked(txr, ifp);
1524 if (!drbr_empty(ifp, txr->br))
1525 ixgbe_mq_start_locked(ifp, txr);
1527 IXGBE_TX_UNLOCK(txr);
1529 /* Check for fan failure */
1530 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1531 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1532 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1533 "REPLACE IMMEDIATELY!!\n");
1534 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1537 /* Link status change */
1538 if (reg_eicr & IXGBE_EICR_LSC)
1539 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1541 /* External PHY interrupt */
1542 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1543 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1544 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
/* More RX work pending: defer to the que task instead of looping here. */
1547 taskqueue_enqueue(que->tq, &que->que_task);
1549 ixgbe_enable_intr(adapter);
1554 /*********************************************************************
1556 * MSIX Queue Interrupt Service routine
1558 **********************************************************************/
/*
 * MSIX per-queue interrupt handler: masks its own vector, drains RX and
 * restarts TX, optionally computes adaptive interrupt moderation (AIM)
 * from the average packet size of the last interval, then either defers
 * remaining work to the que taskqueue or re-enables the vector.
 */
1560 ixgbe_msix_que(void *arg)
1562 struct ix_queue *que = arg;
1563 struct adapter *adapter = que->adapter;
1564 struct ifnet *ifp = adapter->ifp;
1565 struct tx_ring *txr = que->txr;
1566 struct rx_ring *rxr = que->rxr;
1571 /* Protect against spurious interrupts */
1572 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Auto-masked by EIAM; disable explicitly until we finish servicing. */
1575 ixgbe_disable_queue(adapter, que->msix);
1578 more = ixgbe_rxeof(que);
1582 #ifdef IXGBE_LEGACY_TX
/* Fixed: IFQ_DRV_IS_EMPTY takes a pointer (&ifp->if_snd), matching the
 * other call sites in this file; the bare struct did not compile/behave. */
1583 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1584 ixgbe_start_locked(txr, ifp);
1586 if (!drbr_empty(ifp, txr->br))
1587 ixgbe_mq_start_locked(ifp, txr);
1589 IXGBE_TX_UNLOCK(txr);
1593 if (adapter->enable_aim == FALSE)
1596 ** Do Adaptive Interrupt Moderation:
1597 **  - Write out last calculated setting
1598 **  - Calculate based on average size over
1599 **    the last interval.
1601 if (que->eitr_setting)
1602 IXGBE_WRITE_REG(&adapter->hw,
1603 IXGBE_EITR(que->msix), que->eitr_setting);
1605 que->eitr_setting = 0;
1607 /* Idle, do nothing */
1608 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr ~ average frame size of the busier direction this interval. */
1611 if ((txr->bytes) && (txr->packets))
1612 newitr = txr->bytes/txr->packets;
1613 if ((rxr->bytes) && (rxr->packets))
1614 newitr = max(newitr,
1615 (rxr->bytes / rxr->packets));
1616 newitr += 24; /* account for hardware frame, crc */
1618 /* set an upper boundary */
1619 newitr = min(newitr, 3000);
1621 /* Be nice to the mid range */
1622 if ((newitr > 300) && (newitr < 1200))
1623 newitr = (newitr / 3);
1625 newitr = (newitr / 2);
/* 82598 takes the interval mirrored in the high half-word. */
1627 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1628 newitr |= newitr << 16;
1630 newitr |= IXGBE_EITR_CNT_WDIS;
1632 /* save for next interrupt */
1633 que->eitr_setting = newitr;
1643 taskqueue_enqueue(que->tq, &que->que_task);
1645 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSIX admin/link vector handler: pauses "other" interrupts, decodes the
 * cause (link change, flow director, ECC, over-temp, mailbox, SFP module,
 * fan, external PHY) and hands each event to the admin taskqueue, then
 * re-enables EIMS_OTHER.
 */
1651 ixgbe_msix_link(void *arg)
1653 struct adapter *adapter = arg;
1654 struct ixgbe_hw *hw = &adapter->hw;
1655 u32 reg_eicr, mod_mask;
1657 ++adapter->link_irq;
1659 /* Pause other interrupts */
1660 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1662 /* First get the cause */
/* Read EICS (set-cause mirror) rather than EICR to avoid clearing queue bits. */
1663 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1664 /* Be sure the queue bits are not cleared */
1665 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1666 /* Clear interrupt with write */
1667 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1669 /* Link status change */
1670 if (reg_eicr & IXGBE_EICR_LSC) {
1671 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1672 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Causes below only exist on 82599 and newer MACs. */
1675 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1677 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1678 /* This is probably overkill :) */
/* Only one FDIR reinit in flight at a time. */
1679 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1681 /* Disable the interrupt */
1682 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1683 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1686 if (reg_eicr & IXGBE_EICR_ECC) {
1687 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1688 "Please Reboot!!\n");
1689 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1692 /* Check for over temp condition */
1693 if (reg_eicr & IXGBE_EICR_TS) {
1694 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1695 "PHY IS SHUT DOWN!!\n");
1696 device_printf(adapter->dev, "System shutdown required!\n");
1697 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1700 if (reg_eicr & IXGBE_EICR_MAILBOX)
1701 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1705 /* Pluggable optics-related interrupt */
1706 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1707 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1709 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1711 if (ixgbe_is_sfp(hw)) {
/* SDP1 = multi-speed fiber event, mod_mask = module insert/remove. */
1712 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1713 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1714 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1715 } else if (reg_eicr & mod_mask) {
1716 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1717 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1721 /* Check for fan failure */
1722 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1723 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1724 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1725 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1726 "REPLACE IMMEDIATELY!!\n");
1729 /* External PHY interrupt */
1730 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1731 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1732 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1733 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1736 /* Re-enable other interrupts */
1737 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1741 /*********************************************************************
1743 * Media Ioctl callback
1745 * This routine is called whenever the user queries the status of
1746 * the interface using ifconfig.
1748 **********************************************************************/
/*
 * ifmedia status callback (ifconfig query): refreshes link state, then
 * maps the PHY layer + current link speed onto an IFM_* active media
 * word, appending flow-control pause flags where supported.
 * NOTE(review): break statements between the speed cases are elided in
 * this listing.
 */
1750 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1752 struct adapter *adapter = ifp->if_softc;
1753 struct ixgbe_hw *hw = &adapter->hw;
1756 INIT_DEBUGOUT("ixgbe_media_status: begin");
1757 IXGBE_CORE_LOCK(adapter);
1758 ixgbe_update_link_status(adapter);
1760 ifmr->ifm_status = IFM_AVALID;
1761 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail out early. */
1763 if (!adapter->link_active) {
1764 IXGBE_CORE_UNLOCK(adapter);
1768 ifmr->ifm_status |= IFM_ACTIVE;
1769 layer = adapter->phy_layer;
/* Copper / twisted pair. */
1771 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1772 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1773 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1774 switch (adapter->link_speed) {
1775 case IXGBE_LINK_SPEED_10GB_FULL:
1776 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1778 case IXGBE_LINK_SPEED_1GB_FULL:
1779 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1781 case IXGBE_LINK_SPEED_100_FULL:
1782 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
/* Direct-attach copper (twinax). */
1785 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1786 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1787 switch (adapter->link_speed) {
1788 case IXGBE_LINK_SPEED_10GB_FULL:
1789 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Long-reach / long-reach-multimode / short-reach fiber. */
1792 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1793 switch (adapter->link_speed) {
1794 case IXGBE_LINK_SPEED_10GB_FULL:
1795 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1797 case IXGBE_LINK_SPEED_1GB_FULL:
1798 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1801 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1802 switch (adapter->link_speed) {
1803 case IXGBE_LINK_SPEED_10GB_FULL:
1804 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1806 case IXGBE_LINK_SPEED_1GB_FULL:
1807 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1810 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1811 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1812 switch (adapter->link_speed) {
1813 case IXGBE_LINK_SPEED_10GB_FULL:
1814 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1816 case IXGBE_LINK_SPEED_1GB_FULL:
1817 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1820 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1821 switch (adapter->link_speed) {
1822 case IXGBE_LINK_SPEED_10GB_FULL:
1823 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1827 ** XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX): older ifmedia lacks the exact subtypes, so the
 * pre-IFM_ETH_XTYPE branch approximates with SR/CX4/CX. */
1830 #ifndef IFM_ETH_XTYPE
1831 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1832 switch (adapter->link_speed) {
1833 case IXGBE_LINK_SPEED_10GB_FULL:
1834 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1836 case IXGBE_LINK_SPEED_2_5GB_FULL:
1837 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1839 case IXGBE_LINK_SPEED_1GB_FULL:
1840 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1843 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1844 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1845 switch (adapter->link_speed) {
1846 case IXGBE_LINK_SPEED_10GB_FULL:
1847 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1849 case IXGBE_LINK_SPEED_2_5GB_FULL:
1850 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1852 case IXGBE_LINK_SPEED_1GB_FULL:
1853 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1857 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1858 switch (adapter->link_speed) {
1859 case IXGBE_LINK_SPEED_10GB_FULL:
1860 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1862 case IXGBE_LINK_SPEED_2_5GB_FULL:
1863 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1865 case IXGBE_LINK_SPEED_1GB_FULL:
1866 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1869 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1870 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1871 switch (adapter->link_speed) {
1872 case IXGBE_LINK_SPEED_10GB_FULL:
1873 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1875 case IXGBE_LINK_SPEED_2_5GB_FULL:
1876 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1878 case IXGBE_LINK_SPEED_1GB_FULL:
1879 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1884 /* If nothing is recognized... */
1885 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1886 ifmr->ifm_active |= IFM_UNKNOWN;
1888 #if __FreeBSD_version >= 900025
1889 /* Display current flow control setting used on link */
1890 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1891 hw->fc.current_mode == ixgbe_fc_full)
1892 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1893 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1894 hw->fc.current_mode == ixgbe_fc_full)
1895 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1898 IXGBE_CORE_UNLOCK(adapter);
1903 /*********************************************************************
1905 * Media Ioctl callback
1907 * This routine is called when the user changes speed/duplex using
1908 * media/mediopt option with ifconfig.
1910 **********************************************************************/
/*
 * ifmedia change callback (ifconfig media/mediaopt): translates the
 * requested IFM_* subtype into an ixgbe_link_speed mask, programs the
 * link, and records the advertised speeds in adapter->advertise.
 * NOTE(review): the case labels and break/fallthrough lines of the
 * switches are partially elided in this listing; the visible stacked
 * speed |= lines rely on deliberate fallthrough in the original code.
 */
1912 ixgbe_media_change(struct ifnet * ifp)
1914 struct adapter *adapter = ifp->if_softc;
1915 struct ifmedia *ifm = &adapter->media;
1916 struct ixgbe_hw *hw = &adapter->hw;
1917 ixgbe_link_speed speed = 0;
1919 INIT_DEBUGOUT("ixgbe_media_change: begin");
1921 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from userland. */
1924 if (hw->phy.media_type == ixgbe_media_type_backplane)
1928 ** We don't actually need to check against the supported
1929 ** media types of the adapter; ifmedia will take care of
1932 #ifndef IFM_ETH_XTYPE
1933 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1936 speed |= IXGBE_LINK_SPEED_100_FULL;
1938 case IFM_10G_SR: /* KR, too */
1940 case IFM_10G_CX4: /* KX4 */
1941 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1942 case IFM_10G_TWINAX:
1943 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1946 speed |= IXGBE_LINK_SPEED_100_FULL;
1949 case IFM_1000_CX: /* KX */
1950 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1953 speed |= IXGBE_LINK_SPEED_100_FULL;
1959 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1962 speed |= IXGBE_LINK_SPEED_100_FULL;
1967 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1968 case IFM_10G_TWINAX:
1969 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1972 speed |= IXGBE_LINK_SPEED_100_FULL;
1976 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1979 speed |= IXGBE_LINK_SPEED_100_FULL;
1986 hw->mac.autotry_restart = TRUE;
1987 hw->mac.ops.setup_link(hw, speed, TRUE);
/* IFM_AUTO: record all negotiated speeds as advertise bits (2=10G,1=1G,0=100M). */
1988 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1989 adapter->advertise = 0;
1991 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
1992 adapter->advertise |= 1 << 2;
1993 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
1994 adapter->advertise |= 1 << 1;
1995 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
1996 adapter->advertise |= 1 << 0;
2002 device_printf(adapter->dev, "Invalid media type!\n");
/*
 * Program unicast/multicast promiscuous bits (FCTRL UPE/MPE) from the
 * interface flags. Counts multicast memberships to decide whether MPE
 * must stay set because the filter table would overflow.
 */
2007 ixgbe_set_promisc(struct adapter *adapter)
2010 struct ifnet *ifp = adapter->ifp;
2013 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2014 reg_rctl &= (~IXGBE_FCTRL_UPE);
2015 if (ifp->if_flags & IFF_ALLMULTI)
2016 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2018 struct ifmultiaddr *ifma;
2019 #if __FreeBSD_version < 800000
2022 if_maddr_rlock(ifp);
/* Count AF_LINK memberships, capped at the hardware table size. */
2024 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2025 if (ifma->ifma_addr->sa_family != AF_LINK)
2027 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2031 #if __FreeBSD_version < 800000
2032 IF_ADDR_UNLOCK(ifp);
2034 if_maddr_runlock(ifp);
/* Filter table fits: multicast promiscuous not needed. */
2037 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2038 reg_rctl &= (~IXGBE_FCTRL_MPE);
2039 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2041 if (ifp->if_flags & IFF_PROMISC) {
2042 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2043 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2044 } else if (ifp->if_flags & IFF_ALLMULTI) {
2045 reg_rctl |= IXGBE_FCTRL_MPE;
2046 reg_rctl &= ~IXGBE_FCTRL_UPE;
2047 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2053 /*********************************************************************
2056 * This routine is called whenever multicast address list is updated.
2058 **********************************************************************/
2059 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter table from the interface's
 * multicast membership list, and set FCTRL promiscuous bits consistent
 * with IFF_PROMISC / IFF_ALLMULTI / table overflow.
 */
2062 ixgbe_set_multi(struct adapter *adapter)
2066 struct ifmultiaddr *ifma;
2067 struct ixgbe_mc_addr *mta;
2069 struct ifnet *ifp = adapter->ifp;
2071 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2074 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2076 #if __FreeBSD_version < 800000
2079 if_maddr_rlock(ifp);
/* Copy each AF_LINK address (and its VMDq pool) into the mta array. */
2081 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2082 if (ifma->ifma_addr->sa_family != AF_LINK)
2084 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2086 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2087 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2088 mta[mcnt].vmdq = adapter->pool;
2091 #if __FreeBSD_version < 800000
2092 IF_ADDR_UNLOCK(ifp);
2094 if_maddr_runlock(ifp);
2097 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional UPE|MPE set is immediately refined by
 * the if/else chain below (the final else clears both) — looks redundant
 * but is preserved as-is; confirm against upstream before removing. */
2098 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2099 if (ifp->if_flags & IFF_PROMISC)
2100 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2101 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2102 ifp->if_flags & IFF_ALLMULTI) {
2103 fctrl |= IXGBE_FCTRL_MPE;
2104 fctrl &= ~IXGBE_FCTRL_UPE;
2106 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2108 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program the table when it fits; otherwise MPE above covers it. */
2110 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2111 update_ptr = (u8 *)mta;
2112 ixgbe_update_mc_addr_list(&adapter->hw,
2113 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2120 * This is an iterator function now needed by the multicast
2121 * shared code. It simply feeds the shared code routine the
2122 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback handed to ixgbe_update_mc_addr_list(): walks the
 * ixgbe_mc_addr array built by ixgbe_set_multi() one entry per call,
 * advancing the shared-code cursor (*update_ptr) past the entry.
 */
2125 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2127 struct ixgbe_mc_addr *mta;
2129 mta = (struct ixgbe_mc_addr *)*update_ptr;
/* Advance cursor to the next array entry (removed stray ';;'). */
2132 *update_ptr = (u8*)(mta + 1);
2137 /*********************************************************************
2140 * This routine checks for link status,updates statistics,
2141 * and runs the watchdog check.
2143 **********************************************************************/
/*
 * 1 Hz callout (core lock held): probes pluggable optics, refreshes link
 * state and statistics, tracks per-queue hang state, and triggers a full
 * watchdog reset only when every queue reports hung.
 * NOTE(review): the 'out:'/'watchdog:' labels and some hang-accounting
 * lines are elided in this listing; the tail (Watchdog timeout printf
 * onward) is the watchdog path.
 */
2146 ixgbe_local_timer(void *arg)
2148 struct adapter *adapter = arg;
2149 device_t dev = adapter->dev;
2150 struct ix_queue *que = adapter->queues;
2154 mtx_assert(&adapter->core_mtx, MA_OWNED);
2156 /* Check for pluggable optics */
2157 if (adapter->sfp_probe)
2158 if (!ixgbe_sfp_probe(adapter))
2159 goto out; /* Nothing to do */
2161 ixgbe_update_link_status(adapter);
2162 ixgbe_update_stats_counters(adapter);
2165 ** Check the TX queues status
2166 **      - mark hung queues so we don't schedule on them
2167 **      - watchdog only if all queues show hung
2169 for (int i = 0; i < adapter->num_queues; i++, que++) {
2170 /* Keep track of queues with work for soft irq */
2172 queues |= ((u64)1 << que->me);
2174 ** Each time txeof runs without cleaning, but there
2175 ** are uncleaned descriptors it increments busy. If
2176 ** we get to the MAX we declare it hung.
2178 if (que->busy == IXGBE_QUEUE_HUNG) {
2180 /* Mark the queue as inactive */
2181 adapter->active_queues &= ~((u64)1 << que->me);
2184 /* Check if we've come back from hung */
2185 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2186 adapter->active_queues |= ((u64)1 << que->me);
2188 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2189 device_printf(dev,"Warning queue %d "
2190 "appears to be hung!\n", i);
2191 que->txr->busy = IXGBE_QUEUE_HUNG;
2197 /* Only truly watchdog if all queues show hung */
2198 if (hung == adapter->num_queues)
2200 else if (queues != 0) { /* Force an IRQ on queues with work */
2201 ixgbe_rearm_queues(adapter, queues);
/* Normal path: re-arm ourselves for the next second. */
2205 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark not-running and reinitialize the adapter. */
2209 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2210 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2211 adapter->watchdog_events++;
2212 ixgbe_init_locked(adapter);
2217 ** Note: this routine updates the OS on the link state
2218 ** the real check of the hardware only happens with
2219 ** a link interrupt.
/*
 * Propagate the cached hardware link state (adapter->link_up, set by the
 * link interrupt path) to the OS: edge-triggered — acts only when
 * link_active disagrees with link_up. On link-up it also refreshes flow
 * control and DMA coalescing and notifies any VFs.
 */
2222 ixgbe_update_link_status(struct adapter *adapter)
2224 struct ifnet *ifp = adapter->ifp;
2225 device_t dev = adapter->dev;
2227 if (adapter->link_up){
2228 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G speed encoding; otherwise report 1G. */
2230 device_printf(dev,"Link is up %d Gbps %s \n",
2231 ((adapter->link_speed == 128)? 10:1),
2233 adapter->link_active = TRUE;
2234 /* Update any Flow Control changes */
2235 ixgbe_fc_enable(&adapter->hw);
2236 /* Update DMA coalescing config */
2237 ixgbe_config_dmac(adapter);
2238 if_link_state_change(ifp, LINK_STATE_UP);
2240 ixgbe_ping_all_vfs(adapter);
2243 } else { /* Link down */
2244 if (adapter->link_active == TRUE) {
2246 device_printf(dev,"Link is Down\n");
2247 if_link_state_change(ifp, LINK_STATE_DOWN);
2248 adapter->link_active = FALSE;
2250 ixgbe_ping_all_vfs(adapter);
2259 /*********************************************************************
2261 * This routine disables all traffic on the adapter by issuing a
2262 * global reset on the MAC and deallocates TX/RX buffers.
2264 **********************************************************************/
/*
 * Stop all traffic (core lock held): disables interrupts, halts the
 * timer, resets the MAC, turns off the TX laser, and pushes link-down
 * state to the stack.
 */
2267 ixgbe_stop(void *arg)
2270 struct adapter *adapter = arg;
2271 struct ixgbe_hw *hw = &adapter->hw;
2274 mtx_assert(&adapter->core_mtx, MA_OWNED);
2276 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2277 ixgbe_disable_intr(adapter);
2278 callout_stop(&adapter->timer);
2280 /* Let the stack know...*/
2281 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs. */
2284 hw->adapter_stopped = FALSE;
2285 ixgbe_stop_adapter(hw);
2286 if (hw->mac.type == ixgbe_mac_82599EB)
2287 ixgbe_stop_mac_link_on_d3_82599(hw);
2288 /* Turn off the laser - noop with no optics */
2289 ixgbe_disable_tx_laser(hw);
2291 /* Update the stack */
2292 adapter->link_up = FALSE;
2293 ixgbe_update_link_status(adapter);
2295 /* reprogram the RAR[0] in case user changed it. */
2296 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2302 /*********************************************************************
2304 * Determine hardware revision.
2306 **********************************************************************/
/*
 * Read PCI config identity (vendor/device/revision/subsystem IDs) into
 * the shared-code hw struct, enable bus mastering, resolve the MAC type,
 * and pick the per-generation scatter-gather segment limit.
 */
2308 ixgbe_identify_hardware(struct adapter *adapter)
2310 device_t dev = adapter->dev;
2311 struct ixgbe_hw *hw = &adapter->hw;
2313 /* Save off the information about this board */
2314 hw->vendor_id = pci_get_vendor(dev);
2315 hw->device_id = pci_get_device(dev);
2316 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2317 hw->subsystem_vendor_id =
2318 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2319 hw->subsystem_device_id =
2320 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2323 ** Make sure BUSMASTER is set
2325 pci_enable_busmaster(dev);
2327 /* We need this here to set the num_segs below */
2328 ixgbe_set_mac_type(hw);
2330 /* Pick up the 82599 settings */
2331 if (hw->mac.type != ixgbe_mac_82598EB) {
2332 hw->phy.smart_speed = ixgbe_smart_speed;
2333 adapter->num_segs = IXGBE_82599_SCATTER;
2335 adapter->num_segs = IXGBE_82598_SCATTER;
2340 /*********************************************************************
2342 * Determine optic type
2344 **********************************************************************/
/*
 * Query the supported physical layer from the shared code and select the
 * matching default IFM_* optic type, checked in priority order; falls
 * back to IFM_ETHER | IFM_AUTO when nothing matches.
 * NOTE(review): the early 'return' after each assignment is elided in
 * this listing — the checks are mutually exclusive in the original.
 */
2346 ixgbe_setup_optics(struct adapter *adapter)
2348 struct ixgbe_hw *hw = &adapter->hw;
2351 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2353 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2354 adapter->optics = IFM_10G_T;
2358 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2359 adapter->optics = IFM_1000_T;
2363 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2364 adapter->optics = IFM_1000_SX;
2368 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2369 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2370 adapter->optics = IFM_10G_LR;
2374 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2375 adapter->optics = IFM_10G_SR;
2379 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2380 adapter->optics = IFM_10G_TWINAX;
2384 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2385 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2386 adapter->optics = IFM_10G_CX4;
2390 /* If we get here just set the default */
2391 adapter->optics = IFM_ETHER | IFM_AUTO;
2395 /*********************************************************************
2397 * Setup the Legacy or MSI Interrupt handler
2399 **********************************************************************/
/*
 * ixgbe_allocate_legacy:
 *   Set up the single Legacy/MSI interrupt path: allocate one IRQ
 *   resource, create the per-queue and link/SFP/module taskqueues, and
 *   register ixgbe_legacy_irq as the fast interrupt handler.  On handler
 *   registration failure both taskqueues are freed.
 */
2401 ixgbe_allocate_legacy(struct adapter *adapter)
2403 device_t dev = adapter->dev;
2404 struct ix_queue *que = adapter->queues;
2405 #ifndef IXGBE_LEGACY_TX
2406 struct tx_ring *txr = adapter->tx_rings;
/* msix == 1 indicates plain MSI rather than MSI-X */
2411 if (adapter->msix == 1)
2414 /* We allocate a single interrupt resource */
2415 adapter->res = bus_alloc_resource_any(dev,
2416 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2417 if (adapter->res == NULL) {
2418 device_printf(dev, "Unable to allocate bus resource: "
2424 * Try allocating a fast interrupt and the associated deferred
2425 * processing contexts.
2427 #ifndef IXGBE_LEGACY_TX
2428 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2430 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2431 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2432 taskqueue_thread_enqueue, &que->tq);
2433 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2434 device_get_nameunit(adapter->dev));
2436 /* Tasklets for Link, SFP and Multispeed Fiber */
2437 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2438 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2439 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2440 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2442 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2444 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2445 taskqueue_thread_enqueue, &adapter->tq);
2446 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2447 device_get_nameunit(adapter->dev));
/* Register the fast interrupt; clean up taskqueues on failure */
2449 if ((error = bus_setup_intr(dev, adapter->res,
2450 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2451 que, &adapter->tag)) != 0) {
2452 device_printf(dev, "Failed to register fast interrupt "
2453 "handler: %d\n", error);
2454 taskqueue_free(que->tq);
2455 taskqueue_free(adapter->tq);
2460 /* For simplicity in the handlers */
2461 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2467 /*********************************************************************
2469 * Setup MSIX Interrupt resources and handlers
2471 **********************************************************************/
/*
 * ixgbe_allocate_msix:
 *   Set up MSI-X: allocate one IRQ vector + handler per queue (binding
 *   each to a CPU — via the RSS bucket->CPU map when RSS is compiled
 *   in), then a final vector for link/admin events, and create the
 *   per-queue and link taskqueues.
 */
2473 ixgbe_allocate_msix(struct adapter *adapter)
2475 device_t dev = adapter->dev;
2476 struct ix_queue *que = adapter->queues;
2477 struct tx_ring *txr = adapter->tx_rings;
2478 int error, rid, vector = 0;
2486 * If we're doing RSS, the number of queues needs to
2487 * match the number of RSS buckets that are configured.
2489 * + If there's more queues than RSS buckets, we'll end
2490 * up with queues that get no traffic.
2492 * + If there's more RSS buckets than queues, we'll end
2493 * up having multiple RSS buckets map to the same queue,
2494 * so there'll be some contention.
2496 if (adapter->num_queues != rss_getnumbuckets()) {
2498 "%s: number of queues (%d) != number of RSS buckets (%d)"
2499 "; performance will be impacted.\n",
2501 adapter->num_queues,
2502 rss_getnumbuckets());
/* One vector per queue: allocate the IRQ and attach the que handler */
2506 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2508 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2509 RF_SHAREABLE | RF_ACTIVE);
2510 if (que->res == NULL) {
2511 device_printf(dev,"Unable to allocate"
2512 " bus resource: que interrupt [%d]\n", vector);
2515 /* Set the handler function */
2516 error = bus_setup_intr(dev, que->res,
2517 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2518 ixgbe_msix_que, que, &que->tag);
2521 device_printf(dev, "Failed to register QUE handler");
2524 #if __FreeBSD_version >= 800504
2525 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
/* Record this queue in the active-queue EIMS bitmask */
2528 adapter->active_queues |= (u64)(1 << que->msix);
2531 * The queue ID is used as the RSS layer bucket ID.
2532 * We look up the queue ID -> RSS CPU ID and select
2535 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2538 * Bind the msix vector, and thus the
2539 * rings to the corresponding cpu.
2541 * This just happens to match the default RSS round-robin
2542 * bucket -> queue -> CPU allocation.
2544 if (adapter->num_queues > 1)
2547 if (adapter->num_queues > 1)
2548 bus_bind_intr(dev, que->res, cpu_id);
2552 "Bound RSS bucket %d to CPU %d\n",
2556 "Bound queue %d to cpu %d\n",
2559 #endif /* IXGBE_DEBUG */
2562 #ifndef IXGBE_LEGACY_TX
2563 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2565 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2566 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2567 taskqueue_thread_enqueue, &que->tq);
/* With RSS, pin the queue taskqueue thread to the bound CPU */
2569 CPU_SETOF(cpu_id, &cpu_mask);
2570 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2573 device_get_nameunit(adapter->dev),
2576 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2577 device_get_nameunit(adapter->dev), i);
/* And finally the dedicated Link/admin interrupt vector */
2583 adapter->res = bus_alloc_resource_any(dev,
2584 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2585 if (!adapter->res) {
2586 device_printf(dev,"Unable to allocate"
2587 " bus resource: Link interrupt [%d]\n", rid);
2590 /* Set the link handler function */
2591 error = bus_setup_intr(dev, adapter->res,
2592 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2593 ixgbe_msix_link, adapter, &adapter->tag);
2595 adapter->res = NULL;
2596 device_printf(dev, "Failed to register LINK handler");
2599 #if __FreeBSD_version >= 800504
2600 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2602 adapter->vector = vector;
2603 /* Tasklets for Link, SFP and Multispeed Fiber */
2604 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2605 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2606 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2608 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2610 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2612 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2614 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2615 taskqueue_thread_enqueue, &adapter->tq);
2616 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2617 device_get_nameunit(adapter->dev));
2623 * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix:
 *   Decide the interrupt mode.  Try MSI-X first: map the MSI-X table
 *   BAR, compute a queue count (capped by CPUs, RSS buckets, the
 *   ixgbe_num_queues tunable, and 8 when autoconfiguring), and request
 *   queues+1 vectors.  On any failure fall back to MSI, then Legacy.
 *   Returns the vector count via the callers (fragment — return
 *   statements elided).
 */
2626 ixgbe_setup_msix(struct adapter *adapter)
2628 device_t dev = adapter->dev;
2629 int rid, want, queues, msgs;
2631 /* Override by tuneable */
2632 if (ixgbe_enable_msix == 0)
2635 /* First try MSI/X */
2636 msgs = pci_msix_count(dev);
2639 rid = PCIR_BAR(MSIX_82598_BAR);
2640 adapter->msix_mem = bus_alloc_resource_any(dev,
2641 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2642 if (adapter->msix_mem == NULL) {
2643 rid += 4; /* 82599 maps in higher BAR */
2644 adapter->msix_mem = bus_alloc_resource_any(dev,
2645 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2647 if (adapter->msix_mem == NULL) {
2648 /* May not be enabled */
2649 device_printf(adapter->dev,
2650 "Unable to map MSIX table \n");
2654 /* Figure out a reasonable auto config value */
2655 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2658 /* If we're doing RSS, clamp at the number of RSS buckets */
2659 if (queues > rss_getnumbuckets())
2660 queues = rss_getnumbuckets();
/* Explicit tunable wins over the autoconfigured count */
2663 if (ixgbe_num_queues != 0)
2664 queues = ixgbe_num_queues;
2665 /* Set max queues to 8 when autoconfiguring */
2666 else if ((ixgbe_num_queues == 0) && (queues > 8))
2669 /* reflect correct sysctl value */
2670 ixgbe_num_queues = queues;
2673 ** Want one vector (RX/TX pair) per queue
2674 ** plus an additional for Link.
2680 device_printf(adapter->dev,
2681 "MSIX Configuration Problem, "
2682 "%d vectors but %d queues wanted!\n",
2686 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2687 device_printf(adapter->dev,
2688 "Using MSIX interrupts with %d vectors\n", msgs);
2689 adapter->num_queues = queues;
2693 ** If MSIX alloc failed or provided us with
2694 ** less than needed, free and fall through to MSI
2696 pci_release_msi(dev);
/* Release the MSI-X table mapping before trying MSI */
2699 if (adapter->msix_mem != NULL) {
2700 bus_release_resource(dev, SYS_RES_MEMORY,
2701 rid, adapter->msix_mem);
2702 adapter->msix_mem = NULL;
2705 if (pci_alloc_msi(dev, &msgs) == 0) {
2706 device_printf(adapter->dev, "Using an MSI interrupt\n");
2709 device_printf(adapter->dev, "Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources:
 *   Map BAR(0) register space, stash the bus_space tag/handle for the
 *   register access macros, point the shared code at the mapping, and
 *   then configure MSI/MSI-X (adapter->msix receives the vector count;
 *   num_queues defaults to 1 in case MSI-X setup fails).
 */
2715 ixgbe_allocate_pci_resources(struct adapter *adapter)
2718 device_t dev = adapter->dev;
2721 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2724 if (!(adapter->pci_mem)) {
2725 device_printf(dev, "Unable to allocate bus resource: memory\n");
2729 /* Save bus_space values for READ/WRITE_REG macros */
2730 adapter->osdep.mem_bus_space_tag =
2731 rman_get_bustag(adapter->pci_mem);
2732 adapter->osdep.mem_bus_space_handle =
2733 rman_get_bushandle(adapter->pci_mem);
2734 /* Set hw values for shared code */
2735 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2736 adapter->hw.back = adapter;
2738 /* Default to 1 queue if MSI-X setup fails */
2739 adapter->num_queues = 1;
2742 ** Now setup MSI or MSI-X, should
2743 ** return us the number of supported
2744 ** vectors. (Will be 1 for MSI)
2746 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources:
 *   Tear down everything ixgbe_allocate_pci_resources and the interrupt
 *   allocators set up: per-queue IRQs, the Legacy/Link IRQ, MSI state,
 *   the MSI-X table BAR, and the BAR(0) register mapping.  Bails early
 *   if attach failed before adapter->res was ever set.
 */
2751 ixgbe_free_pci_resources(struct adapter * adapter)
2753 struct ix_queue *que = adapter->queues;
2754 device_t dev = adapter->dev;
/* MSI-X table BAR differs between 82598 and 82599-class parts */
2757 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2758 memrid = PCIR_BAR(MSIX_82598_BAR);
2760 memrid = PCIR_BAR(MSIX_82599_BAR);
2763 ** There is a slight possibility of a failure mode
2764 ** in attach that will result in entering this function
2765 ** before interrupt resources have been initialized, and
2766 ** in that case we do not want to execute the loops below
2767 ** We can detect this reliably by the state of the adapter
2770 if (adapter->res == NULL)
2774 ** Release all msix queue resources:
2776 for (int i = 0; i < adapter->num_queues; i++, que++) {
2777 rid = que->msix + 1;
2778 if (que->tag != NULL) {
2779 bus_teardown_intr(dev, que->res, que->tag);
2782 if (que->res != NULL)
2783 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2787 /* Clean the Legacy or Link interrupt last */
2788 if (adapter->vector) /* we are doing MSIX */
2789 rid = adapter->vector + 1;
2791 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2793 if (adapter->tag != NULL) {
2794 bus_teardown_intr(dev, adapter->res, adapter->tag);
2795 adapter->tag = NULL;
2797 if (adapter->res != NULL)
2798 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
/* Finally give back MSI state and the memory BARs */
2802 pci_release_msi(dev);
2804 if (adapter->msix_mem != NULL)
2805 bus_release_resource(dev, SYS_RES_MEMORY,
2806 memrid, adapter->msix_mem);
2808 if (adapter->pci_mem != NULL)
2809 bus_release_resource(dev, SYS_RES_MEMORY,
2810 PCIR_BAR(0), adapter->pci_mem);
2815 /*********************************************************************
2817 * Setup networking device structure and register an interface.
2819 **********************************************************************/
/*
 * ixgbe_setup_interface:
 *   Allocate and populate the ifnet: entry points (init/ioctl/transmit
 *   or start), TSO limits, capability flags, and attach it to the
 *   Ethernet layer; then initialize the ifmedia list and default to
 *   autoselect media.
 */
2821 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2825 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2827 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2829 device_printf(dev, "can not allocate ifnet structure\n");
2832 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2833 if_initbaudrate(ifp, IF_Gbps(10));
2834 ifp->if_init = ixgbe_init;
2835 ifp->if_softc = adapter;
2836 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2837 ifp->if_ioctl = ixgbe_ioctl;
2838 #if __FreeBSD_version >= 1100036
2839 if_setgetcounterfn(ifp, ixgbe_get_counter);
2841 #if __FreeBSD_version >= 1100045
2842 /* TSO parameters */
2843 ifp->if_hw_tsomax = 65518;
2844 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2845 ifp->if_hw_tsomaxsegsize = 2048;
2847 #ifndef IXGBE_LEGACY_TX
/* Multiqueue transmit path */
2848 ifp->if_transmit = ixgbe_mq_start;
2849 ifp->if_qflush = ixgbe_qflush;
2851 ifp->if_start = ixgbe_start;
2852 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2853 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2854 IFQ_SET_READY(&ifp->if_snd);
2857 ether_ifattach(ifp, adapter->hw.mac.addr);
2859 adapter->max_frame_size =
2860 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2863 * Tell the upper layer(s) we support long frames.
2865 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2867 /* Set capability flags */
2868 ifp->if_capabilities |= IFCAP_RXCSUM
2875 | IFCAP_VLAN_HWTAGGING
2882 /* Enable the above capabilities by default */
2883 ifp->if_capenable = ifp->if_capabilities;
2886 ** Don't turn this on by default, if vlans are
2887 ** created on another pseudo device (eg. lagg)
2888 ** then vlan events are not passed thru, breaking
2889 ** operation, but with HW FILTER off it works. If
2890 ** using vlans directly on the ixgbe driver you can
2891 ** enable this and get full hardware tag filtering.
2893 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2896 * Specify the media types supported by this adapter and register
2897 * callbacks to update media and link information
2899 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2900 ixgbe_media_status);
2902 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2903 ixgbe_add_media_types(adapter);
2905 /* Set autoselect media by default */
2906 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_add_media_types:
 *   Translate the cached physical-layer bitmask into ifmedia entries.
 *   Backplane (KR/KX4/KX) types map directly when IFM_ETH_XTYPE is
 *   available, otherwise they are approximated by SR/CX4/CX with a
 *   console note.  Always ends with an IFM_AUTO entry.
 */
2912 ixgbe_add_media_types(struct adapter *adapter)
2914 struct ixgbe_hw *hw = &adapter->hw;
2915 device_t dev = adapter->dev;
2918 layer = adapter->phy_layer;
2920 /* Media types with matching FreeBSD media defines */
2921 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2922 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2923 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2924 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2925 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2926 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2928 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2929 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2930 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2932 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2933 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
/* Multispeed fiber modules also advertise the 1G rate */
2934 if (hw->phy.multispeed_fiber)
2935 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2937 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2938 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2939 if (hw->phy.multispeed_fiber)
2940 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2941 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2942 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2943 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2944 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2946 #ifdef IFM_ETH_XTYPE
2947 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2948 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2949 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2950 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2951 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2952 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
/* Older media lists lack the backplane types; approximate them */
2954 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2955 device_printf(dev, "Media supported: 10GbaseKR\n");
2956 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2957 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2959 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2960 device_printf(dev, "Media supported: 10GbaseKX4\n");
2961 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2962 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2964 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2965 device_printf(dev, "Media supported: 1000baseKX\n");
2966 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2967 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2970 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2971 device_printf(dev, "Media supported: 1000baseBX\n");
2973 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2974 ifmedia_add(&adapter->media,
2975 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2976 ifmedia_add(&adapter->media,
2977 IFM_ETHER | IFM_1000_T, 0, NULL);
2980 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
/*
 * ixgbe_config_link:
 *   Bring up the link.  For SFP modules, defer to the module-insertion
 *   task; otherwise query current link state, resolve the advertised
 *   autoneg capabilities when none are set, and call the shared-code
 *   setup_link handler.
 */
2984 ixgbe_config_link(struct adapter *adapter)
2986 struct ixgbe_hw *hw = &adapter->hw;
2987 u32 autoneg, err = 0;
2988 bool sfp, negotiate;
2990 sfp = ixgbe_is_sfp(hw);
/* SFP: let the mod task do the (sleepable) module setup work */
2993 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2995 if (hw->mac.ops.check_link)
2996 err = ixgbe_check_link(hw, &adapter->link_speed,
2997 &adapter->link_up, FALSE);
3000 autoneg = hw->phy.autoneg_advertised;
3001 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3002 err = hw->mac.ops.get_link_capabilities(hw,
3003 &autoneg, &negotiate);
3006 if (hw->mac.ops.setup_link)
3007 err = hw->mac.ops.setup_link(hw,
3008 autoneg, adapter->link_up);
3015 /*********************************************************************
3017 * Enable transmit units.
3019 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units:
 *   Program the hardware Tx rings: descriptor base/length, head/tail
 *   pointers, and per-ring DCA_TXCTRL (head-writeback/WRO disable).
 *   On non-82598 parts additionally enables the Tx DMA engine and sets
 *   MTQC with the arbiter temporarily disabled.
 */
3021 ixgbe_initialize_transmit_units(struct adapter *adapter)
3023 struct tx_ring *txr = adapter->tx_rings;
3024 struct ixgbe_hw *hw = &adapter->hw;
3026 /* Setup the Base and Length of the Tx Descriptor Ring */
3027 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3028 u64 tdba = txr->txdma.dma_paddr;
3032 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3033 (tdba & 0x00000000ffffffffULL));
3034 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3035 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3036 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3038 /* Setup the HW Tx Head and Tail descriptor pointers */
3039 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3040 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3042 /* Cache the tail address */
3043 txr->tail = IXGBE_TDT(j);
3045 /* Disable Head Writeback */
3047 * Note: for X550 series devices, these registers are actually
3048 * prefixed with TPH_ instead of DCA_, but the addresses and
3049 * fields remain the same.
3051 switch (hw->mac.type) {
3052 case ixgbe_mac_82598EB:
3053 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3056 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3059 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3060 switch (hw->mac.type) {
3061 case ixgbe_mac_82598EB:
3062 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3065 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3071 if (hw->mac.type != ixgbe_mac_82598EB) {
3072 u32 dmatxctl, rttdcs;
3074 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
/* Enable the Tx DMA engine */
3076 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3077 dmatxctl |= IXGBE_DMATXCTL_TE;
3078 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3079 /* Disable arbiter to set MTQC */
3080 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3081 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3082 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3084 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3086 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
/* Re-enable the arbiter now that MTQC is programmed */
3088 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3089 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/*
 * ixgbe_initialize_rss_mapping:
 *   Program the RSS redirection table (RETA/ERETA), the hash key
 *   (RSSRK), and the hash-field selection (MRQC).  With kernel RSS the
 *   key and bucket mapping come from the RSS layer; otherwise a random
 *   key and a simple round-robin queue mapping are used.
 */
3096 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3098 struct ixgbe_hw *hw = &adapter->hw;
3099 u32 reta = 0, mrqc, rss_key[10];
3100 int queue_id, table_size, index_mult;
3102 u32 rss_hash_config;
3105 enum ixgbe_iov_mode mode;
3109 /* Fetch the configured RSS key */
3110 rss_getkey((uint8_t *) &rss_key);
3112 /* set up random bits */
3113 arc4rand(&rss_key, sizeof(rss_key), 0);
3116 /* Set multiplier for RETA setup and table size based on MAC */
3119 switch (adapter->hw.mac.type) {
3120 case ixgbe_mac_82598EB:
3123 case ixgbe_mac_X550:
3124 case ixgbe_mac_X550EM_x:
3131 /* Set up the redirection table */
3132 for (int i = 0, j = 0; i < table_size; i++, j++) {
3133 if (j == adapter->num_queues) j = 0;
3136 * Fetch the RSS bucket id for the given indirection entry.
3137 * Cap it at the number of configured buckets (which is
3140 queue_id = rss_get_indirection_to_bucket(i);
3141 queue_id = queue_id % adapter->num_queues;
3143 queue_id = (j * index_mult);
3146 * The low 8 bits are for hash value (n+0);
3147 * The next 8 bits are for hash value (n+1), etc.
3150 reta = reta | ( ((uint32_t) queue_id) << 24);
/* Every 4th entry the accumulated 32-bit word is written out;
 * entries beyond 32 words go to the extended ERETA registers. */
3153 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3155 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3160 /* Now fill our hash function seeds */
3161 for (int i = 0; i < 10; i++)
3162 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3164 /* Perform hash on these packet types */
3166 mrqc = IXGBE_MRQC_RSSEN;
3167 rss_hash_config = rss_gethashconfig();
3168 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3169 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3170 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3171 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3172 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3173 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3174 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3175 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3176 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3177 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3178 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3179 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3180 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3181 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3182 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3183 device_printf(adapter->dev,
3184 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3185 "but not supported\n", __func__);
3186 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3187 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3188 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3189 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3192 * Disable UDP - IP fragments aren't currently being handled
3193 * and so we end up with a mix of 2-tuple and 4-tuple
3196 mrqc = IXGBE_MRQC_RSSEN
3197 | IXGBE_MRQC_RSS_FIELD_IPV4
3198 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3199 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3200 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3201 | IXGBE_MRQC_RSS_FIELD_IPV6
3202 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3206 mode = ixgbe_get_iov_mode(adapter);
3207 mrqc |= ixgbe_get_mrqc(mode);
3209 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3213 /*********************************************************************
3215 * Setup receive registers and features.
3217 **********************************************************************/
/* Helper constants for SRRCTL buffer-size programming below */
3218 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3220 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units:
 *   Program the hardware Rx path with receives disabled: broadcast
 *   accept (FCTRL), jumbo/CRC-strip settings (HLREG0), per-ring
 *   descriptor base/length, SRRCTL buffer sizing and DROP_EN policy,
 *   head/tail pointers, PSRTYPE on non-82598, RSS mapping, and finally
 *   the RXCSUM offload bits.
 */
3223 ixgbe_initialize_receive_units(struct adapter *adapter)
3225 struct rx_ring *rxr = adapter->rx_rings;
3226 struct ixgbe_hw *hw = &adapter->hw;
3227 struct ifnet *ifp = adapter->ifp;
3228 u32 bufsz, fctrl, srrctl, rxcsum;
3232 * Make sure receives are disabled while
3233 * setting up the descriptor ring
3235 ixgbe_disable_rx(hw);
3237 /* Enable broadcasts */
3238 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3239 fctrl |= IXGBE_FCTRL_BAM;
3240 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3241 fctrl |= IXGBE_FCTRL_DPF;
3242 fctrl |= IXGBE_FCTRL_PMCF;
3244 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3246 /* Set for Jumbo Frames? */
3247 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3248 if (ifp->if_mtu > ETHERMTU)
3249 hlreg |= IXGBE_HLREG0_JUMBOEN;
3251 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3253 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3254 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3255 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3257 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3258 #endif /* DEV_NETMAP */
3259 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf buffer size into SRRCTL's 1KB-granular field */
3261 bufsz = (adapter->rx_mbuf_sz +
3262 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3264 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3265 u64 rdba = rxr->rxdma.dma_paddr;
3268 /* Setup the Base and Length of the Rx Descriptor Ring */
3269 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3270 (rdba & 0x00000000ffffffffULL));
3271 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3272 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3273 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3275 /* Set up the SRRCTL register */
3276 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3277 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3278 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3280 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3283 * Set DROP_EN iff we have no flow control and >1 queue.
3284 * Note that srrctl was cleared shortly before during reset,
3285 * so we do not need to clear the bit, but do it just in case
3286 * this code is moved elsewhere.
3288 if (adapter->num_queues > 1 &&
3289 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3290 srrctl |= IXGBE_SRRCTL_DROP_EN;
3292 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3295 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3297 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3298 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3299 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3301 /* Set the driver rx tail address */
3302 rxr->tail = IXGBE_RDT(rxr->me);
/* 82599-class parts need PSRTYPE for packet-split header parsing */
3305 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3306 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3307 IXGBE_PSRTYPE_UDPHDR |
3308 IXGBE_PSRTYPE_IPV4HDR |
3309 IXGBE_PSRTYPE_IPV6HDR;
3310 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3313 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3315 ixgbe_initialize_rss_mapping(adapter);
3317 if (adapter->num_queues > 1) {
3318 /* RSS and RX IPP Checksum are mutually exclusive */
3319 rxcsum |= IXGBE_RXCSUM_PCSD;
3322 if (ifp->if_capenable & IFCAP_RXCSUM)
3323 rxcsum |= IXGBE_RXCSUM_PCSD;
3325 /* This is useful for calculating UDP/IP fragment checksums */
3326 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3327 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3329 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3336 ** This routine is run via an vlan config EVENT,
3337 ** it enables us to use the HW Filter table since
3338 ** we can get the vlan id. This just creates the
3339 ** entry in the soft version of the VFTA, init will
3340 ** repopulate the real table.
/*
 * ixgbe_register_vlan (vlan_config event handler):
 *   Record a newly-configured VLAN id in the soft shadow VFTA (bit
 *   index = vtag mod 32, word index = vtag >> 5) and re-run the VLAN
 *   hardware setup.  Ignores events for other interfaces and invalid
 *   tags.
 */
3343 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3345 struct adapter *adapter = ifp->if_softc;
3348 if (ifp->if_softc != arg) /* Not our event */
3351 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3354 IXGBE_CORE_LOCK(adapter);
3355 index = (vtag >> 5) & 0x7F;
3357 adapter->shadow_vfta[index] |= (1 << bit);
3358 ++adapter->num_vlans;
3359 ixgbe_setup_vlan_hw_support(adapter);
3360 IXGBE_CORE_UNLOCK(adapter);
3364 ** This routine is run via an vlan
3365 ** unconfig EVENT, remove our entry
3366 ** in the soft vfta.
/*
 * ixgbe_unregister_vlan (vlan_unconfig event handler):
 *   Clear the VLAN id's bit from the soft shadow VFTA and re-run the
 *   VLAN hardware setup to push the change to the device.  Mirror of
 *   ixgbe_register_vlan.
 */
3369 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3371 struct adapter *adapter = ifp->if_softc;
3374 if (ifp->if_softc != arg)
3377 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3380 IXGBE_CORE_LOCK(adapter);
3381 index = (vtag >> 5) & 0x7F;
3383 adapter->shadow_vfta[index] &= ~(1 << bit);
3384 --adapter->num_vlans;
3385 /* Re-init to load the changes */
3386 ixgbe_setup_vlan_hw_support(adapter);
3387 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixgbe_setup_vlan_hw_support:
 *   Push the soft VLAN state to hardware after a (re)init: enable
 *   per-queue tag stripping (RXDCTL VME on non-82598), repopulate the
 *   VFTA from the shadow copy, and set VLNCTRL filter/strip bits.
 *   Does nothing when no VLANs are registered.
 */
3391 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3393 struct ifnet *ifp = adapter->ifp;
3394 struct ixgbe_hw *hw = &adapter->hw;
3395 struct rx_ring *rxr;
3400 ** We get here thru init_locked, meaning
3401 ** a soft reset, this has already cleared
3402 ** the VFTA and other state, so if there
3403 ** have been no vlan's registered do nothing.
3405 if (adapter->num_vlans == 0)
3408 /* Setup the queues for vlans */
3409 for (int i = 0; i < adapter->num_queues; i++) {
3410 rxr = &adapter->rx_rings[i];
3411 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3412 if (hw->mac.type != ixgbe_mac_82598EB) {
3413 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3414 ctrl |= IXGBE_RXDCTL_VME;
3415 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3417 rxr->vtag_strip = TRUE;
/* Skip VFTA repopulation when HW filtering is disabled */
3420 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3423 ** A soft reset zero's out the VFTA, so
3424 ** we need to repopulate it now.
3426 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3427 if (adapter->shadow_vfta[i] != 0)
3428 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3429 adapter->shadow_vfta[i]);
3431 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3432 /* Enable the Filter Table if enabled */
3433 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3434 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3435 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL), not per-queue */
3437 if (hw->mac.type == ixgbe_mac_82598EB)
3438 ctrl |= IXGBE_VLNCTRL_VME;
3439 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
/*
 * ixgbe_enable_intr:
 *   Build and write the EIMS interrupt-enable mask with the per-MAC
 *   extras (ECC, thermal sensor, SFP GPIs, flow director, mailbox),
 *   set MSI-X auto-clear (EIAC) excluding the link/admin causes, then
 *   enable each queue's vector individually.
 */
3443 ixgbe_enable_intr(struct adapter *adapter)
3445 struct ixgbe_hw *hw = &adapter->hw;
3446 struct ix_queue *que = adapter->queues;
3449 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3450 /* Enable Fan Failure detection */
3451 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3452 mask |= IXGBE_EIMS_GPI_SDP1;
3454 switch (adapter->hw.mac.type) {
3455 case ixgbe_mac_82599EB:
3456 mask |= IXGBE_EIMS_ECC;
3457 /* Temperature sensor on some adapters */
3458 mask |= IXGBE_EIMS_GPI_SDP0;
3459 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3460 mask |= IXGBE_EIMS_GPI_SDP1;
3461 mask |= IXGBE_EIMS_GPI_SDP2;
3463 mask |= IXGBE_EIMS_FLOW_DIR;
3466 mask |= IXGBE_EIMS_MAILBOX;
3469 case ixgbe_mac_X540:
3470 /* Detect if Thermal Sensor is enabled */
3471 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3472 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3473 mask |= IXGBE_EIMS_TS;
3474 mask |= IXGBE_EIMS_ECC;
3476 mask |= IXGBE_EIMS_FLOW_DIR;
3479 case ixgbe_mac_X550:
3480 case ixgbe_mac_X550EM_x:
3481 /* MAC thermal sensor is automatically enabled */
3482 mask |= IXGBE_EIMS_TS;
3483 /* Some devices use SDP0 for important information */
3484 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3485 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3486 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3487 mask |= IXGBE_EIMS_ECC;
3489 mask |= IXGBE_EIMS_FLOW_DIR;
3492 mask |= IXGBE_EIMS_MAILBOX;
3499 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3501 /* With MSI-X we use auto clear */
3502 if (adapter->msix_mem) {
3503 mask = IXGBE_EIMS_ENABLE_MASK;
3504 /* Don't autoclear Link */
3505 mask &= ~IXGBE_EIMS_OTHER;
3506 mask &= ~IXGBE_EIMS_LSC;
3508 mask &= ~IXGBE_EIMS_MAILBOX;
3510 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3514 ** Now enable all queues, this is done separately to
3515 ** allow for handling the extended (beyond 32) MSIX
3516 ** vectors that can be used by 82599
3518 for (int i = 0; i < adapter->num_queues; i++, que++)
3519 ixgbe_enable_queue(adapter, que->msix);
/* Flush posted register writes before returning */
3521 IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr:
 *   Mask all interrupt causes: clear EIAC auto-clear when MSI-X is in
 *   use, then write the EIMC mask registers (82598 has a single EIMC;
 *   later MACs also have EIMC_EX(0/1) for the extended vectors) and
 *   flush.
 */
3527 ixgbe_disable_intr(struct adapter *adapter)
3529 if (adapter->msix_mem)
3530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3531 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3534 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3536 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3538 IXGBE_WRITE_FLUSH(&adapter->hw);
3543 ** Get the width and transaction speed of
3544 ** the slot this adapter is plugged into.
/*
 * For most devices the shared-code ixgbe_get_bus_info() suffices.  The
 * 82599 quad-port SFP adapter sits behind an on-board PCIe switch, so
 * for that device we walk two levels up the PCI tree to the expansion
 * slot and read the Link Status register there directly.  Finally a
 * warning is printed when the slot bandwidth is insufficient.
 * NOTE(review): extraction gaps here drop the return type, local
 * declarations (link/offset), braces, break statements and the else
 * arm of the SF_QP check -- verify against upstream.
 */
3547 ixgbe_get_slot_info(struct adapter *adapter)
3549 device_t dev = adapter->dev;
3550 struct ixgbe_hw *hw = &adapter->hw;
3551 struct ixgbe_mac_info *mac = &hw->mac;
3555 /* For most devices simply call the shared code routine */
3556 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3557 ixgbe_get_bus_info(hw);
3558 /* These devices don't use PCI-E */
3559 switch (hw->mac.type) {
3560 case ixgbe_mac_X550EM_x:
3568 ** For the Quad port adapter we need to parse back
3569 ** up the PCI tree to find the speed of the expansion
3570 ** slot into which this adapter is plugged. A bit more work.
/* First hop: the on-board PCIe switch (parent bridge) */
3572 dev = device_get_parent(device_get_parent(dev));
3574 device_printf(dev, "parent pcib = %x,%x,%x\n",
3575 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
/* Second hop: the bridge for the physical expansion slot */
3577 dev = device_get_parent(device_get_parent(dev));
3579 device_printf(dev, "slot pcib = %x,%x,%x\n",
3580 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3582 /* Now get the PCI Express Capabilities offset */
3583 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3584 /* ...and read the Link Status Register */
3585 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status bits */
3586 switch (link & IXGBE_PCI_LINK_WIDTH) {
3587 case IXGBE_PCI_LINK_WIDTH_1:
3588 hw->bus.width = ixgbe_bus_width_pcie_x1;
3590 case IXGBE_PCI_LINK_WIDTH_2:
3591 hw->bus.width = ixgbe_bus_width_pcie_x2;
3593 case IXGBE_PCI_LINK_WIDTH_4:
3594 hw->bus.width = ixgbe_bus_width_pcie_x4;
3596 case IXGBE_PCI_LINK_WIDTH_8:
3597 hw->bus.width = ixgbe_bus_width_pcie_x8;
3600 hw->bus.width = ixgbe_bus_width_unknown;
/* Decode negotiated link speed (Gen1/Gen2/Gen3) */
3604 switch (link & IXGBE_PCI_LINK_SPEED) {
3605 case IXGBE_PCI_LINK_SPEED_2500:
3606 hw->bus.speed = ixgbe_bus_speed_2500;
3608 case IXGBE_PCI_LINK_SPEED_5000:
3609 hw->bus.speed = ixgbe_bus_speed_5000;
3611 case IXGBE_PCI_LINK_SPEED_8000:
3612 hw->bus.speed = ixgbe_bus_speed_8000;
3615 hw->bus.speed = ixgbe_bus_speed_unknown;
/* Derive the LAN function id (port number) for this device */
3619 mac->ops.set_lan_id(hw);
3622 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3623 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3624 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3625 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3626 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3627 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3628 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn about marginal slots: non-quad parts want x8 or x4 Gen2 ... */
3631 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3632 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3633 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3634 device_printf(dev, "PCI-Express bandwidth available"
3635 " for this card\n is not sufficient for"
3636 " optimal performance.\n");
3637 device_printf(dev, "For optimal performance a x8 "
3638 "PCIE, or x4 PCIE Gen2 slot is required.\n");
/* ... while the quad-port part wants a full x8 Gen3 slot */
3640 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3641 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3642 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3643 device_printf(dev, "PCI-Express bandwidth available"
3644 " for this card\n is not sufficient for"
3645 " optimal performance.\n");
3646 device_printf(dev, "For optimal performance a x8 "
3647 "PCIE Gen3 slot is required.\n");
3655 ** Setup the correct IVAR register for a particular MSIX interrupt
3656 ** (yes this is all very magic and confusing :)
3657 ** - entry is the register array entry
3658 ** - vector is the MSIX vector for this queue
3659 ** - type is RX/TX/MISC
/*
 * The IVAR layout differs by MAC generation: 82598 packs four 8-bit
 * entries per 32-bit IVAR register (indexed by entry and type), while
 * 82599 and later use a dedicated IVAR_MISC register for type == -1
 * and pack RX/TX pairs two queues per IVAR register.
 * NOTE(review): extraction gaps drop local declarations (index, ivar),
 * braces and break statements -- verify against upstream.
 */
3662 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3664 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the IVAR entry as valid */
3667 vector |= IXGBE_IVAR_ALLOC_VAL;
3669 switch (hw->mac.type) {
3671 case ixgbe_mac_82598EB:
/* type == -1 selects the "other causes" slot on 82598 */
3673 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3675 entry += (type * 64);
3676 index = (entry >> 2) & 0x1F;
3677 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
/* Read-modify-write the 8-bit field for this entry */
3678 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3679 ivar |= (vector << (8 * (entry & 0x3)));
3680 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3683 case ixgbe_mac_82599EB:
3684 case ixgbe_mac_X540:
3685 case ixgbe_mac_X550:
3686 case ixgbe_mac_X550EM_x:
3687 if (type == -1) { /* MISC IVAR */
3688 index = (entry & 1) * 8;
3689 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3690 ivar &= ~(0xFF << index);
3691 ivar |= (vector << index);
3692 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3693 } else { /* RX/TX IVARS */
/* Two queues per register; RX in low half, TX offset by 8 bits */
3694 index = (16 * (entry & 1)) + (8 * type);
3695 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3696 ivar &= ~(0xFF << index);
3697 ivar |= (vector << index);
3698 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
 * ixgbe_configure_ivars - program IVAR entries for every queue pair
 * plus the link (misc) interrupt, and seed each queue's EITR with an
 * initial moderation value derived from ixgbe_max_interrupt_rate.
 * NOTE(review): extraction gaps drop the return type, the newitr
 * declaration and braces -- verify against upstream.
 */
3707 ixgbe_configure_ivars(struct adapter *adapter)
3709 struct ix_queue *que = adapter->queues;
/* EITR holds interval in 2us units in bits 3..11, hence the mask */
3712 if (ixgbe_max_interrupt_rate > 0)
3713 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3716 ** Disable DMA coalescing if interrupt moderation is
3723 for (int i = 0; i < adapter->num_queues; i++, que++) {
3724 struct rx_ring *rxr = &adapter->rx_rings[i];
3725 struct tx_ring *txr = &adapter->tx_rings[i];
3726 /* First the RX queue entry */
3727 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3728 /* ... and the TX */
3729 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3730 /* Set an Initial EITR value */
3731 IXGBE_WRITE_REG(&adapter->hw,
3732 IXGBE_EITR(que->msix), newitr);
3735 /* For the Link interrupt */
3736 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3740 ** ixgbe_sfp_probe - called in the local timer to
3741 ** determine if a port had optics inserted.
/*
 * Returns TRUE once a supported SFP+ module has been identified (the
 * result variable is presumably set in the dropped lines -- TODO
 * confirm).  Only probes ixgbe_phy_nl PHYs whose sfp_type is currently
 * "not present"; on success clears sfp_probe so the timer stops
 * polling, and refreshes the reported optics type.
 */
3744 ixgbe_sfp_probe(struct adapter *adapter)
3746 struct ixgbe_hw *hw = &adapter->hw;
3747 device_t dev = adapter->dev;
3748 bool result = FALSE;
3750 if ((hw->phy.type == ixgbe_phy_nl) &&
3751 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3752 s32 ret = hw->phy.ops.identify_sfp(hw);
3755 ret = hw->phy.ops.reset(hw);
3756 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3757 device_printf(dev, "Unsupported SFP+ module detected!");
3758 device_printf(dev, "Reload driver with supported module.\n");
/* Stop polling -- a driver reload is required anyway */
3759 adapter->sfp_probe = FALSE;
3762 device_printf(dev, "SFP+ module detected!\n");
3763 /* We now have supported optics */
3764 adapter->sfp_probe = FALSE;
3765 /* Set the optics type so system reports correctly */
3766 ixgbe_setup_optics(adapter);
3774 ** Tasklet handler for MSIX Link interrupts
3775 ** - do outside interrupt since it might sleep
/*
 * Taskqueue handler: re-checks link state, propagates it to the OS via
 * ixgbe_update_link_status(), then re-arms the LSC interrupt cause that
 * the interrupt handler masked.  'pending' is the standard taskqueue
 * argument and is unused here.
 */
3778 ixgbe_handle_link(void *context, int pending)
3780 struct adapter *adapter = context;
3781 struct ixgbe_hw *hw = &adapter->hw;
3783 ixgbe_check_link(hw,
3784 &adapter->link_speed, &adapter->link_up, 0);
3785 ixgbe_update_link_status(adapter);
3787 /* Re-enable link interrupts */
3788 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3792 ** Tasklet for handling SFP module interrupts
/*
 * Runs when an SFP+ module insertion/removal event is signalled.
 * Under the core lock: re-identifies the PHY, re-runs any PHY init
 * that was skipped when no module was present, identifies the new
 * module, programs the MAC for it via setup_sfp(), kicks off the
 * multispeed-fiber task when applicable, and refreshes the cached
 * optics/media type.
 * NOTE(review): extraction gaps drop the err declaration, several
 * braces/breaks and the device_printf() first arguments for the two
 * error messages -- verify against upstream.
 */
3795 ixgbe_handle_mod(void *context, int pending)
3797 struct adapter *adapter = context;
3798 struct ixgbe_hw *hw = &adapter->hw;
3799 enum ixgbe_phy_type orig_type = hw->phy.type;
3800 device_t dev = adapter->dev;
3803 IXGBE_CORE_LOCK(adapter);
3805 /* Check to see if the PHY type changed */
3806 if (hw->phy.ops.identify) {
3807 hw->phy.type = ixgbe_phy_unknown;
3808 hw->phy.ops.identify(hw);
3811 if (hw->phy.type != orig_type) {
3812 device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3814 if (hw->phy.type == ixgbe_phy_none) {
3815 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3819 /* Try to do the initialization that was skipped before */
3820 if (hw->phy.ops.init)
3821 hw->phy.ops.init(hw);
3822 if (hw->phy.ops.reset)
3823 hw->phy.ops.reset(hw);
3826 err = hw->phy.ops.identify_sfp(hw);
3827 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3829 "Unsupported SFP+ module type was detected.\n");
3833 err = hw->mac.ops.setup_sfp(hw);
3834 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3836 "Setup failure - unsupported SFP+ module type.\n");
/* Multispeed fiber needs the MSF task to renegotiate link speed */
3839 if (hw->phy.multispeed_fiber)
3840 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3842 /* Update media type */
3843 switch (hw->mac.ops.get_media_type(hw)) {
3844 case ixgbe_media_type_fiber:
3845 adapter->optics = IFM_10G_SR;
3847 case ixgbe_media_type_copper:
3848 adapter->optics = IFM_10G_TWINAX;
3850 case ixgbe_media_type_cx4:
3851 adapter->optics = IFM_10G_CX4;
/* Unknown media: leave optics unset */
3854 adapter->optics = 0;
3858 IXGBE_CORE_UNLOCK(adapter);
3864 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
 * Renegotiates link for multispeed-fiber modules: refreshes the
 * supported physical layer, determines advertised speeds (falling back
 * to hardware link capabilities when none are configured), sets up the
 * link, and rebuilds the ifmedia list to match the new module.
 * NOTE(review): extraction gaps drop the return type and the
 * autoneg/negotiate declarations -- verify against upstream.
 */
3867 ixgbe_handle_msf(void *context, int pending)
3869 struct adapter *adapter = context;
3870 struct ixgbe_hw *hw = &adapter->hw;
3874 IXGBE_CORE_LOCK(adapter);
3875 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3876 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3878 autoneg = hw->phy.autoneg_advertised;
3879 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3880 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3881 if (hw->mac.ops.setup_link)
3882 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3884 /* Adjust media types shown in ifconfig */
3885 ifmedia_removeall(&adapter->media);
3886 ixgbe_add_media_types(adapter);
3887 IXGBE_CORE_UNLOCK(adapter);
3892 ** Tasklet for handling interrupts from an external PHY
/*
 * Delegates to the PHY's LASI (Link Alarm Status Interrupt) handler and
 * reports the outcome: an over-temperature condition gets a critical
 * console message; any other error is logged with its code.
 * NOTE(review): extraction gaps drop the error declaration and the
 * else branch structure around the second device_printf -- verify
 * against upstream.
 */
3895 ixgbe_handle_phy(void *context, int pending)
3897 struct adapter *adapter = context;
3898 struct ixgbe_hw *hw = &adapter->hw;
3901 error = hw->phy.ops.handle_lasi(hw);
3902 if (error == IXGBE_ERR_OVERTEMP)
3903 device_printf(adapter->dev,
3904 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3905 " PHY will downshift to lower power state!\n");
3907 device_printf(adapter->dev,
3908 "Error handling LASI interrupt: %d\n",
3915 ** Tasklet for reinitializing the Flow Director filter table
/*
 * Scheduled when the Flow Director table must be rebuilt.  Guards on
 * the fdir_reinit flag, rebuilds the 82599 FDIR tables, clears the
 * flag, re-arms the FLOW_DIR interrupt cause and restarts the
 * interface.
 */
3918 ixgbe_reinit_fdir(void *context, int pending)
3920 struct adapter *adapter = context;
3921 struct ifnet *ifp = adapter->ifp;
3923 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3925 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3926 adapter->fdir_reinit = 0;
3927 /* re-enable flow director interrupts */
3928 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3929 /* Restart the interface */
3930 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3935 /*********************************************************************
3937 * Configure DMA Coalescing
3939 **********************************************************************/
/*
 * Only X550-class MACs with a dmac_config op support DMA coalescing.
 * Reconfigures the hardware only when the sysctl-controlled watchdog
 * value (adapter->dmac) or the current link speed differs from what is
 * already programmed (XOR used as inequality test on the pair).
 */
3941 ixgbe_config_dmac(struct adapter *adapter)
3943 struct ixgbe_hw *hw = &adapter->hw;
3944 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3946 if (hw->mac.type < ixgbe_mac_X550 ||
3947 !hw->mac.ops.dmac_config)
3950 if (dcfg->watchdog_timer ^ adapter->dmac ||
3951 dcfg->link_speed ^ adapter->link_speed) {
3952 dcfg->watchdog_timer = adapter->dmac;
/* FCoE coalescing is not used by this driver */
3953 dcfg->fcoe_en = false;
3954 dcfg->link_speed = adapter->link_speed;
3957 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3958 dcfg->watchdog_timer, dcfg->link_speed);
3960 hw->mac.ops.dmac_config(hw);
3965 * Checks whether the adapter's ports are capable of
3966 * Wake On LAN by reading the adapter's NVM.
3968 * Sets each port's hw->wol_enabled value depending
3969 * on the value read here.
/*
 * Reads the NVM device capabilities word: WoL is enabled when either
 * both ports support it, or port 0 supports it and (per the dropped
 * condition line -- TODO confirm) this is LAN function 0.  Also
 * snapshots the initial WUFC wake-up filter configuration for later
 * restore in the low-power path.
 */
3972 ixgbe_check_wol_support(struct adapter *adapter)
3974 struct ixgbe_hw *hw = &adapter->hw;
3977 /* Find out WoL support for port */
3978 adapter->wol_support = hw->wol_enabled = 0;
3979 ixgbe_get_device_caps(hw, &dev_caps);
3980 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3981 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3983 adapter->wol_support = hw->wol_enabled = 1;
3985 /* Save initial wake up filter configuration */
3986 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3992 * Prepare the adapter/port for LPLU and/or WoL
/*
 * Called with the core lock held (asserted below).  Powers down the
 * PHY when WoL is disabled.  For X550EM baseT parts with LPLU support
 * it disables APM wakeup, clears stale wake-up status, programs the
 * user's wake filters, enables PME in WUC, stops the interface with
 * PHY reset suppressed, and enters Low Power Link Up; all other
 * adapters are simply stopped.
 * NOTE(review): extraction gaps drop the error declaration and the
 * else arm pairing of the enter_lplu error report -- verify against
 * upstream.
 */
3995 ixgbe_setup_low_power_mode(struct adapter *adapter)
3997 struct ixgbe_hw *hw = &adapter->hw;
3998 device_t dev = adapter->dev;
4001 mtx_assert(&adapter->core_mtx, MA_OWNED);
4003 if (!hw->wol_enabled)
4004 ixgbe_set_phy_power(hw, FALSE);
4006 /* Limit power management flow to X550EM baseT */
4007 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4008 && hw->phy.ops.enter_lplu) {
4009 /* Turn off support for APM wakeup. (Using ACPI instead) */
4010 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4011 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4014 * Clear Wake Up Status register to prevent any previous wakeup
4015 * events from waking us up immediately after we suspend.
4017 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4020 * Program the Wakeup Filter Control register with user filter
4023 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4025 /* Enable wakeups and power management in Wakeup Control */
4026 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4027 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4029 /* X550EM baseT adapters need a special LPLU flow */
4030 hw->phy.reset_disable = true;
4031 ixgbe_stop(adapter);
4032 error = hw->phy.ops.enter_lplu(hw);
4035 "Error entering LPLU: %d\n", error);
/* Re-allow PHY resets once the LPLU attempt is done */
4036 hw->phy.reset_disable = false;
4038 /* Just stop for other adapters */
4039 ixgbe_stop(adapter);
4045 /**********************************************************************
4047 * Update the board statistics counters.
4049 **********************************************************************/
/*
 * Reads every hardware statistics register (all are clear-on-read in
 * this family's shared-code usage -- TODO confirm) and accumulates the
 * values into adapter->stats.pf, applying the documented hardware
 * workarounds, then publishes the totals to the OS ifnet counters.
 * NOTE(review): extraction gaps drop the missed-packet (mpc/qprdc)
 * accumulation loop that feeds missed_rx/total_missed_rx, plus several
 * braces and an else -- verify against upstream.
 */
4051 ixgbe_update_stats_counters(struct adapter *adapter)
4053 struct ixgbe_hw *hw = &adapter->hw;
4054 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4055 u64 total_missed_rx = 0;
4057 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4058 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4059 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4060 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/* Per-queue packet and drop counters (16 register sets) */
4062 for (int i = 0; i < 16; i++) {
4063 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4064 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4065 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4067 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4068 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4069 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4071 /* Hardware workaround, gprc counts missed packets */
4072 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4073 adapter->stats.pf.gprc -= missed_rx;
/* Newer MACs have 36-bit octet counters split low/high ... */
4075 if (hw->mac.type != ixgbe_mac_82598EB) {
4076 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4077 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4078 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4079 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4080 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4081 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4082 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4083 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
/* ... while 82598 uses the legacy single-register layout */
4085 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4086 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4087 /* 82598 only has a counter in the high register */
4088 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4089 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4090 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4094 * Workaround: mprc hardware is incorrectly counting
4095 * broadcasts, so for now we subtract those.
4097 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4098 adapter->stats.pf.bprc += bprc;
4099 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4100 if (hw->mac.type == ixgbe_mac_82598EB)
4101 adapter->stats.pf.mprc -= bprc;
/* RX size-bucketed frame counters */
4103 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4104 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4105 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4106 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4107 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4108 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4110 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4111 adapter->stats.pf.lxontxc += lxon;
4112 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4113 adapter->stats.pf.lxofftxc += lxoff;
4114 total = lxon + lxoff;
/* Flow-control frames are counted as TX packets; back them out */
4116 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4117 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4118 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4119 adapter->stats.pf.gptc -= total;
4120 adapter->stats.pf.mptc -= total;
4121 adapter->stats.pf.ptc64 -= total;
4122 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4124 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4125 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4126 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4127 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4128 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4129 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4130 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4131 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4132 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4133 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4134 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4135 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4136 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4137 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4138 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4139 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4140 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4141 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4142 /* Only read FCOE on 82599 */
4143 if (hw->mac.type != ixgbe_mac_82598EB) {
4144 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4145 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4146 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4147 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4148 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4151 /* Fill out the OS statistics structure */
4152 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4153 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4154 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4155 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4156 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4157 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4158 IXGBE_SET_COLLISIONS(adapter, 0);
4159 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4160 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4161 + adapter->stats.pf.rlec);
4164 #if __FreeBSD_version >= 1100036
/*
 * ixgbe_get_counter - if_get_counter(9) method for FreeBSD >= 11.
 *
 * Returns the cached per-adapter statistic for the requested counter;
 * OQDROPS is computed on the fly by summing the buf_ring drop counts
 * across all TX rings.  Unknown counters fall through to the stack's
 * default implementation.
 * NOTE(review): extraction gaps drop the rv declaration, the switch
 * keyword line, and the return statements for COLLISIONS/OQDROPS --
 * verify against upstream.
 */
4166 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4168 struct adapter *adapter;
4169 struct tx_ring *txr;
4172 adapter = if_getsoftc(ifp);
4175 case IFCOUNTER_IPACKETS:
4176 return (adapter->ipackets);
4177 case IFCOUNTER_OPACKETS:
4178 return (adapter->opackets);
4179 case IFCOUNTER_IBYTES:
4180 return (adapter->ibytes);
4181 case IFCOUNTER_OBYTES:
4182 return (adapter->obytes);
4183 case IFCOUNTER_IMCASTS:
4184 return (adapter->imcasts);
4185 case IFCOUNTER_OMCASTS:
4186 return (adapter->omcasts);
4187 case IFCOUNTER_COLLISIONS:
4189 case IFCOUNTER_IQDROPS:
4190 return (adapter->iqdrops);
4191 case IFCOUNTER_OQDROPS:
/* Sum software drops from each TX ring's buf_ring */
4193 txr = adapter->tx_rings;
4194 for (int i = 0; i < adapter->num_queues; i++, txr++)
4195 rv += txr->br->br_drops;
4197 case IFCOUNTER_IERRORS:
4198 return (adapter->ierrors);
4200 return (if_get_counter_default(ifp, cnt));
4205 /** ixgbe_sysctl_tdh_handler - Handler function
4206 * Retrieves the TDH value from the hardware
/*
 * Read-only sysctl: reports the current Transmit Descriptor Head
 * register for the TX ring passed via oid_arg1.
 */
4209 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4213 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4216 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4217 error = sysctl_handle_int(oidp, &val, 0, req);
4218 if (error || !req->newptr)
4223 /** ixgbe_sysctl_tdt_handler - Handler function
4224 * Retrieves the TDT value from the hardware
/*
 * Read-only sysctl: reports the current Transmit Descriptor Tail
 * register for the TX ring passed via oid_arg1.
 */
4227 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4231 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4234 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4235 error = sysctl_handle_int(oidp, &val, 0, req);
4236 if (error || !req->newptr)
4241 /** ixgbe_sysctl_rdh_handler - Handler function
4242 * Retrieves the RDH value from the hardware
/*
 * Read-only sysctl: reports the current Receive Descriptor Head
 * register for the RX ring passed via oid_arg1.
 */
4245 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4249 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4252 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4253 error = sysctl_handle_int(oidp, &val, 0, req);
4254 if (error || !req->newptr)
4259 /** ixgbe_sysctl_rdt_handler - Handler function
4260 * Retrieves the RDT value from the hardware
/*
 * Read-only sysctl: reports the current Receive Descriptor Tail
 * register for the RX ring passed via oid_arg1.
 */
4263 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4267 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4270 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4271 error = sysctl_handle_int(oidp, &val, 0, req);
4272 if (error || !req->newptr)
/*
 * Read/write sysctl for a queue's interrupt rate.  Converts the EITR
 * interval field (bits 3..11, in 2us units) to interrupts/second for
 * display; on write, validates the requested rate (must be < 500000),
 * converts it back to an EITR interval, updates the global
 * ixgbe_max_interrupt_rate default, and programs the queue's EITR.
 * NOTE(review): extraction gaps drop the error declaration, the
 * usec == 0 guard before the division, and surrounding braces --
 * verify against upstream.
 */
4278 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4281 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4282 unsigned int reg, usec, rate;
4284 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4285 usec = ((reg & 0x0FF8) >> 3);
/* interval is in 2us units, so rate = 1e6 / (2 * usec) */
4287 rate = 500000 / usec;
4290 error = sysctl_handle_int(oidp, &rate, 0, req);
4291 if (error || !req->newptr)
4293 reg &= ~0xfff; /* default, no limitation */
4294 ixgbe_max_interrupt_rate = 0;
4295 if (rate > 0 && rate < 500000) {
4298 ixgbe_max_interrupt_rate = rate;
4299 reg |= ((4000000/rate) & 0xff8 );
4301 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
/*
 * ixgbe_add_device_sysctls - register per-device sysctl nodes.
 *
 * Adds the common knobs (flow control, interrupt moderation, advertised
 * speed, thermal test, power state, RSS config dump), then
 * feature-gated groups: DMA coalescing for X550-class MACs, an "eee"
 * subtree for X552 KR backplane parts, WoL controls and an external
 * "phy" subtree for the X552/X557-AT 10GBase-T device.
 */
4306 ixgbe_add_device_sysctls(struct adapter *adapter)
4308 device_t dev = adapter->dev;
4309 struct ixgbe_hw *hw = &adapter->hw;
4310 struct sysctl_oid_list *child;
4311 struct sysctl_ctx_list *ctx;
4313 ctx = device_get_sysctl_ctx(dev);
4314 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4316 /* Sysctls for all devices */
4317 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4318 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4319 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4321 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4323 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4325 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4326 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4327 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4330 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4331 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4334 /* testing sysctls (for all devices) */
4335 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4336 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4337 ixgbe_sysctl_power_state, "I", "PCI Power State");
4339 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4340 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4341 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4343 /* for X550 series devices */
4344 if (hw->mac.type >= ixgbe_mac_X550)
4345 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4346 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4347 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4349 /* for X552 backplane devices */
4350 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4351 struct sysctl_oid *eee_node;
4352 struct sysctl_oid_list *eee_list;
4354 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4356 "Energy Efficient Ethernet sysctls");
4357 eee_list = SYSCTL_CHILDREN(eee_node);
4359 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4360 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4361 ixgbe_sysctl_eee_enable, "I",
4362 "Enable or Disable EEE");
4364 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4365 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4366 ixgbe_sysctl_eee_negotiated, "I",
4367 "EEE negotiated on link");
4369 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4370 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4371 ixgbe_sysctl_eee_tx_lpi_status, "I",
4372 "Whether or not TX link is in LPI state");
4374 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4375 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4376 ixgbe_sysctl_eee_rx_lpi_status, "I",
4377 "Whether or not RX link is in LPI state");
4379 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4380 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4381 ixgbe_sysctl_eee_tx_lpi_delay, "I",
4382 "TX LPI entry delay in microseconds");
4385 /* for WoL-capable devices */
4386 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4387 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4388 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4389 ixgbe_sysctl_wol_enable, "I",
4390 "Enable/Disable Wake on LAN");
4392 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4393 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4394 ixgbe_sysctl_wufc, "I",
4395 "Enable/Disable Wake Up Filters");
4398 /* for X552/X557-AT devices */
4399 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4400 struct sysctl_oid *phy_node;
4401 struct sysctl_oid_list *phy_list;
4403 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4405 "External PHY sysctls");
4406 phy_list = SYSCTL_CHILDREN(phy_node);
4408 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4409 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4410 ixgbe_sysctl_phy_temp, "I",
4411 "Current External PHY Temperature (Celsius)");
4413 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4414 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4415 ixgbe_sysctl_phy_overtemp_occurred, "I",
4416 "External PHY High Temperature Event Occurred");
4421 * Add sysctl variables, one per statistic, to the system.
4424 ixgbe_add_hw_stats(struct adapter *adapter)
4426 device_t dev = adapter->dev;
4428 struct tx_ring *txr = adapter->tx_rings;
4429 struct rx_ring *rxr = adapter->rx_rings;
4431 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4432 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4433 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4434 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4436 struct sysctl_oid *stat_node, *queue_node;
4437 struct sysctl_oid_list *stat_list, *queue_list;
4439 #define QUEUE_NAME_LEN 32
4440 char namebuf[QUEUE_NAME_LEN];
4442 /* Driver Statistics */
4443 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4444 CTLFLAG_RD, &adapter->dropped_pkts,
4445 "Driver dropped packets");
4446 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4447 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4448 "m_defrag() failed");
4449 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4450 CTLFLAG_RD, &adapter->watchdog_events,
4451 "Watchdog timeouts");
4452 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4453 CTLFLAG_RD, &adapter->link_irq,
4454 "Link MSIX IRQ Handled");
4456 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4457 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4458 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4459 CTLFLAG_RD, NULL, "Queue Name");
4460 queue_list = SYSCTL_CHILDREN(queue_node);
4462 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4463 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4464 sizeof(&adapter->queues[i]),
4465 ixgbe_sysctl_interrupt_rate_handler, "IU",
4467 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4468 CTLFLAG_RD, &(adapter->queues[i].irqs),
4469 "irqs on this queue");
4470 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4471 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4472 ixgbe_sysctl_tdh_handler, "IU",
4473 "Transmit Descriptor Head");
4474 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4475 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4476 ixgbe_sysctl_tdt_handler, "IU",
4477 "Transmit Descriptor Tail");
4478 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4479 CTLFLAG_RD, &txr->tso_tx,
4481 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4482 CTLFLAG_RD, &txr->no_tx_dma_setup,
4483 "Driver tx dma failure in xmit");
4484 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4485 CTLFLAG_RD, &txr->no_desc_avail,
4486 "Queue No Descriptor Available");
4487 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4488 CTLFLAG_RD, &txr->total_packets,
4489 "Queue Packets Transmitted");
4490 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4491 CTLFLAG_RD, &txr->br->br_drops,
4492 "Packets dropped in buf_ring");
4495 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4496 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4497 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4498 CTLFLAG_RD, NULL, "Queue Name");
4499 queue_list = SYSCTL_CHILDREN(queue_node);
4501 struct lro_ctrl *lro = &rxr->lro;
4503 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4504 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4505 CTLFLAG_RD, NULL, "Queue Name");
4506 queue_list = SYSCTL_CHILDREN(queue_node);
4508 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4509 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4510 ixgbe_sysctl_rdh_handler, "IU",
4511 "Receive Descriptor Head");
4512 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4513 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4514 ixgbe_sysctl_rdt_handler, "IU",
4515 "Receive Descriptor Tail");
4516 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4517 CTLFLAG_RD, &rxr->rx_packets,
4518 "Queue Packets Received");
4519 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4520 CTLFLAG_RD, &rxr->rx_bytes,
4521 "Queue Bytes Received");
4522 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4523 CTLFLAG_RD, &rxr->rx_copies,
4524 "Copied RX Frames");
4525 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4526 CTLFLAG_RD, &lro->lro_queued, 0,
4528 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4529 CTLFLAG_RD, &lro->lro_flushed, 0,
4533 /* MAC stats get the own sub node */
4535 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4536 CTLFLAG_RD, NULL, "MAC Statistics");
4537 stat_list = SYSCTL_CHILDREN(stat_node);
4539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4540 CTLFLAG_RD, &stats->crcerrs,
4542 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4543 CTLFLAG_RD, &stats->illerrc,
4544 "Illegal Byte Errors");
4545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4546 CTLFLAG_RD, &stats->errbc,
4548 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4549 CTLFLAG_RD, &stats->mspdc,
4550 "MAC Short Packets Discarded");
4551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4552 CTLFLAG_RD, &stats->mlfc,
4553 "MAC Local Faults");
4554 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4555 CTLFLAG_RD, &stats->mrfc,
4556 "MAC Remote Faults");
4557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4558 CTLFLAG_RD, &stats->rlec,
4559 "Receive Length Errors");
4561 /* Flow Control stats */
4562 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4563 CTLFLAG_RD, &stats->lxontxc,
4564 "Link XON Transmitted");
4565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4566 CTLFLAG_RD, &stats->lxonrxc,
4567 "Link XON Received");
4568 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4569 CTLFLAG_RD, &stats->lxofftxc,
4570 "Link XOFF Transmitted");
4571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4572 CTLFLAG_RD, &stats->lxoffrxc,
4573 "Link XOFF Received");
4575 /* Packet Reception Stats */
4576 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4577 CTLFLAG_RD, &stats->tor,
4578 "Total Octets Received");
4579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4580 CTLFLAG_RD, &stats->gorc,
4581 "Good Octets Received");
4582 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4583 CTLFLAG_RD, &stats->tpr,
4584 "Total Packets Received");
4585 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4586 CTLFLAG_RD, &stats->gprc,
4587 "Good Packets Received");
4588 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4589 CTLFLAG_RD, &stats->mprc,
4590 "Multicast Packets Received");
4591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4592 CTLFLAG_RD, &stats->bprc,
4593 "Broadcast Packets Received");
4594 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4595 CTLFLAG_RD, &stats->prc64,
4596 "64 byte frames received ");
4597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4598 CTLFLAG_RD, &stats->prc127,
4599 "65-127 byte frames received");
4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4601 CTLFLAG_RD, &stats->prc255,
4602 "128-255 byte frames received");
4603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4604 CTLFLAG_RD, &stats->prc511,
4605 "256-511 byte frames received");
4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4607 CTLFLAG_RD, &stats->prc1023,
4608 "512-1023 byte frames received");
4609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4610 CTLFLAG_RD, &stats->prc1522,
4611 "1023-1522 byte frames received");
4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4613 CTLFLAG_RD, &stats->ruc,
4614 "Receive Undersized");
4615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4616 CTLFLAG_RD, &stats->rfc,
4617 "Fragmented Packets Received ");
4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4619 CTLFLAG_RD, &stats->roc,
4620 "Oversized Packets Received");
4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4622 CTLFLAG_RD, &stats->rjc,
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4625 CTLFLAG_RD, &stats->mngprc,
4626 "Management Packets Received");
4627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4628 CTLFLAG_RD, &stats->mngptc,
4629 "Management Packets Dropped");
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4631 CTLFLAG_RD, &stats->xec,
4634 /* Packet Transmission Stats */
4635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4636 CTLFLAG_RD, &stats->gotc,
4637 "Good Octets Transmitted");
4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4639 CTLFLAG_RD, &stats->tpt,
4640 "Total Packets Transmitted");
4641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4642 CTLFLAG_RD, &stats->gptc,
4643 "Good Packets Transmitted");
4644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4645 CTLFLAG_RD, &stats->bptc,
4646 "Broadcast Packets Transmitted");
4647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4648 CTLFLAG_RD, &stats->mptc,
4649 "Multicast Packets Transmitted");
4650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4651 CTLFLAG_RD, &stats->mngptc,
4652 "Management Packets Transmitted");
4653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4654 CTLFLAG_RD, &stats->ptc64,
4655 "64 byte frames transmitted ");
4656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4657 CTLFLAG_RD, &stats->ptc127,
4658 "65-127 byte frames transmitted");
4659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4660 CTLFLAG_RD, &stats->ptc255,
4661 "128-255 byte frames transmitted");
4662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4663 CTLFLAG_RD, &stats->ptc511,
4664 "256-511 byte frames transmitted");
4665 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4666 CTLFLAG_RD, &stats->ptc1023,
4667 "512-1023 byte frames transmitted");
4668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4669 CTLFLAG_RD, &stats->ptc1522,
4670 "1024-1522 byte frames transmitted");
/*
 * Register a read/write integer sysctl under the device's sysctl tree
 * and seed the backing storage (*limit) with 'value'.
 * NOTE(review): return type and braces are missing from this excerpt.
 */
4674 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4675 const char *description, int *limit, int value)
4678 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4679 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4680 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4684 ** Set flow control using sysctl:
4685 ** Flow control values:
/*
 * Sysctl handler: read the requested flow-control mode from userland,
 * bail on error or a read-only access (newptr == NULL), skip no-op
 * changes, and delegate the actual programming to ixgbe_set_flowcntl().
 */
4692 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4695 struct adapter *adapter;
4697 adapter = (struct adapter *) arg1;
4700 error = sysctl_handle_int(oidp, &fc, 0, req);
4701 if ((error) || (req->newptr == NULL))
4704 /* Don't bother if it's not changed */
4705 if (adapter->fc == fc)
4708 return ixgbe_set_flowcntl(adapter, fc);
/*
 * Apply a flow-control mode to the hardware.  When any pause mode is
 * enabled, per-queue RX drop is disabled (and vice versa) so that flow
 * control and drop-on-full are never active together; forcing a value
 * also disables flow-control autonegotiation before calling
 * ixgbe_fc_enable().
 * NOTE(review): the switch header/labels for the none/full cases are
 * missing from this excerpt.
 */
4713 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4717 case ixgbe_fc_rx_pause:
4718 case ixgbe_fc_tx_pause:
4720 adapter->hw.fc.requested_mode = adapter->fc;
4721 if (adapter->num_queues > 1)
4722 ixgbe_disable_rx_drop(adapter);
4725 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4726 if (adapter->num_queues > 1)
4727 ixgbe_enable_rx_drop(adapter);
4733 /* Don't autoneg if forcing a value */
4734 adapter->hw.fc.disable_fc_autoneg = TRUE;
4735 ixgbe_fc_enable(&adapter->hw);
4740 ** Control advertised link speed:
4742 ** 0x1 - advertise 100 Mb
4743 ** 0x2 - advertise 1G
4744 ** 0x4 - advertise 10G
/* Sysctl handler: validate and forward a new advertise bitmask. */
4747 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4749 int error, advertise;
4750 struct adapter *adapter;
4752 adapter = (struct adapter *) arg1;
4753 advertise = adapter->advertise;
4755 error = sysctl_handle_int(oidp, &advertise, 0, req);
4756 if ((error) || (req->newptr == NULL))
4759 /* Checks to validate new value */
4760 if (adapter->advertise == advertise) /* no change */
4763 return ixgbe_set_advertise(adapter, advertise);
/*
 * Program the advertised link speeds.  Rejected outright for backplane
 * media; only copper and multispeed-fiber PHYs are configurable.  The
 * bitmask must be within 0x1..0x7, and the 100Mb bit (0x1) is only
 * legal on X540/X550 MACs.  On success the mask is translated into
 * IXGBE_LINK_SPEED_* flags and pushed via mac.ops.setup_link().
 */
4767 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4770 struct ixgbe_hw *hw;
4771 ixgbe_link_speed speed;
4776 /* No speed changes for backplane media */
4777 if (hw->phy.media_type == ixgbe_media_type_backplane)
4780 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4781 (hw->phy.multispeed_fiber))) {
4783 "Advertised speed can only be set on copper or "
4784 "multispeed fiber media types.\n");
4788 if (advertise < 0x1 || advertise > 0x7) {
4790 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4794 if ((advertise & 0x1)
4795 && (hw->mac.type != ixgbe_mac_X540)
4796 && (hw->mac.type != ixgbe_mac_X550)) {
4797 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4801 /* Set new value and report new advertised mode */
4803 if (advertise & 0x1)
4804 speed |= IXGBE_LINK_SPEED_100_FULL;
4805 if (advertise & 0x2)
4806 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4807 if (advertise & 0x4)
4808 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4809 adapter->advertise = advertise;
4811 hw->mac.autotry_restart = TRUE;
4812 hw->mac.ops.setup_link(hw, speed, TRUE);
4818 * The following two sysctls are for X552/X557-AT devices;
4819 * they deal with the external PHY used in them.
/*
 * Read the external PHY's current temperature register over MDIO and
 * report it via sysctl.  Only supported on the X550EM_X 10G-T device;
 * all other devices print an error and bail.
 */
4822 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4824 struct adapter *adapter = (struct adapter *) arg1;
4825 struct ixgbe_hw *hw = &adapter->hw;
4828 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4829 device_printf(adapter->dev,
4830 "Device has no supported external thermal sensor.\n");
4834 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4835 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4837 device_printf(adapter->dev,
4838 "Error reading from PHY's current temperature register\n");
4842 /* Shift temp for output */
4845 return (sysctl_handle_int(oidp, NULL, reg, req));
4849 * Reports whether the current PHY temperature is over
4850 * the overtemp threshold.
4851 * - This is reported directly from the PHY
/* Same device gating as above; reports the overtemp-occurred bit. */
4854 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4856 struct adapter *adapter = (struct adapter *) arg1;
4857 struct ixgbe_hw *hw = &adapter->hw;
4860 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4861 device_printf(adapter->dev,
4862 "Device has no supported external thermal sensor.\n");
4866 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4867 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4869 device_printf(adapter->dev,
4870 "Error reading from PHY's temperature status register\n");
4874 /* Get occurrence bit */
4875 reg = !!(reg & 0x4000);
4876 return (sysctl_handle_int(oidp, 0, reg, req));
4880 ** Thermal Shutdown Trigger (internal MAC)
4881 ** - Set this to 1 to cause an overtemp event to occur
/*
 * Debug sysctl: writing a non-zero value sets the thermal-sensor bit in
 * EICS, forcing the MAC to raise an overtemp interrupt.
 */
4884 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4886 struct adapter *adapter = (struct adapter *) arg1;
4887 struct ixgbe_hw *hw = &adapter->hw;
4888 int error, fire = 0;
4890 error = sysctl_handle_int(oidp, &fire, 0, req);
4891 if ((error) || (req->newptr == NULL))
4895 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4896 reg |= IXGBE_EICR_TS;
4897 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4904 ** Manage DMA Coalescing.
4906 ** 0/1 - off / on (use default value of 1000)
4908 ** Legal timer values are:
4909 ** 50,100,250,500,1000,2000,5000,10000
4911 ** Turning off interrupt moderation will also turn this off.
/*
 * Sysctl handler for adapter->dmac.  1 selects the default timer
 * (1000); the listed legal timer values are stored verbatim; anything
 * else is ignored.  If the interface is up, re-init so the new setting
 * reaches hardware.
 * NOTE(review): the switch skeleton validating newval is missing from
 * this excerpt.
 */
4914 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4916 struct adapter *adapter = (struct adapter *) arg1;
4917 struct ifnet *ifp = adapter->ifp;
4921 newval = adapter->dmac;
4922 error = sysctl_handle_int(oidp, &newval, 0, req);
4923 if ((error) || (req->newptr == NULL))
4932 /* Enable and use default */
4933 adapter->dmac = 1000;
4943 /* Legal values - allow */
4944 adapter->dmac = newval;
4947 /* Do nothing, illegal value */
4951 /* Re-initialize hardware if it's already running */
4952 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4953 ixgbe_init(adapter);
4960 * Sysctl to test power states
4962 * 0 - set device to D0
4963 * 3 - set device to D3
4964 * (none) - get current device power state
/*
 * Reading returns the current PCI power state; writing 0/3 suspends or
 * resumes the device via the newbus DEVICE_SUSPEND/DEVICE_RESUME
 * methods.  Transitions other than 0<->3 are not acted on here.
 */
4967 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4969 struct adapter *adapter = (struct adapter *) arg1;
4970 device_t dev = adapter->dev;
4971 int curr_ps, new_ps, error = 0;
4973 curr_ps = new_ps = pci_get_powerstate(dev);
4975 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4976 if ((error) || (req->newptr == NULL))
4979 if (new_ps == curr_ps)
4982 if (new_ps == 3 && curr_ps == 0)
4983 error = DEVICE_SUSPEND(dev);
4984 else if (new_ps == 0 && curr_ps == 3)
4985 error = DEVICE_RESUME(dev);
4989 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4995 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
/*
 * Normalizes the input to 0/1, skips no-op writes, and refuses to
 * enable Wake-on-LAN when the adapter lacks WoL support.
 */
5001 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5003 struct adapter *adapter = (struct adapter *) arg1;
5004 struct ixgbe_hw *hw = &adapter->hw;
5005 int new_wol_enabled;
5008 new_wol_enabled = hw->wol_enabled;
5009 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5010 if ((error) || (req->newptr == NULL))
5012 new_wol_enabled = !!(new_wol_enabled);
5013 if (new_wol_enabled == hw->wol_enabled)
5016 if (new_wol_enabled > 0 && !adapter->wol_support)
5019 hw->wol_enabled = new_wol_enabled;
5025 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5026 * if supported by the adapter.
/*
 * Normalizes the input to 0/1 and refuses to enable EEE when the MAC
 * has no setup_eee op.  A change while the interface is running forces
 * a re-init so the hardware picks up the new state.
 */
5032 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
5034 struct adapter *adapter = (struct adapter *) arg1;
5035 struct ixgbe_hw *hw = &adapter->hw;
5036 struct ifnet *ifp = adapter->ifp;
5037 int new_eee_enabled, error = 0;
5039 new_eee_enabled = adapter->eee_enabled;
5040 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
5041 if ((error) || (req->newptr == NULL))
5043 new_eee_enabled = !!(new_eee_enabled);
5044 if (new_eee_enabled == adapter->eee_enabled)
5047 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5050 adapter->eee_enabled = new_eee_enabled;
5052 /* Re-initialize hardware if it's already running */
5053 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5054 ixgbe_init(adapter);
5060 * Read-only sysctl indicating whether EEE support was negotiated
/* Reports the EEE negotiation bit from the EEE_STAT register as 0/1. */
5064 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5066 struct adapter *adapter = (struct adapter *) arg1;
5067 struct ixgbe_hw *hw = &adapter->hw;
5070 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5072 return (sysctl_handle_int(oidp, 0, status, req));
5076 * Read-only sysctl indicating whether RX Link is in LPI state.
/* Reports the RX low-power-idle status bit as 0/1. */
5079 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5081 struct adapter *adapter = (struct adapter *) arg1;
5082 struct ixgbe_hw *hw = &adapter->hw;
5085 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5086 IXGBE_EEE_RX_LPI_STATUS);
5088 return (sysctl_handle_int(oidp, 0, status, req));
5092 * Read-only sysctl indicating whether TX Link is in LPI state.
/* Reports the TX low-power-idle status bit as 0/1. */
5095 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5097 struct adapter *adapter = (struct adapter *) arg1;
5098 struct ixgbe_hw *hw = &adapter->hw;
5101 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5102 IXGBE_EEE_TX_LPI_STATUS);
5104 return (sysctl_handle_int(oidp, 0, status, req));
5108 * Read-only sysctl indicating TX Link LPI delay
/* Reports EEE_SU bits 31:26 (the TX LPI delay field) via sysctl. */
5111 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5113 struct adapter *adapter = (struct adapter *) arg1;
5114 struct ixgbe_hw *hw = &adapter->hw;
5117 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5119 return (sysctl_handle_int(oidp, 0, reg >> 26, req));
5123 * Sysctl to enable/disable the types of packets that the
5124 * adapter will wake up on upon receipt.
5125 * WUFC - Wake Up Filter Control
5127 * 0x1 - Link Status Change
5128 * 0x2 - Magic Packet
5129 * 0x4 - Direct Exact
5130 * 0x8 - Directed Multicast
5132 * 0x20 - ARP/IPv4 Request Packet
5133 * 0x40 - Direct IPv4 Packet
5134 * 0x80 - Direct IPv6 Packet
5136 * Setting another flag will cause the sysctl to return an
/*
 * Rejects any bits above the low byte (0xffffff00 mask check), then
 * merges the accepted low bits into adapter->wufc.
 */
5140 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5142 struct adapter *adapter = (struct adapter *) arg1;
5146 new_wufc = adapter->wufc;
5148 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5149 if ((error) || (req->newptr == NULL))
5151 if (new_wufc == adapter->wufc)
5154 if (new_wufc & 0xffffff00)
5158 new_wufc |= (0xffffff & adapter->wufc);
5159 adapter->wufc = new_wufc;
/*
 * Debug-only (IXGBE_DEBUG) sysctl: dump the RSS redirection table into
 * an sbuf and return it to userland.  X550-class MACs have the larger
 * table; entries past 32 live in the ERETA registers.
 */
5167 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5169 struct adapter *adapter = (struct adapter *)arg1;
5170 struct ixgbe_hw *hw = &adapter->hw;
5171 device_t dev = adapter->dev;
5172 int error = 0, reta_size;
5176 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5178 device_printf(dev, "Could not allocate sbuf for output.\n");
5182 // TODO: use sbufs to make a string to print out
5183 /* Set multiplier for RETA setup and table size based on MAC */
5184 switch (adapter->hw.mac.type) {
5185 case ixgbe_mac_X550:
5186 case ixgbe_mac_X550EM_x:
5194 /* Print out the redirection table */
5195 sbuf_cat(buf, "\n");
5196 for (int i = 0; i < reta_size; i++) {
5198 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5199 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5201 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5202 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5206 // TODO: print more config
5208 error = sbuf_finish(buf);
5210 device_printf(dev, "Error finishing sbuf: %d\n", error);
5215 #endif /* IXGBE_DEBUG */
5218 ** Enable the hardware to drop packets when the buffer is
5219 ** full. This is useful when multiqueue,so that no single
5220 ** queue being full stalls the entire RX engine. We only
5221 ** enable this when Multiqueue AND when Flow Control is
/*
 * Set SRRCTL.DROP_EN on every PF queue and the QDE drop-enable bit for
 * every VF pool.  Counterpart of ixgbe_disable_rx_drop() below.
 */
5225 ixgbe_enable_rx_drop(struct adapter *adapter)
5227 struct ixgbe_hw *hw = &adapter->hw;
5229 for (int i = 0; i < adapter->num_queues; i++) {
5230 struct rx_ring *rxr = &adapter->rx_rings[i];
5231 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5232 srrctl |= IXGBE_SRRCTL_DROP_EN;
5233 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5236 /* enable drop for each vf */
5237 for (int i = 0; i < adapter->num_vfs; i++) {
5238 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5239 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/*
 * Clear SRRCTL.DROP_EN on every PF queue and write QDE entries without
 * the drop-enable bit for every VF pool.
 */
5246 ixgbe_disable_rx_drop(struct adapter *adapter)
5248 struct ixgbe_hw *hw = &adapter->hw;
5250 for (int i = 0; i < adapter->num_queues; i++) {
5251 struct rx_ring *rxr = &adapter->rx_rings[i];
5252 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5253 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5254 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5257 /* disable drop for each vf */
5258 for (int i = 0; i < adapter->num_vfs; i++) {
5259 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5260 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
/*
 * Re-trigger interrupts for the queues in the 'queues' bitmask by
 * writing EICS.  82598 uses the single 32-bit EICS register; newer
 * MACs split the 64-bit mask across EICS_EX(0)/EICS_EX(1).
 */
5266 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5270 switch (adapter->hw.mac.type) {
5271 case ixgbe_mac_82598EB:
5272 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5275 case ixgbe_mac_82599EB:
5276 case ixgbe_mac_X540:
5277 case ixgbe_mac_X550:
5278 case ixgbe_mac_X550EM_x:
5279 mask = (queues & 0xFFFFFFFF);
5280 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5281 mask = (queues >> 32);
5282 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5292 ** Support functions for SRIOV/VF management
/* Send a PF control message to every active VF via the mailbox. */
5296 ixgbe_ping_all_vfs(struct adapter *adapter)
5298 struct ixgbe_vf *vf;
5300 for (int i = 0; i < adapter->num_vfs; i++) {
5301 vf = &adapter->vfs[i];
5302 if (vf->flags & IXGBE_VF_ACTIVE)
5303 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * Program the VF pool's VMOLR/VMVIR for its default VLAN: drop inexact
 * filter matches and multicast-promiscuous, accept broadcast, and —
 * when a tag is set — require tagged traffic and stamp all TX with the
 * default tag.
 * NOTE(review): the 'tag' parameter line and the tag==0 branch are
 * missing from this excerpt.
 */
5309 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5312 struct ixgbe_hw *hw;
5313 uint32_t vmolr, vmvir;
5319 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5321 /* Do not receive packets that pass inexact filters. */
5322 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5324 /* Disable Multicast Promicuous Mode. */
5325 vmolr &= ~IXGBE_VMOLR_MPE;
5327 /* Accept broadcasts. */
5328 vmolr |= IXGBE_VMOLR_BAM;
5331 /* Accept non-vlan tagged traffic. */
5332 //vmolr |= IXGBE_VMOLR_AUPE;
5334 /* Allow VM to tag outgoing traffic; no default tag. */
5337 /* Require vlan-tagged traffic. */
5338 vmolr &= ~IXGBE_VMOLR_AUPE;
5340 /* Tag all traffic with provided vlan tag. */
5341 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5343 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5344 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
/*
 * Decide whether the VF's max frame size can coexist with the PF's.
 * Always compatible on non-82599 MACs.  On 82599: legacy (<=1.0) VF
 * APIs permit no jumbo frames on either side; 1.1+ permits VF jumbo
 * only when the PF also uses jumbo frames.
 * NOTE(review): the boolean return statements are missing from this
 * excerpt.
 */
5349 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5353 * Frame size compatibility between PF and VF is only a problem on
5354 * 82599-based cards. X540 and later support any combination of jumbo
5355 * frames on PFs and VFs.
5357 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5360 switch (vf->api_ver) {
5361 case IXGBE_API_VER_1_0:
5362 case IXGBE_API_VER_UNKNOWN:
5364 * On legacy (1.0 and older) VF versions, we don't support jumbo
5365 * frames on either the PF or the VF.
5367 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5368 vf->max_frame_size > ETHER_MAX_LEN)
5374 case IXGBE_API_VER_1_1:
5377 * 1.1 or later VF versions always work if they aren't using
5380 if (vf->max_frame_size <= ETHER_MAX_LEN)
5384 * Jumbo frames only work with VFs if the PF is also using jumbo
5387 if (adapter->max_frame_size <= ETHER_MAX_LEN)
/*
 * Reset a VF to its default state: restore the default VLAN config,
 * clear its receive-address register entry, and forget the negotiated
 * mailbox API version.
 */
5397 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5399 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5401 // XXX clear multicast addresses
5403 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5405 vf->api_ver = IXGBE_API_VER_UNKNOWN;
/* Set the VF's bit in the VFTE register bank to enable its TX queues. */
5410 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5412 struct ixgbe_hw *hw;
5413 uint32_t vf_index, vfte;
5417 vf_index = IXGBE_VF_INDEX(vf->pool);
5418 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5419 vfte |= IXGBE_VF_BIT(vf->pool);
5420 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
/*
 * Enable RX for the VF (VFRE bit) only if its frame size is compatible
 * with the PF's; otherwise clear the bit so it cannot receive.
 */
5425 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5427 struct ixgbe_hw *hw;
5428 uint32_t vf_index, vfre;
5432 vf_index = IXGBE_VF_INDEX(vf->pool);
5433 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5434 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5435 vfre |= IXGBE_VF_BIT(vf->pool);
5437 vfre &= ~IXGBE_VF_BIT(vf->pool);
5438 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
/*
 * Handle a VF_RESET mailbox message: reset the VF's state, re-program
 * its RAR entry if it already has a valid MAC (ACK) or not (NACK),
 * re-enable its TX/RX, mark it clear-to-send, and reply with the
 * permanent MAC address plus the multicast filter type.
 */
5443 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5445 struct ixgbe_hw *hw;
5447 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5451 ixgbe_process_vf_reset(adapter, vf);
5453 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5454 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5455 vf->ether_addr, vf->pool, TRUE);
5456 ack = IXGBE_VT_MSGTYPE_ACK;
5458 ack = IXGBE_VT_MSGTYPE_NACK;
5460 ixgbe_vf_enable_transmit(adapter, vf);
5461 ixgbe_vf_enable_receive(adapter, vf);
5463 vf->flags |= IXGBE_VF_CTS;
5465 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5466 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5467 resp[3] = hw->mac.mc_filter_type;
5468 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
/*
 * Handle a SET_MAC_ADDR mailbox message: NACK if the VF is not allowed
 * to change its MAC or the address fails validation; otherwise store
 * it, program the RAR, and ACK.
 */
5473 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5477 mac = (uint8_t*)&msg[1];
5479 /* Check that the VF has permission to change the MAC address. */
5480 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5481 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5485 if (ixgbe_validate_mac_addr(mac) != 0) {
5486 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5490 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5492 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5495 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5500 ** VF multicast addresses are set by using the appropriate bit in
5501 ** 1 of 128 32 bit addresses (4096 possible).
/*
 * Handle a SET_MULTICAST mailbox message: cache up to IXGBE_MAX_VF_MC
 * 12-bit hashes from the VF, set the corresponding bits in the MTA
 * (bits 11:5 select the register, 4:0 the bit), enable ROMPE so the
 * pool receives multicast hits, and ACK.
 */
5504 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5506 u16 *list = (u16*)&msg[1];
5508 u32 vmolr, vec_bit, vec_reg, mta_reg;
5510 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5511 entries = min(entries, IXGBE_MAX_VF_MC);
5513 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5515 vf->num_mc_hashes = entries;
5517 /* Set the appropriate MTA bit */
5518 for (int i = 0; i < entries; i++) {
5519 vf->mc_hash[i] = list[i];
5520 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5521 vec_bit = vf->mc_hash[i] & 0x1F;
5522 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5523 mta_reg |= (1 << vec_bit);
5524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5527 vmolr |= IXGBE_VMOLR_ROMPE;
5528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5529 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_VLAN mailbox message: NACK if the VF lacks the VLAN
 * capability or tries to enable VLAN 0; otherwise program the VLAN
 * filter table for the pool and ACK.
 */
5535 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5537 struct ixgbe_hw *hw;
5542 enable = IXGBE_VT_MSGINFO(msg[0]);
5543 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5545 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5546 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5550 /* It is illegal to enable vlan tag 0. */
5551 if (tag == 0 && enable != 0){
5552 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5556 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5557 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/*
 * Handle a SET_LPE (large packet enable / max frame size) message.
 * Out-of-range requests are deliberately ACKed without effect.  A
 * valid size is stored (minus CRC), the PF-wide max frame is updated,
 * RX is re-evaluated for compatibility, and MHADD.MFS is grown if the
 * PF's max frame no longer fits.
 */
5562 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5564 struct ixgbe_hw *hw;
5565 uint32_t vf_max_size, pf_max_size, mhadd;
5568 vf_max_size = msg[1];
5570 if (vf_max_size < ETHER_CRC_LEN) {
5571 /* We intentionally ACK invalid LPE requests. */
5572 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5576 vf_max_size -= ETHER_CRC_LEN;
5578 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5579 /* We intentionally ACK invalid LPE requests. */
5580 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5584 vf->max_frame_size = vf_max_size;
5585 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5588 * We might have to disable reception to this VF if the frame size is
5589 * not compatible with the config on the PF.
5591 ixgbe_vf_enable_receive(adapter, vf);
5593 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5594 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5596 if (pf_max_size < adapter->max_frame_size) {
5597 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5598 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5599 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5602 ixgbe_send_vf_ack(adapter, vf, msg[0]);
/* SET_MACVLAN is unimplemented; always NACK the request. */
5607 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5610 //XXX implement this
5611 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle API_NEGOTIATE: accept mailbox API 1.0 or 1.1 and ACK;
 * anything else resets the version to UNKNOWN and NACKs.
 */
5616 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5621 case IXGBE_API_VER_1_0:
5622 case IXGBE_API_VER_1_1:
5623 vf->api_ver = msg[1];
5624 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5627 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5628 ixgbe_send_vf_nack(adapter, vf, msg[0]);
/*
 * Handle GET_QUEUES: NACK on pre-1.1 APIs, otherwise reply with the
 * per-VF TX/RX queue counts for the current IOV mode, whether a
 * transparent default VLAN is in effect, and default queue 0.
 */
5635 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5638 struct ixgbe_hw *hw;
5639 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5644 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5646 case IXGBE_API_VER_1_0:
5647 case IXGBE_API_VER_UNKNOWN:
5648 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5652 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5653 IXGBE_VT_MSGTYPE_CTS;
5655 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5656 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5657 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5658 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5659 resp[IXGBE_VF_DEF_QUEUE] = 0;
5661 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
/*
 * Read one mailbox message from a VF and dispatch it.  VF_RESET is
 * always handled; every other message is NACKed until the VF has
 * completed a reset (IXGBE_VF_CTS set).  Unknown opcodes are NACKed.
 */
5666 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5668 struct ixgbe_hw *hw;
5669 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5674 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5679 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5680 adapter->ifp->if_xname, msg[0], vf->pool);
5681 if (msg[0] == IXGBE_VF_RESET) {
5682 ixgbe_vf_reset_msg(adapter, vf, msg);
5686 if (!(vf->flags & IXGBE_VF_CTS)) {
5687 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5691 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5692 case IXGBE_VF_SET_MAC_ADDR:
5693 ixgbe_vf_set_mac(adapter, vf, msg);
5695 case IXGBE_VF_SET_MULTICAST:
5696 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5698 case IXGBE_VF_SET_VLAN:
5699 ixgbe_vf_set_vlan(adapter, vf, msg);
5701 case IXGBE_VF_SET_LPE:
5702 ixgbe_vf_set_lpe(adapter, vf, msg);
5704 case IXGBE_VF_SET_MACVLAN:
5705 ixgbe_vf_set_macvlan(adapter, vf, msg);
5707 case IXGBE_VF_API_NEGOTIATE:
5708 ixgbe_vf_api_negotiate(adapter, vf, msg);
5710 case IXGBE_VF_GET_QUEUES:
5711 ixgbe_vf_get_queues(adapter, vf, msg);
5714 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5720 * Tasklet for handling VF -> PF mailbox messages.
/*
 * Taskqueue handler: under the core lock, poll every active VF for a
 * pending reset, message, or ack and process each accordingly.
 */
5723 ixgbe_handle_mbx(void *context, int pending)
5725 struct adapter *adapter;
5726 struct ixgbe_hw *hw;
5727 struct ixgbe_vf *vf;
5733 IXGBE_CORE_LOCK(adapter);
5734 for (i = 0; i < adapter->num_vfs; i++) {
5735 vf = &adapter->vfs[i];
5737 if (vf->flags & IXGBE_VF_ACTIVE) {
5738 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5739 ixgbe_process_vf_reset(adapter, vf);
5741 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5742 ixgbe_process_vf_msg(adapter, vf);
5744 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5745 ixgbe_process_vf_ack(adapter, vf);
5748 IXGBE_CORE_UNLOCK(adapter);
/*
 * pci_iov(9) init method: validate the requested VF count against the
 * IOV mode's maximum, allocate the VF state array, and re-init the
 * adapter under the core lock.  num_vfs is rolled back to 0 on any
 * failure path.
 */
5753 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5755 struct adapter *adapter;
5756 enum ixgbe_iov_mode mode;
5758 adapter = device_get_softc(dev);
5759 adapter->num_vfs = num_vfs;
5760 mode = ixgbe_get_iov_mode(adapter);
5762 if (num_vfs > ixgbe_max_vfs(mode)) {
5763 adapter->num_vfs = 0;
5767 IXGBE_CORE_LOCK(adapter);
5769 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5772 if (adapter->vfs == NULL) {
5773 adapter->num_vfs = 0;
5774 IXGBE_CORE_UNLOCK(adapter);
5778 ixgbe_init_locked(adapter);
5780 IXGBE_CORE_UNLOCK(adapter);
/*
 * pci_iov(9) uninit method: re-enable RX/TX for the PF pool, clear
 * VFRE/VFTE for the VF pools, disable virtualization (VT_CTL = 0), and
 * free the VF state array.
 * NOTE(review): the loop header iterating vf_reg is missing from this
 * excerpt.
 */
5787 ixgbe_uninit_iov(device_t dev)
5789 struct ixgbe_hw *hw;
5790 struct adapter *adapter;
5791 uint32_t pf_reg, vf_reg;
5793 adapter = device_get_softc(dev);
5796 IXGBE_CORE_LOCK(adapter);
5798 /* Enable rx/tx for the PF and disable it for all VFs. */
5799 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5800 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5801 IXGBE_VF_BIT(adapter->pool));
5802 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5803 IXGBE_VF_BIT(adapter->pool));
5809 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5810 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5812 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5814 free(adapter->vfs, M_IXGBE);
5815 adapter->vfs = NULL;
5816 adapter->num_vfs = 0;
5818 IXGBE_CORE_UNLOCK(adapter);
/*
 * Program the MAC for SR-IOV operation: select the VMDq/RSS queueing
 * scheme (MRQC/MTQC), set the PCIe VT mode (GCR_EXT), set the MSI-X
 * interrupt VT mode (GPIE), enable PF RX/TX, allow VM-to-VM loopback,
 * enable VT with the PF pool as default, and initialize each VF.
 *
 * FIX: the GPIE VT-mode field was being cleared in 'gcr_ext' (already
 * written back to hardware above) instead of 'gpie', so stale VTMODE
 * bits in GPIE were never cleared before OR-ing in the new mode.  The
 * mask now operates on 'gpie', the value read just before and written
 * back just after.
 */
5823 ixgbe_initialize_iov(struct adapter *adapter)
5825 struct ixgbe_hw *hw = &adapter->hw;
5826 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5827 enum ixgbe_iov_mode mode;
5830 mode = ixgbe_get_iov_mode(adapter);
5831 if (mode == IXGBE_NO_VM)
5834 IXGBE_CORE_LOCK_ASSERT(adapter);
5836 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5837 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5841 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5844 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5847 panic("Unexpected SR-IOV mode %d", mode);
5849 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5851 mtqc = IXGBE_MTQC_VT_ENA;
5854 mtqc |= IXGBE_MTQC_64VF;
5857 mtqc |= IXGBE_MTQC_32VF;
5860 panic("Unexpected SR-IOV mode %d", mode);
5862 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5865 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5866 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5867 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5870 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5873 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5876 panic("Unexpected SR-IOV mode %d", mode);
5878 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5881 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5882 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5885 gpie |= IXGBE_GPIE_VTMODE_64;
5888 gpie |= IXGBE_GPIE_VTMODE_32;
5891 panic("Unexpected SR-IOV mode %d", mode);
5893 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5895 /* Enable rx/tx for the PF. */
5896 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5897 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5898 IXGBE_VF_BIT(adapter->pool));
5899 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5900 IXGBE_VF_BIT(adapter->pool));
5902 /* Allow VM-to-VM communication. */
5903 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5905 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5906 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5907 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5909 for (i = 0; i < adapter->num_vfs; i++)
5910 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5915 ** Check the max frame setting of all active VF's
/* Fold every active VF's max frame size into the PF-wide maximum. */
5918 ixgbe_recalculate_max_frame(struct adapter *adapter)
5920 struct ixgbe_vf *vf;
5922 IXGBE_CORE_LOCK_ASSERT(adapter);
5924 for (int i = 0; i < adapter->num_vfs; i++) {
5925 vf = &adapter->vfs[i];
5926 if (vf->flags & IXGBE_VF_ACTIVE)
5927 ixgbe_update_max_frame(adapter, vf->max_frame_size);
/*
 * Bring one VF online (no-op for inactive VFs): unmask its mailbox
 * interrupt (PFMBIMR), apply its VLAN config, program its RAR if it
 * has a valid MAC, enable TX/RX, and notify it with a PF control
 * message.  Caller must hold the core lock.
 */
5933 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5935 struct ixgbe_hw *hw;
5936 uint32_t vf_index, pfmbimr;
5938 IXGBE_CORE_LOCK_ASSERT(adapter);
5942 if (!(vf->flags & IXGBE_VF_ACTIVE))
5945 vf_index = IXGBE_VF_INDEX(vf->pool);
5946 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5947 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5948 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5950 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5952 // XXX multicast addresses
5954 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5955 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5956 vf->ether_addr, vf->pool, TRUE);
5959 ixgbe_vf_enable_transmit(adapter, vf);
5960 ixgbe_vf_enable_receive(adapter, vf);
5962 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
/*
 * pci_iov(9) add_vf method: populate the VF slot's defaults (RAR index
 * vfnum+1 since RAR[0] is the PF's, no VLAN, standard frame size),
 * take the administrator-supplied MAC from the config nvlist if
 * present (granting CAP_MAC only when allow-set-mac is set), otherwise
 * let the VF pick its own MAC, then mark it active and initialize it.
 */
5966 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5968 struct adapter *adapter;
5969 struct ixgbe_vf *vf;
5972 adapter = device_get_softc(dev);
5974 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5975 vfnum, adapter->num_vfs));
5977 IXGBE_CORE_LOCK(adapter);
5978 vf = &adapter->vfs[vfnum];
5981 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5982 vf->rar_index = vfnum + 1;
5983 vf->default_vlan = 0;
5984 vf->max_frame_size = ETHER_MAX_LEN;
5985 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5987 if (nvlist_exists_binary(config, "mac-addr")) {
5988 mac = nvlist_get_binary(config, "mac-addr", NULL);
5989 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5990 if (nvlist_get_bool(config, "allow-set-mac"))
5991 vf->flags |= IXGBE_VF_CAP_MAC;
5994 * If the administrator has not specified a MAC address then
5995 * we must allow the VF to choose one.
5997 vf->flags |= IXGBE_VF_CAP_MAC;
5999 vf->flags = IXGBE_VF_ACTIVE;
6001 ixgbe_init_vf(adapter, vf);
6002 IXGBE_CORE_UNLOCK(adapter);
6006 #endif /* PCI_IOV */