1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /************************************************************************
 * Driver version string.
 * Appended to the adapter branding string by ixgbe_probe() when it
 * builds the device description.
 45 ************************************************************************/
46 char ixgbe_driver_version[] = "3.2.11-k";
49 /************************************************************************
 * PCI device ID table.
 52 * Used by probe to select devices to load on
 53 * Last field stores an index into ixgbe_strings
 54 * Last entry must be all 0s
 56 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 * Subvendor/subdevice of 0 act as wildcards in ixgbe_probe()'s match.
 57 ************************************************************************/
58 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
103 /* required last entry */
/*
 * NOTE(review): the all-zeros terminator entry and the closing brace are
 * not visible in this chunk -- confirm a {0, 0, 0, 0, 0} entry follows,
 * since ixgbe_probe() stops scanning only when ent->vendor_id == 0.
 */
107 /************************************************************************
108 * Table of branding strings
 *
 * Indexed by the "String Index" (last) field of the entries in
 * ixgbe_vendor_info_array; used by ixgbe_probe() to build the
 * device description.
109 ************************************************************************/
110 static char *ixgbe_strings[] = {
111 "Intel(R) PRO/10GbE PCI-Express Network Driver"
114 /************************************************************************
115 * Function prototypes
 *
 * Forward declarations for all static functions in this file.  Grouped
 * by role: newbus entry points, init/config helpers, TX/RX setup,
 * interrupt management, VLAN handling, sysctl handlers, SFP support,
 * and interrupt/taskqueue handlers.
116 ************************************************************************/
/* Newbus device interface entry points */
117 static int ixgbe_probe(device_t);
118 static int ixgbe_attach(device_t);
119 static int ixgbe_detach(device_t);
120 static int ixgbe_shutdown(device_t);
121 static int ixgbe_suspend(device_t);
122 static int ixgbe_resume(device_t);
123 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
124 static void ixgbe_init(void *);
125 static void ixgbe_stop(void *);
126 #if __FreeBSD_version >= 1100036
/* if_get_counter method, only on FreeBSD 11+ */
127 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* NOTE(review): the matching #endif for the guard above is not visible
 * in this chunk -- confirm it closes immediately after this prototype. */
129 static void ixgbe_init_device_features(struct adapter *);
130 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
131 static void ixgbe_add_media_types(struct adapter *);
132 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
133 static int ixgbe_media_change(struct ifnet *);
134 static int ixgbe_allocate_pci_resources(struct adapter *);
135 static void ixgbe_get_slot_info(struct adapter *);
136 static int ixgbe_allocate_msix(struct adapter *);
137 static int ixgbe_allocate_legacy(struct adapter *);
138 static int ixgbe_configure_interrupts(struct adapter *);
139 static void ixgbe_free_pci_resources(struct adapter *);
140 static void ixgbe_local_timer(void *);
141 static int ixgbe_setup_interface(device_t, struct adapter *);
142 static void ixgbe_config_gpie(struct adapter *);
143 static void ixgbe_config_dmac(struct adapter *);
144 static void ixgbe_config_delay_values(struct adapter *);
145 static void ixgbe_config_link(struct adapter *);
146 static void ixgbe_check_wol_support(struct adapter *);
147 static int ixgbe_setup_low_power_mode(struct adapter *);
148 static void ixgbe_rearm_queues(struct adapter *, u64);
150 static void ixgbe_initialize_transmit_units(struct adapter *);
151 static void ixgbe_initialize_receive_units(struct adapter *);
152 static void ixgbe_enable_rx_drop(struct adapter *);
153 static void ixgbe_disable_rx_drop(struct adapter *);
154 static void ixgbe_initialize_rss_mapping(struct adapter *);
156 static void ixgbe_enable_intr(struct adapter *);
157 static void ixgbe_disable_intr(struct adapter *);
158 static void ixgbe_update_stats_counters(struct adapter *);
159 static void ixgbe_set_promisc(struct adapter *);
160 static void ixgbe_set_multi(struct adapter *);
161 static void ixgbe_update_link_status(struct adapter *);
162 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
163 static void ixgbe_configure_ivars(struct adapter *);
164 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
166 static void ixgbe_setup_vlan_hw_support(struct adapter *);
167 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
168 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
170 static void ixgbe_add_device_sysctls(struct adapter *);
171 static void ixgbe_add_hw_stats(struct adapter *);
172 static int ixgbe_set_flowcntl(struct adapter *, int);
173 static int ixgbe_set_advertise(struct adapter *, int);
174 static int ixgbe_get_advertise(struct adapter *);
176 /* Sysctl handlers */
177 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
178 const char *, int *, int);
179 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
180 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
181 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
182 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
188 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
192 /* Support for pluggable optic modules */
193 static bool ixgbe_sfp_probe(struct adapter *);
195 /* Legacy (single vector) interrupt handler */
196 static void ixgbe_legacy_irq(void *);
198 /* The MSI/MSI-X Interrupt handlers */
199 static void ixgbe_msix_que(void *);
200 static void ixgbe_msix_link(void *);
202 /* Deferred interrupt tasklets */
203 static void ixgbe_handle_que(void *, int);
204 static void ixgbe_handle_link(void *, int);
205 static void ixgbe_handle_msf(void *, int);
206 static void ixgbe_handle_mod(void *, int);
207 static void ixgbe_handle_phy(void *, int);
210 /************************************************************************
211 * FreeBSD Device Interface Entry Points
 *
 * newbus method table, driver descriptor, and module registration
 * for the "ix" driver on the pci bus.
212 ************************************************************************/
213 static device_method_t ix_methods[] = {
214 /* Device interface */
215 DEVMETHOD(device_probe, ixgbe_probe),
216 DEVMETHOD(device_attach, ixgbe_attach),
217 DEVMETHOD(device_detach, ixgbe_detach),
218 DEVMETHOD(device_shutdown, ixgbe_shutdown),
219 DEVMETHOD(device_suspend, ixgbe_suspend),
220 DEVMETHOD(device_resume, ixgbe_resume),
/* SR-IOV methods (pci_iov interface) */
222 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
223 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
224 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* NOTE(review): the DEVMETHOD_END terminator and closing brace of
 * ix_methods are not visible in this chunk -- confirm they follow. */
229 static driver_t ix_driver = {
230 "ix", ix_methods, sizeof(struct adapter),
233 devclass_t ix_devclass;
234 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
236 MODULE_DEPEND(ix, pci, 1, 1, 1);
237 MODULE_DEPEND(ix, ether, 1, 1, 1);
238 #if __FreeBSD_version >= 1100000
/* netmap support is only depended upon on FreeBSD 11+ */
239 MODULE_DEPEND(ix, netmap, 1, 1, 1);
243 * TUNEABLE PARAMETERS:
/* Root of the hw.ix sysctl/tunable tree for this driver. */
246 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
249 * AIM: Adaptive Interrupt Moderation
250 * which means that the interrupt rate
251 * is varied over time based on the
252 * traffic for that interrupt vector
254 static int ixgbe_enable_aim = TRUE;
255 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
256 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
257 "Enable adaptive interrupt moderation");
/* Cap on per-vector interrupt rate (interrupts/second). */
259 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
260 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
261 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
262 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
264 /* How many packets rxeof tries to clean at a time */
265 static int ixgbe_rx_process_limit = 256;
266 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
267 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
268 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
270 /* How many packets txeof tries to clean at a time */
271 static int ixgbe_tx_process_limit = 256;
272 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
273 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
274 &ixgbe_tx_process_limit, 0,
275 "Maximum number of sent packets to process at a time, -1 means unlimited");
277 /* Flow control setting, default to full */
278 static int ixgbe_flow_control = ixgbe_fc_full;
279 TUNABLE_INT("hw.ix.flow_control", &ixgbe_flow_control);
280 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
281 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
283 /* Advertise Speed, default to 0 (auto) */
284 static int ixgbe_advertise_speed = 0;
285 TUNABLE_INT("hw.ix.advertise_speed", &ixgbe_advertise_speed);
286 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
290 * Smart speed setting, default to on
291 * this only works as a compile option
292 * right now as its during attach, set
293 * this to 'ixgbe_smart_speed_off' to
296 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
299 * MSI-X should be the default for best performance,
300 * but this allows it to be forced off for testing.
302 static int ixgbe_enable_msix = 1;
303 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
304 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
305 "Enable MSI-X interrupts");
308 * Number of Queues, can be set to 0,
309 * it then autoconfigures based on the
310 * number of cpus with a max of 8. This
311 * can be overriden manually here.
313 static int ixgbe_num_queues = 0;
314 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
315 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316 "Number of queues to configure, 0 indicates autoconfigure");
319 * Number of TX descriptors per ring,
320 * setting higher than RX as this seems
321 * the better performing choice.
323 static int ixgbe_txd = PERFORM_TXD;
324 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
325 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
326 "Number of transmit descriptors per queue");
328 /* Number of RX descriptors per ring */
329 static int ixgbe_rxd = PERFORM_RXD;
330 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
331 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
332 "Number of receive descriptors per queue");
335 * Defining this on will allow the use
336 * of unsupported SFP+ modules, note that
337 * doing so you are on your own :)
339 static int allow_unsupported_sfp = FALSE;
340 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
341 SYSCTL_INT(_hw_ix, OID_AUTO, allow_unsupported_sfp, CTLFLAG_RDTUN,
342 &allow_unsupported_sfp, 0,
343 "Allow unsupported SFP modules...use at your own risk");
346 * Not sure if Flow Director is fully baked,
347 * so we'll default to turning it off.
349 static int ixgbe_enable_fdir = 0;
350 TUNABLE_INT("hw.ix.enable_fdir", &ixgbe_enable_fdir);
351 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
352 "Enable Flow Director");
354 /* Legacy Transmit (single queue) */
355 static int ixgbe_enable_legacy_tx = 0;
356 TUNABLE_INT("hw.ix.enable_legacy_tx", &ixgbe_enable_legacy_tx);
357 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
358 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
360 /* Receive-Side Scaling */
361 static int ixgbe_enable_rss = 1;
362 TUNABLE_INT("hw.ix.enable_rss", &ixgbe_enable_rss);
363 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
364 "Enable Receive-Side Scaling (RSS)");
366 /* Keep running tab on them for sanity check */
367 static int ixgbe_total_ports;
/*
 * Indirection for the TX start path.
 * NOTE(review): presumably pointed at the legacy vs. multiqueue TX
 * routines depending on ixgbe_enable_legacy_tx -- the assignment site
 * is not visible in this chunk; confirm.
 */
369 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
370 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
/* malloc(9) type tag for all allocations made by this driver. */
372 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
374 /************************************************************************
375 * ixgbe_probe - Device identification routine
377 * Determines if the driver should be loaded on
378 * adapter based on its PCI vendor/device ID.
380 * return BUS_PROBE_DEFAULT on success, positive on failure
 *
 * Scans ixgbe_vendor_info_array for an entry matching this device's
 * vendor/device (and optionally subvendor/subdevice) IDs, and sets the
 * device description from ixgbe_strings on a match.
381 ************************************************************************/
/* NOTE(review): the "static int" return-type line and opening brace are
 * not visible in this chunk. */
383 ixgbe_probe(device_t dev)
385 ixgbe_vendor_info_t *ent;
387 u16 pci_vendor_id = 0;
388 u16 pci_device_id = 0;
389 u16 pci_subvendor_id = 0;
390 u16 pci_subdevice_id = 0;
391 char adapter_name[256];
393 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Fast reject: only Intel devices can match. */
395 pci_vendor_id = pci_get_vendor(dev);
396 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
399 pci_device_id = pci_get_device(dev);
400 pci_subvendor_id = pci_get_subvendor(dev);
401 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the ID table; an all-zeros entry terminates it. */
403 ent = ixgbe_vendor_info_array;
404 while (ent->vendor_id != 0) {
405 if ((pci_vendor_id == ent->vendor_id) &&
406 (pci_device_id == ent->device_id) &&
/* subvendor/subdevice of 0 in the table act as wildcards */
407 ((pci_subvendor_id == ent->subvendor_id) ||
408 (ent->subvendor_id == 0)) &&
409 ((pci_subdevice_id == ent->subdevice_id) ||
410 (ent->subdevice_id == 0))) {
411 sprintf(adapter_name, "%s, Version - %s",
412 ixgbe_strings[ent->index],
413 ixgbe_driver_version);
414 device_set_desc_copy(dev, adapter_name);
416 return (BUS_PROBE_DEFAULT);
/* NOTE(review): loop increment, final "return (ENXIO)" and closing
 * brace are not visible in this chunk. */
424 /************************************************************************
425 * ixgbe_attach - Device initialization routine
427 * Called when the driver is being loaded.
428 * Identifies the type of hardware, allocates all resources
429 * and initializes the hardware.
431 * return 0 on success, positive on failure
 *
 * NOTE(review): many lines of this function are missing from this chunk
 * (local declarations of hw/error/ctrl_ext, error-exit labels such as
 * err_late/err_out, and several closing braces).  Comments below only
 * describe what the visible code demonstrably does.
432 ************************************************************************/
/* NOTE(review): "static int" return-type line and opening brace not
 * visible in this chunk. */
434 ixgbe_attach(device_t dev)
436 struct adapter *adapter;
441 INIT_DEBUGOUT("ixgbe_attach: begin");
443 /* Allocate, clear, and link in our adapter structure */
444 adapter = device_get_softc(dev);
445 adapter->hw.back = adapter;
/* Core mutex protects the adapter; used by the timer callout below. */
450 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
452 /* Set up the timer callout */
453 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
455 /* Determine hardware revision */
456 hw->vendor_id = pci_get_vendor(dev);
457 hw->device_id = pci_get_device(dev);
458 hw->revision_id = pci_get_revid(dev);
459 hw->subsystem_vendor_id = pci_get_subvendor(dev);
460 hw->subsystem_device_id = pci_get_subdevice(dev);
463 * Make sure BUSMASTER is set
465 pci_enable_busmaster(dev);
467 /* Do base PCI setup - map BAR0 */
468 if (ixgbe_allocate_pci_resources(adapter)) {
469 device_printf(dev, "Allocation of PCI resources failed\n");
474 /* let hardware know driver is loaded */
475 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
476 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
477 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
480 * Initialize the shared code
482 if (ixgbe_init_shared_code(hw)) {
483 device_printf(dev, "Unable to initialize the shared code\n");
/* Mailbox params only exist when the MAC supports SR-IOV mailboxes. */
488 if (hw->mbx.ops.init_params)
489 hw->mbx.ops.init_params(hw);
491 hw->allow_unsupported_sfp = allow_unsupported_sfp;
493 /* Pick up the 82599 settings */
494 if (hw->mac.type != ixgbe_mac_82598EB) {
495 hw->phy.smart_speed = ixgbe_smart_speed;
496 adapter->num_segs = IXGBE_82599_SCATTER;
498 adapter->num_segs = IXGBE_82598_SCATTER;
500 ixgbe_init_device_features(adapter);
502 if (ixgbe_configure_interrupts(adapter)) {
507 /* Allocate multicast array memory. */
508 adapter->mta = malloc(sizeof(*adapter->mta) *
509 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
510 if (adapter->mta == NULL) {
511 device_printf(dev, "Can not allocate multicast setup array\n");
516 /* Enable WoL (if supported) */
517 ixgbe_check_wol_support(adapter);
519 /* Register for VLAN events */
520 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
521 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
522 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
523 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
525 /* Verify adapter fan is still functional (if applicable) */
526 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
527 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
528 ixgbe_check_fan_failure(adapter, esdp, false);
531 /* Enable EEE power saving */
532 if (adapter->feat_en & IXGBE_FEATURE_EEE)
533 hw->mac.ops.setup_eee(hw, true);
535 /* Set an initial default flow control value */
536 hw->fc.requested_mode = ixgbe_flow_control;
538 /* Put the semaphore in a known state (released) */
539 ixgbe_init_swfw_semaphore(hw);
541 /* Sysctls for limiting the amount of work done in the taskqueues */
542 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
543 "max number of rx packets to process",
544 &adapter->rx_process_limit, ixgbe_rx_process_limit);
546 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
547 "max number of tx packets to process",
548 &adapter->tx_process_limit, ixgbe_tx_process_limit);
550 /* Do descriptor calc and sanity checks */
/* TX descriptor count must keep the ring DBA_ALIGN-aligned and in range. */
551 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
552 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
553 device_printf(dev, "TXD config issue, using default!\n");
554 adapter->num_tx_desc = DEFAULT_TXD;
556 adapter->num_tx_desc = ixgbe_txd;
559 * With many RX rings it is easy to exceed the
560 * system mbuf allocation. Tuning nmbclusters
561 * can alleviate this.
563 if (nmbclusters > 0) {
565 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
566 if (s > nmbclusters) {
567 device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
568 ixgbe_rxd = DEFAULT_RXD;
/* Same alignment/range validation for the RX descriptor count. */
572 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
573 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
574 device_printf(dev, "RXD config issue, using default!\n");
575 adapter->num_rx_desc = DEFAULT_RXD;
577 adapter->num_rx_desc = ixgbe_rxd;
579 /* Allocate our TX/RX Queues */
580 if (ixgbe_allocate_queues(adapter)) {
/* Reset HW; tolerate over-temperature during this first reset only. */
585 hw->phy.reset_if_overtemp = TRUE;
586 error = ixgbe_reset_hw(hw);
587 hw->phy.reset_if_overtemp = FALSE;
588 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
590 * No optics in this port, set up
591 * so the timer routine will probe
592 * for later insertion.
594 adapter->sfp_probe = TRUE;
595 error = IXGBE_SUCCESS;
596 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
597 device_printf(dev, "Unsupported SFP+ module detected!\n");
601 device_printf(dev, "Hardware initialization failed\n");
606 /* Make sure we have a good EEPROM before we read from it */
607 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
608 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
613 /* Setup OS specific network interface */
614 if (ixgbe_setup_interface(dev, adapter) != 0)
/* Interrupt allocation: MSI-X when enabled, else legacy/MSI path. */
617 if (adapter->feat_en & IXGBE_FEATURE_MSIX)
618 error = ixgbe_allocate_msix(adapter);
620 error = ixgbe_allocate_legacy(adapter);
624 error = ixgbe_start_hw(hw);
/* NOTE(review): the switch statement head for this error dispatch is
 * not visible in this chunk. */
626 case IXGBE_ERR_EEPROM_VERSION:
627 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
629 case IXGBE_ERR_SFP_NOT_SUPPORTED:
630 device_printf(dev, "Unsupported SFP+ Module\n");
633 case IXGBE_ERR_SFP_NOT_PRESENT:
634 device_printf(dev, "No SFP+ Module found\n");
640 /* Enable the optics for 82599 SFP+ fiber */
641 ixgbe_enable_tx_laser(hw);
643 /* Enable power to the phy. */
644 ixgbe_set_phy_power(hw, TRUE);
646 /* Initialize statistics */
647 ixgbe_update_stats_counters(adapter);
649 /* Check PCIE slot type/speed/width */
650 ixgbe_get_slot_info(adapter);
653 * Do time init and sysctl init here, but
654 * only on the first port of a bypass adapter.
656 ixgbe_bypass_init(adapter);
658 /* Set an initial dmac value */
660 /* Set initial advertised speeds (if applicable) */
661 adapter->advertise = ixgbe_get_advertise(adapter);
663 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
664 ixgbe_define_iov_schemas(dev, &error);
667 ixgbe_add_device_sysctls(adapter);
668 ixgbe_add_hw_stats(adapter);
/* Dispatch pointers used by ixgbe_init()/ixgbe_stop() wrappers. */
671 adapter->init_locked = ixgbe_init_locked;
672 adapter->stop_locked = ixgbe_stop;
674 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
675 ixgbe_netmap_attach(adapter);
677 INIT_DEBUGOUT("ixgbe_attach: end");
/*
 * Error unwind path.  NOTE(review): the err_late/err_out labels and the
 * "return (0)"/"return (error)" statements are not visible in this
 * chunk; the teardown below mirrors the setup above in reverse order.
 */
682 ixgbe_free_transmit_structures(adapter);
683 ixgbe_free_receive_structures(adapter);
684 free(adapter->queues, M_IXGBE);
686 if (adapter->ifp != NULL)
687 if_free(adapter->ifp);
/* Clear the DRV_LOAD bit so firmware knows the driver is gone. */
688 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
689 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
690 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
691 ixgbe_free_pci_resources(adapter);
692 free(adapter->mta, M_IXGBE);
693 IXGBE_CORE_LOCK_DESTROY(adapter);
698 /************************************************************************
699 * ixgbe_detach - Device removal routine
701 * Called when the driver is being removed.
702 * Stops the adapter and deallocates all the resources
703 * that were allocated for driver operation.
705 * return 0 on success, positive on failure
 *
 * Refuses to detach while VLANs or SR-IOV VFs are still attached.
 * NOTE(review): several lines (return type, braces, EBUSY returns,
 * final return) are not visible in this chunk.
706 ************************************************************************/
708 ixgbe_detach(device_t dev)
710 struct adapter *adapter = device_get_softc(dev);
711 struct ix_queue *que = adapter->queues;
712 struct tx_ring *txr = adapter->tx_rings;
715 INIT_DEBUGOUT("ixgbe_detach: begin");
717 /* Make sure VLANS are not using driver */
718 if (adapter->ifp->if_vlantrunk != NULL) {
719 device_printf(dev, "Vlan in use, detach first\n");
723 if (ixgbe_pci_iov_detach(dev) != 0) {
724 device_printf(dev, "SR-IOV in use; detach first.\n");
728 ether_ifdetach(adapter->ifp);
729 /* Stop the adapter */
730 IXGBE_CORE_LOCK(adapter);
731 ixgbe_setup_low_power_mode(adapter);
732 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free each queue's taskqueue before teardown. */
734 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
/* txq task only exists on the multiqueue (non-legacy) TX path */
736 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
737 taskqueue_drain(que->tq, &txr->txq_task);
738 taskqueue_drain(que->tq, &que->que_task);
739 taskqueue_free(que->tq);
743 /* Drain the Link queue */
745 taskqueue_drain(adapter->tq, &adapter->link_task);
746 taskqueue_drain(adapter->tq, &adapter->mod_task);
747 taskqueue_drain(adapter->tq, &adapter->msf_task);
748 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
749 taskqueue_drain(adapter->tq, &adapter->mbx_task);
750 taskqueue_drain(adapter->tq, &adapter->phy_task);
751 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
752 taskqueue_drain(adapter->tq, &adapter->fdir_task);
753 taskqueue_free(adapter->tq);
756 /* let hardware know driver is unloading */
757 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
758 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
759 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
761 /* Unregister VLAN events */
762 if (adapter->vlan_attach != NULL)
763 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
764 if (adapter->vlan_detach != NULL)
765 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
767 callout_drain(&adapter->timer);
769 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
770 netmap_detach(adapter->ifp);
772 ixgbe_free_pci_resources(adapter);
773 bus_generic_detach(dev);
774 if_free(adapter->ifp);
776 ixgbe_free_transmit_structures(adapter);
777 ixgbe_free_receive_structures(adapter);
778 free(adapter->queues, M_IXGBE);
779 free(adapter->mta, M_IXGBE);
781 IXGBE_CORE_LOCK_DESTROY(adapter);
786 /************************************************************************
787 * ixgbe_shutdown - Shutdown entry point
 *
 * Puts the adapter into its low-power state under the core lock.
 * NOTE(review): return type, opening brace, local declarations and the
 * final return of 'error' are not visible in this chunk.
788 ************************************************************************/
790 ixgbe_shutdown(device_t dev)
792 struct adapter *adapter = device_get_softc(dev);
795 INIT_DEBUGOUT("ixgbe_shutdown: begin");
797 IXGBE_CORE_LOCK(adapter);
798 error = ixgbe_setup_low_power_mode(adapter);
799 IXGBE_CORE_UNLOCK(adapter);
802 } /* ixgbe_shutdown */
804 /************************************************************************
 * ixgbe_suspend - Suspend entry point
 *
 * Same as shutdown: transitions the adapter into low-power mode under
 * the core lock.  NOTE(review): return type, braces and the return of
 * 'error' are not visible in this chunk.
808 ************************************************************************/
810 ixgbe_suspend(device_t dev)
812 struct adapter *adapter = device_get_softc(dev);
815 INIT_DEBUGOUT("ixgbe_suspend: begin");
817 IXGBE_CORE_LOCK(adapter);
819 error = ixgbe_setup_low_power_mode(adapter);
821 IXGBE_CORE_UNLOCK(adapter);
824 } /* ixgbe_suspend */
826 /************************************************************************
 * ixgbe_resume - Resume entry point
 *
 * Clears the Wake Up Status/Filter registers and re-initializes the
 * interface if it was up before suspend.  NOTE(review): return type,
 * braces, the 'wus' declaration and the final return are not visible
 * in this chunk.
830 ************************************************************************/
832 ixgbe_resume(device_t dev)
834 struct adapter *adapter = device_get_softc(dev);
835 struct ifnet *ifp = adapter->ifp;
836 struct ixgbe_hw *hw = &adapter->hw;
839 INIT_DEBUGOUT("ixgbe_resume: begin");
841 IXGBE_CORE_LOCK(adapter);
843 /* Read & clear WUS register */
844 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
846 device_printf(dev, "Woken up by (WUS): %#010x\n",
847 IXGBE_READ_REG(hw, IXGBE_WUS));
/* Write-1-to-clear all wake-up status bits */
848 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
849 /* And clear WUFC until next low-power transition */
850 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
853 * Required after D3->D0 transition;
854 * will re-advertise all previous advertised speeds
856 if (ifp->if_flags & IFF_UP)
857 ixgbe_init_locked(adapter);
859 IXGBE_CORE_UNLOCK(adapter);
865 /************************************************************************
866 * ixgbe_ioctl - Ioctl entry point
868 * Called when the user wants to configure the interface.
870 * return 0 on success, positive on failure
 *
 * NOTE(review): this chunk is missing the switch statement head, all of
 * its "case SIOCxxx:" labels, several break statements and braces --
 * the IOCTL_DEBUGOUT strings below indicate which command each visible
 * fragment handles.
871 ************************************************************************/
873 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
875 struct adapter *adapter = ifp->if_softc;
876 struct ifreq *ifr = (struct ifreq *) data;
877 #if defined(INET) || defined(INET6)
878 struct ifaddr *ifa = (struct ifaddr *)data;
881 bool avoid_reset = FALSE;
/* SIOCSIFADDR path (presumably -- case label not visible):
 * avoid a full reinit/link renegotiation for plain address changes. */
886 if (ifa->ifa_addr->sa_family == AF_INET)
890 if (ifa->ifa_addr->sa_family == AF_INET6)
894 * Calling init results in link renegotiation,
895 * so we avoid doing it when possible.
898 ifp->if_flags |= IFF_UP;
899 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
902 if (!(ifp->if_flags & IFF_NOARP))
903 arp_ifinit(ifp, ifa);
906 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate and apply new MTU, then reinit */
909 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
910 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
913 IXGBE_CORE_LOCK(adapter);
914 ifp->if_mtu = ifr->ifr_mtu;
915 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
916 ixgbe_init_locked(adapter);
917 ixgbe_recalculate_max_frame(adapter);
918 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: bring interface up/down; promisc/allmulti changes
 * only need ixgbe_set_promisc(), not a full reinit. */
922 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
923 IXGBE_CORE_LOCK(adapter);
924 if (ifp->if_flags & IFF_UP) {
925 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
926 if ((ifp->if_flags ^ adapter->if_flags) &
927 (IFF_PROMISC | IFF_ALLMULTI)) {
928 ixgbe_set_promisc(adapter);
931 ixgbe_init_locked(adapter);
933 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
935 adapter->if_flags = ifp->if_flags;
936 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reload the multicast filter with
 * interrupts masked. */
940 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
941 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
942 IXGBE_CORE_LOCK(adapter);
943 ixgbe_disable_intr(adapter);
944 ixgbe_set_multi(adapter);
945 ixgbe_enable_intr(adapter);
946 IXGBE_CORE_UNLOCK(adapter);
/* SIOCGIFMEDIA/SIOCSIFMEDIA: delegate to ifmedia */
951 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
952 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities, then reinit if running */
956 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
958 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
963 /* HW cannot turn these on/off separately */
964 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
965 ifp->if_capenable ^= IFCAP_RXCSUM;
966 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
968 if (mask & IFCAP_TXCSUM)
969 ifp->if_capenable ^= IFCAP_TXCSUM;
970 if (mask & IFCAP_TXCSUM_IPV6)
971 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
972 if (mask & IFCAP_TSO4)
973 ifp->if_capenable ^= IFCAP_TSO4;
974 if (mask & IFCAP_TSO6)
975 ifp->if_capenable ^= IFCAP_TSO6;
976 if (mask & IFCAP_LRO)
977 ifp->if_capenable ^= IFCAP_LRO;
978 if (mask & IFCAP_VLAN_HWTAGGING)
979 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
980 if (mask & IFCAP_VLAN_HWFILTER)
981 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
982 if (mask & IFCAP_VLAN_HWTSO)
983 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
985 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
986 IXGBE_CORE_LOCK(adapter);
987 ixgbe_init_locked(adapter);
988 IXGBE_CORE_UNLOCK(adapter);
990 VLAN_CAPABILITIES(ifp);
993 #if __FreeBSD_version >= 1002500
/* SIOCGI2C: read raw SFP module data over I2C (addresses 0xA0/0xA2
 * are the standard SFP EEPROM/diagnostics addresses). */
996 struct ixgbe_hw *hw = &adapter->hw;
1000 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
1001 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1004 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
/* Bound the user-requested length by the kernel-side buffer. */
1008 if (i2c.len > sizeof(i2c.data)) {
1013 for (i = 0; i < i2c.len; i++)
1014 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
1015 i2c.dev_addr, &i2c.data[i]);
1016 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* default: hand anything unrecognized to the generic ethernet ioctl */
1021 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1022 error = ether_ioctl(ifp, command, data);
1029 /************************************************************************
1030 * ixgbe_init_device_features
 *
 *   Determine per-MAC hardware capabilities (adapter->feat_cap) from the
 *   MAC type / device id, then populate adapter->feat_en with the subset
 *   that is enabled by default or via global sysctls, and finally strip
 *   out features whose dependencies (MSI-X) are not met.
 *
 *   NOTE(review): blank lines, 'break' statements, and some braces are
 *   elided from this view of the source (line-number gaps); the visible
 *   code is reproduced verbatim.
1031 ************************************************************************/
1033 ixgbe_init_device_features(struct adapter *adapter)
 /* Baseline capabilities common to all supported MACs. */
1035 adapter->feat_cap = IXGBE_FEATURE_NETMAP
1038 | IXGBE_FEATURE_MSIX
1039 | IXGBE_FEATURE_LEGACY_IRQ
1040 | IXGBE_FEATURE_LEGACY_TX;
1042 /* Set capabilities first... */
1043 switch (adapter->hw.mac.type) {
1044 case ixgbe_mac_82598EB:
 /* Only the 82598AT variant carries a fan-failure sensor. */
1045 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
1046 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
1048 case ixgbe_mac_X540:
1049 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1050 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
 /* Bypass feature is exposed only on function 0 of the bypass SKU. */
1051 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
1052 (adapter->hw.bus.func == 0))
1053 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
1055 case ixgbe_mac_X550:
1056 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1057 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1058 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1060 case ixgbe_mac_X550EM_x:
1061 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1062 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1063 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
1064 adapter->feat_cap |= IXGBE_FEATURE_EEE;
1066 case ixgbe_mac_X550EM_a:
1067 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1068 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
 /* X550EM_a does not support legacy (INTx) interrupts. */
1069 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1070 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
1071 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
1072 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1073 adapter->feat_cap |= IXGBE_FEATURE_EEE;
1076 case ixgbe_mac_82599EB:
1077 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1078 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1079 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
1080 (adapter->hw.bus.func == 0))
1081 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
 /* QSFP SKU cannot use legacy interrupts. */
1082 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
1083 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1089 /* Enabled by default... */
1090 /* Fan failure detection */
1091 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
1092 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
 /* Netmap support */
1094 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1095 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
 /* Energy Efficient Ethernet */
1097 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1098 adapter->feat_en |= IXGBE_FEATURE_EEE;
1099 /* Thermal Sensor */
1100 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
1101 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
1103 /* Enabled via global sysctl... */
 /* Flow Director (FDIR) */
1105 if (ixgbe_enable_fdir) {
1106 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
1107 adapter->feat_en |= IXGBE_FEATURE_FDIR;
 /* else branch (elided lines): warn that FDIR is unsupported. */
1109 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
1111 /* Legacy (single queue) transmit */
1112 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
1113 ixgbe_enable_legacy_tx)
1114 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
1116 * Message Signal Interrupts - Extended (MSI-X)
1117 * Normal MSI is only enabled if MSI-X calls fail.
1119 if (!ixgbe_enable_msix)
1120 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
1121 /* Receive-Side Scaling (RSS) */
1122 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
1123 adapter->feat_en |= IXGBE_FEATURE_RSS;
1125 /* Disable features with unmet dependencies... */
 /* RSS and SR-IOV both require MSI-X. */
1127 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
1128 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
1129 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
1130 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
1131 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
1133 } /* ixgbe_init_device_features */
1135 /************************************************************************
1136 * ixgbe_check_fan_failure
 *
 *   Emit a critical console message when the fan-failure GPI bit is set
 *   in 'reg'.  The mask used depends on calling context: interrupt
 *   context checks the EICR-layout SDP1 bit for this MAC.
 *
 *   NOTE(review): the non-interrupt mask operand and the actual
 *   'reg & mask' test are elided from this view (line-number gap).
1137 ************************************************************************/
1139 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
1143 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
1147 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
1148 } /* ixgbe_check_fan_failure */
1150 /************************************************************************
 * ixgbe_is_sfp
 *
 *   Return whether the adapter uses pluggable (SFP/QSFP) optics, based
 *   on MAC type and the reported media type.  Used to decide whether
 *   module-insertion interrupts need handling.
1152 ************************************************************************/
1154 ixgbe_is_sfp(struct ixgbe_hw *hw)
1156 switch (hw->mac.type) {
1157 case ixgbe_mac_82598EB:
 /* On 82598 the 'nl' PHY type indicates an SFP cage. */
1158 if (hw->phy.type == ixgbe_phy_nl)
1161 case ixgbe_mac_82599EB:
1162 switch (hw->mac.ops.get_media_type(hw)) {
1163 case ixgbe_media_type_fiber:
1164 case ixgbe_media_type_fiber_qsfp:
1169 case ixgbe_mac_X550EM_x:
1170 case ixgbe_mac_X550EM_a:
1171 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
 /* NOTE(review): return statements/default cases are elided
  * from this view (line-number gaps). */
1177 } /* ixgbe_is_sfp */
1179 /************************************************************************
1180 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
1182 * Takes the ifnet's if_capenable flags (e.g. set by the user using
1183 * ifconfig) and indicates to the OS via the ifnet's if_hwassist
1184 * field what mbuf offload flags the driver will understand.
 *
 * The pre-1000000 (#else) path uses the older CSUM_* flag names; on
 * both paths SCTP checksum offload is excluded on 82598, which lacks
 * hardware support for it.
1185 ************************************************************************/
1187 ixgbe_set_if_hwassist(struct adapter *adapter)
1189 struct ifnet *ifp = adapter->ifp;
 /* Rebuild if_hwassist from scratch each time. */
1191 ifp->if_hwassist = 0;
1192 #if __FreeBSD_version >= 1000000
1193 if (ifp->if_capenable & IFCAP_TSO4)
1194 ifp->if_hwassist |= CSUM_IP_TSO;
1195 if (ifp->if_capenable & IFCAP_TSO6)
1196 ifp->if_hwassist |= CSUM_IP6_TSO;
1197 if (ifp->if_capenable & IFCAP_TXCSUM) {
1198 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1199 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1200 ifp->if_hwassist |= CSUM_IP_SCTP;
1202 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1203 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1204 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1205 ifp->if_hwassist |= CSUM_IP6_SCTP;
 /* #else branch: legacy flag names for older FreeBSD. */
1208 if (ifp->if_capenable & IFCAP_TSO)
1209 ifp->if_hwassist |= CSUM_TSO;
1210 if (ifp->if_capenable & IFCAP_TXCSUM) {
1211 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1212 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1213 ifp->if_hwassist |= CSUM_SCTP;
1216 } /* ixgbe_set_if_hwassist */
1218 /************************************************************************
1219 * ixgbe_init_locked - Init entry point
1221 * Used in two ways: It is used by the stack as an init
1222 * entry point in network interface structure. It is also
1223 * used by the driver as a hw/sw initialization routine to
1224 * get to a consistent state.
 *
 * Caller must hold the adapter core mutex (asserted below).
 * Sequence: stop hardware, program MAC address, set up TX/RX rings and
 * units, enable queues, route interrupts, configure link/flow control,
 * then mark the interface IFF_DRV_RUNNING.
1226 * return 0 on success, positive on failure
1227 ************************************************************************/
1229 ixgbe_init_locked(struct adapter *adapter)
1231 struct ifnet *ifp = adapter->ifp;
1232 device_t dev = adapter->dev;
1233 struct ixgbe_hw *hw = &adapter->hw;
1234 struct tx_ring *txr;
1235 struct rx_ring *rxr;
 /* NOTE(review): additional local declarations (mhadd, txdctl,
  * rxdctl, rxctrl, ctrl_ext, err, ...) are elided from this view. */
1241 mtx_assert(&adapter->core_mtx, MA_OWNED);
1242 INIT_DEBUGOUT("ixgbe_init_locked: begin");
 /* Quiesce the hardware before reprogramming it. */
1244 hw->adapter_stopped = FALSE;
1245 ixgbe_stop_adapter(hw);
1246 callout_stop(&adapter->timer);
1248 /* Queue indices may change with IOV mode */
1249 ixgbe_align_all_queue_indices(adapter);
1251 /* reprogram the RAR[0] in case user changed it. */
1252 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1254 /* Get the latest mac address, User can use a LAA */
1255 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1256 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1257 hw->addr_ctrl.rar_used_count = 1;
1259 /* Set hardware offload abilities from ifnet flags */
1260 ixgbe_set_if_hwassist(adapter);
1262 /* Prepare transmit descriptors and buffers */
1263 if (ixgbe_setup_transmit_structures(adapter)) {
1264 device_printf(dev, "Could not setup transmit structures\n");
1265 ixgbe_stop(adapter);
 /* (error return elided from this view) */
1270 ixgbe_initialize_iov(adapter);
1271 ixgbe_initialize_transmit_units(adapter);
1273 /* Setup Multicast table */
1274 ixgbe_set_multi(adapter);
1276 /* Determine the correct mbuf pool, based on frame size */
1277 if (adapter->max_frame_size <= MCLBYTES)
1278 adapter->rx_mbuf_sz = MCLBYTES;
1280 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1282 /* Prepare receive descriptors and buffers */
1283 if (ixgbe_setup_receive_structures(adapter)) {
1284 device_printf(dev, "Could not setup receive structures\n");
1285 ixgbe_stop(adapter);
1289 /* Configure RX settings */
1290 ixgbe_initialize_receive_units(adapter);
1292 /* Enable SDP & MSI-X interrupts based on adapter */
1293 ixgbe_config_gpie(adapter);
 /* Program the max frame size register for jumbo frames. */
1296 if (ifp->if_mtu > ETHERMTU) {
1297 /* aka IXGBE_MAXFRS on 82599 and newer */
1298 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1299 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1300 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1301 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1304 /* Now enable all the queues */
1305 for (int i = 0; i < adapter->num_queues; i++) {
1306 txr = &adapter->tx_rings[i];
1307 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1308 txdctl |= IXGBE_TXDCTL_ENABLE;
1309 /* Set WTHRESH to 8, burst writeback */
1310 txdctl |= (8 << 16);
1312 * When the internal queue falls below PTHRESH (32),
1313 * start prefetching as long as there are at least
1314 * HTHRESH (1) buffers ready. The values are taken
1315 * from the Intel linux driver 3.8.21.
1316 * Prefetching enables tx line rate even with 1 queue.
1318 txdctl |= (32 << 0) | (1 << 8);
1319 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
 /* Enable each RX queue, polling until the enable bit sticks. */
1322 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1323 rxr = &adapter->rx_rings[i];
1324 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1325 if (hw->mac.type == ixgbe_mac_82598EB) {
 /* (82598-specific comment lines elided from this view) */
1331 rxdctl &= ~0x3FFFFF;
1334 rxdctl |= IXGBE_RXDCTL_ENABLE;
1335 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1336 for (; j < 10; j++) {
1337 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1338 IXGBE_RXDCTL_ENABLE)
1346 * In netmap mode, we must preserve the buffers made
1347 * available to userspace before the if_init()
1348 * (this is true by default on the TX side, because
1349 * init makes all buffers available to userspace).
1351 * netmap_reset() and the device specific routines
1352 * (e.g. ixgbe_setup_receive_rings()) map these
1353 * buffers at the end of the NIC ring, so here we
1354 * must set the RDT (tail) register to make sure
1355 * they are not overwritten.
1357 * In this driver the NIC ring starts at RDH = 0,
1358 * RDT points to the last slot available for reception (?),
1359 * so RDT = num_rx_desc - 1 means the whole ring is available.
1362 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1363 (ifp->if_capenable & IFCAP_NETMAP)) {
1364 struct netmap_adapter *na = NA(adapter->ifp);
1365 struct netmap_kring *kring = &na->rx_rings[i];
1366 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1368 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1370 #endif /* DEV_NETMAP */
1371 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
1372 adapter->num_rx_desc - 1);
1375 /* Enable Receive engine */
1376 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1377 if (hw->mac.type == ixgbe_mac_82598EB)
1378 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1379 rxctrl |= IXGBE_RXCTRL_RXEN;
1380 ixgbe_enable_rx_dma(hw, rxctrl);
 /* Restart the watchdog/stats timer (1 second period). */
1382 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1384 /* Set up MSI-X routing */
1385 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1386 ixgbe_configure_ivars(adapter);
1387 /* Set up auto-mask */
1388 if (hw->mac.type == ixgbe_mac_82598EB)
1389 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1391 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1392 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1394 } else { /* Simple settings for Legacy/MSI */
1395 ixgbe_set_ivar(adapter, 0, 0, 0);
1396 ixgbe_set_ivar(adapter, 0, 0, 1);
1397 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1400 ixgbe_init_fdir(adapter);
1403 * Check on any SFP devices that
1404 * need to be kick-started
1406 if (hw->phy.type == ixgbe_phy_none) {
1407 err = hw->phy.ops.identify(hw);
1408 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1410 "Unsupported SFP+ module type was detected.\n");
1415 /* Set moderation on the Link interrupt */
1416 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1418 /* Config/Enable Link */
1419 ixgbe_config_link(adapter);
1421 /* Hardware Packet Buffer & Flow Control setup */
1422 ixgbe_config_delay_values(adapter);
1424 /* Initialize the FC settings */
 /* (flow-control start call elided from this view) */
1427 /* Set up VLAN support and filter */
1428 ixgbe_setup_vlan_hw_support(adapter);
1430 /* Setup DMA Coalescing */
1431 ixgbe_config_dmac(adapter);
1433 /* And now turn on interrupts */
1434 ixgbe_enable_intr(adapter);
1436 /* Enable the use of the MBX by the VF's */
1437 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
1438 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1439 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1440 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1443 /* Now inform the stack we're ready */
1444 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1447 } /* ixgbe_init_locked */
1449 /************************************************************************
 * ixgbe_init
 *
 *   Stack-facing init entry point: takes the core lock and defers to
 *   ixgbe_init_locked().
1451 ************************************************************************/
1453 ixgbe_init(void *arg)
1455 struct adapter *adapter = arg;
1457 IXGBE_CORE_LOCK(adapter);
1458 ixgbe_init_locked(adapter);
1459 IXGBE_CORE_UNLOCK(adapter);
1464 /************************************************************************
 * ixgbe_config_gpie
 *
 *   Program the GPIE register: enhanced MSI-X mode when enabled, plus
 *   GPI interrupt enables for fan failure, thermal sensor, and
 *   link/module detection SDP pins per MAC type.
1466 ************************************************************************/
1468 ixgbe_config_gpie(struct adapter *adapter)
1470 struct ixgbe_hw *hw = &adapter->hw;
 /* Read-modify-write the current GPIE value. */
1473 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1475 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1476 /* Enable Enhanced MSI-X mode */
1477 gpie |= IXGBE_GPIE_MSIX_MODE
1479 | IXGBE_GPIE_PBA_SUPPORT
 /* (additional OR'd flags elided from this view) */
1483 /* Fan Failure Interrupt */
1484 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
1485 gpie |= IXGBE_SDP1_GPIEN;
1487 /* Thermal Sensor Interrupt */
1488 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
1489 gpie |= IXGBE_SDP0_GPIEN_X540;
1491 /* Link detection */
1492 switch (hw->mac.type) {
1493 case ixgbe_mac_82599EB:
1494 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
1496 case ixgbe_mac_X550EM_x:
1497 case ixgbe_mac_X550EM_a:
1498 gpie |= IXGBE_SDP0_GPIEN_X540;
1504 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1507 } /* ixgbe_config_gpie */
1509 /************************************************************************
1510 * ixgbe_config_delay_values
 *
 *   Compute flow-control high/low watermarks from the max frame size
 *   and RX packet buffer size, using the X540-family delay formula on
 *   newer MACs and the generic one otherwise.
1512 * Requires adapter->max_frame_size to be set.
1513 ************************************************************************/
1515 ixgbe_config_delay_values(struct adapter *adapter)
1517 struct ixgbe_hw *hw = &adapter->hw;
1518 u32 rxpb, frame, size, tmp;
1520 frame = adapter->max_frame_size;
1522 /* Calculate High Water */
1523 switch (hw->mac.type) {
1524 case ixgbe_mac_X540:
1525 case ixgbe_mac_X550:
1526 case ixgbe_mac_X550EM_x:
1527 case ixgbe_mac_X550EM_a:
1528 tmp = IXGBE_DV_X540(frame, frame);
 /* default case (elided lines): generic delay value. */
1531 tmp = IXGBE_DV(frame, frame);
 /* High water = packet buffer size (KB) minus delay value (KB). */
1534 size = IXGBE_BT2KB(tmp);
1535 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1536 hw->fc.high_water[0] = rxpb - size;
1538 /* Now calculate Low Water */
1539 switch (hw->mac.type) {
1540 case ixgbe_mac_X540:
1541 case ixgbe_mac_X550:
1542 case ixgbe_mac_X550EM_x:
1543 case ixgbe_mac_X550EM_a:
1544 tmp = IXGBE_LOW_DV_X540(frame);
1547 tmp = IXGBE_LOW_DV(frame);
1550 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
 /* Standard pause time and XON behavior. */
1552 hw->fc.pause_time = IXGBE_FC_PAUSE;
1553 hw->fc.send_xon = TRUE;
1554 } /* ixgbe_config_delay_values */
1556 /************************************************************************
1557 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 *
 *   Unmask the interrupt for a single queue vector.  82598 uses the
 *   32-bit EIMS register; newer MACs split the 64-bit queue mask across
 *   EIMS_EX(0)/EIMS_EX(1).
1558 ************************************************************************/
1560 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1562 struct ixgbe_hw *hw = &adapter->hw;
 /* One bit per vector in a 64-bit queue mask. */
1563 u64 queue = (u64)(1 << vector);
1566 if (hw->mac.type == ixgbe_mac_82598EB) {
1567 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1568 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 /* Newer MACs: write low then high 32 bits. */
1570 mask = (queue & 0xFFFFFFFF);
1572 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1573 mask = (queue >> 32);
1575 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1577 } /* ixgbe_enable_queue */
1579 /************************************************************************
1580 * ixgbe_disable_queue
 *
 *   Mask (disable) the interrupt for a single queue vector — the
 *   mirror of ixgbe_enable_queue(), writing EIMC instead of EIMS.
1581 ************************************************************************/
1583 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1585 struct ixgbe_hw *hw = &adapter->hw;
1586 u64 queue = (u64)(1 << vector);
1589 if (hw->mac.type == ixgbe_mac_82598EB) {
1590 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1591 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
 /* Newer MACs: clear low then high 32 bits of the mask. */
1593 mask = (queue & 0xFFFFFFFF);
1595 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1596 mask = (queue >> 32);
1598 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1600 } /* ixgbe_disable_queue */
1602 /************************************************************************
 * ixgbe_handle_que
 *
 *   Deferred (taskqueue) per-queue service routine: drain TX work while
 *   the interface is running, then re-enable this queue's interrupt
 *   (per-vector for MSI-X, global otherwise).
1604 ************************************************************************/
1606 ixgbe_handle_que(void *context, int pending)
1608 struct ix_queue *que = context;
1609 struct adapter *adapter = que->adapter;
1610 struct tx_ring *txr = que->txr;
1611 struct ifnet *ifp = adapter->ifp;
1613 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 /* (TX lock acquisition / txeof lines elided from this view) */
1617 if (!ixgbe_ring_empty(ifp, txr->br))
1618 ixgbe_start_locked(ifp, txr);
1619 IXGBE_TX_UNLOCK(txr);
1622 /* Reenable this interrupt */
1623 if (que->res != NULL)
1624 ixgbe_enable_queue(adapter, que->msix);
1626 ixgbe_enable_intr(adapter);
1629 } /* ixgbe_handle_que */
1632 /************************************************************************
1633 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single shared ISR for INTx/MSI operation: reads EICR once, services
 *   RX/TX, then dispatches fan-failure, link, SFP module, and external
 *   PHY events to their taskqueues before re-enabling interrupts.
1634 ************************************************************************/
1636 ixgbe_legacy_irq(void *arg)
1638 struct ix_queue *que = arg;
1639 struct adapter *adapter = que->adapter;
1640 struct ixgbe_hw *hw = &adapter->hw;
1641 struct ifnet *ifp = adapter->ifp;
1642 struct tx_ring *txr = adapter->tx_rings;
1644 u32 eicr, eicr_mask;
1646 /* Silicon errata #26 on 82598 */
1647 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
 /* Reading EICR also clears it. */
1649 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 /* (spurious-interrupt early-out lines elided from this view) */
1653 ixgbe_enable_intr(adapter);
1657 more = ixgbe_rxeof(que);
 /* (TX lock / txeof lines elided from this view) */
1661 if (!ixgbe_ring_empty(ifp, txr->br))
1662 ixgbe_start_locked(ifp, txr);
1663 IXGBE_TX_UNLOCK(txr);
1665 /* Check for fan failure */
1666 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1667 ixgbe_check_fan_failure(adapter, eicr, true);
1668 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1671 /* Link status change */
1672 if (eicr & IXGBE_EICR_LSC)
1673 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1675 if (ixgbe_is_sfp(hw)) {
1676 /* Pluggable optics-related interrupt */
1677 if (hw->mac.type >= ixgbe_mac_X540)
1678 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
1680 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1682 if (eicr & eicr_mask) {
1683 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
1684 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 /* 82599: SDP1 signals multi-speed fiber (MSF) events. */
1687 if ((hw->mac.type == ixgbe_mac_82599EB) &&
1688 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1689 IXGBE_WRITE_REG(hw, IXGBE_EICR,
1690 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1691 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1695 /* External PHY interrupt */
1696 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
1697 (eicr & IXGBE_EICR_GPI_SDP0_X540))
1698 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
 /* If RX/TX work remains, requeue; otherwise unmask interrupts. */
1701 taskqueue_enqueue(que->tq, &que->que_task);
1703 ixgbe_enable_intr(adapter);
1706 } /* ixgbe_legacy_irq */
1709 /************************************************************************
1710 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue MSI-X handler: masks its vector, services RX/TX, and
 *   optionally performs Adaptive Interrupt Moderation (AIM) by deriving
 *   the next EITR value from average packet size over the interval.
1711 ************************************************************************/
1713 ixgbe_msix_que(void *arg)
1715 struct ix_queue *que = arg;
1716 struct adapter *adapter = que->adapter;
1717 struct ifnet *ifp = adapter->ifp;
1718 struct tx_ring *txr = que->txr;
1719 struct rx_ring *rxr = que->rxr;
 /* (more/newitr local declarations elided from this view) */
1724 /* Protect against spurious interrupts */
1725 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 /* Mask this vector until service is complete. */
1728 ixgbe_disable_queue(adapter, que->msix);
1731 more = ixgbe_rxeof(que);
 /* (TX lock / txeof lines elided from this view) */
1735 if (!ixgbe_ring_empty(ifp, txr->br))
1736 ixgbe_start_locked(ifp, txr);
1737 IXGBE_TX_UNLOCK(txr);
 /* Skip AIM entirely when disabled via sysctl. */
1741 if (adapter->enable_aim == FALSE)
1744 * Do Adaptive Interrupt Moderation:
1745 * - Write out last calculated setting
1746 * - Calculate based on average size over
1747 * the last interval.
1749 if (que->eitr_setting)
1750 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1753 que->eitr_setting = 0;
1755 /* Idle, do nothing */
1756 if ((txr->bytes == 0) && (rxr->bytes == 0))
 /* newitr = max average packet size across TX and RX. */
1759 if ((txr->bytes) && (txr->packets))
1760 newitr = txr->bytes/txr->packets;
1761 if ((rxr->bytes) && (rxr->packets))
1762 newitr = max(newitr, (rxr->bytes / rxr->packets));
1763 newitr += 24; /* account for hardware frame, crc */
1765 /* set an upper boundary */
1766 newitr = min(newitr, 3000);
1768 /* Be nice to the mid range */
1769 if ((newitr > 300) && (newitr < 1200))
1770 newitr = (newitr / 3);
1772 newitr = (newitr / 2);
 /* 82598 duplicates the interval in the upper 16 bits. */
1774 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1775 newitr |= newitr << 16;
1777 newitr |= IXGBE_EITR_CNT_WDIS;
1779 /* save for next interrupt */
1780 que->eitr_setting = newitr;
 /* Requeue if work remains; otherwise unmask this vector. */
1790 taskqueue_enqueue(que->tq, &que->que_task);
1792 ixgbe_enable_queue(adapter, que->msix);
1795 } /* ixgbe_msix_que */
1798 /************************************************************************
1799 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 *   "Other-cause" MSI-X handler: reads and clears EICR (queue bits
 *   excluded), then dispatches link, Flow Director, ECC, over-temp,
 *   VF mailbox, SFP module, fan-failure, and external-PHY events.
1800 ************************************************************************/
1802 ixgbe_msix_link(void *arg)
1804 struct adapter *adapter = arg;
1805 struct ixgbe_hw *hw = &adapter->hw;
1806 u32 eicr, eicr_mask;
 /* (retval local declaration elided from this view) */
1809 ++adapter->link_irq;
1811 /* Pause other interrupts */
1812 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1814 /* First get the cause */
1815 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1816 /* Be sure the queue bits are not cleared */
1817 eicr &= ~IXGBE_EICR_RTX_QUEUE;
1818 /* Clear interrupt with write */
1819 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
1821 /* Link status change */
1822 if (eicr & IXGBE_EICR_LSC) {
1823 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1824 taskqueue_enqueue(adapter->tq, &adapter->link_task);
 /* The following causes only exist on 82599 and newer. */
1827 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1828 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
1829 (eicr & IXGBE_EICR_FLOW_DIR)) {
1830 /* This is probably overkill :) */
 /* Only one FDIR reinit may be in flight at a time. */
1831 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1833 /* Disable the interrupt */
1834 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1835 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1838 if (eicr & IXGBE_EICR_ECC) {
1839 device_printf(adapter->dev,
1840 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
1841 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1844 /* Check for over temp condition */
1845 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
1846 switch (adapter->hw.mac.type) {
1847 case ixgbe_mac_X550EM_a:
 /* X550EM_a signals over-temp via an SDP0 GPI bit. */
1848 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
1850 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
1851 IXGBE_EICR_GPI_SDP0_X550EM_a);
1852 IXGBE_WRITE_REG(hw, IXGBE_EICR,
1853 IXGBE_EICR_GPI_SDP0_X550EM_a);
1854 retval = hw->phy.ops.check_overtemp(hw);
1855 if (retval != IXGBE_ERR_OVERTEMP)
1857 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
1858 device_printf(adapter->dev, "System shutdown required!\n");
 /* default case (elided lines): other MACs use the TS bit. */
1861 if (!(eicr & IXGBE_EICR_TS))
1863 retval = hw->phy.ops.check_overtemp(hw);
1864 if (retval != IXGBE_ERR_OVERTEMP)
1866 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
1867 device_printf(adapter->dev, "System shutdown required!\n");
1868 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1873 /* Check for VF message */
1874 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
1875 (eicr & IXGBE_EICR_MAILBOX))
1876 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1879 if (ixgbe_is_sfp(hw)) {
1880 /* Pluggable optics-related interrupt */
1881 if (hw->mac.type >= ixgbe_mac_X540)
1882 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
1884 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1886 if (eicr & eicr_mask) {
1887 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
1888 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 /* 82599: SDP1 signals multi-speed fiber (MSF) events. */
1891 if ((hw->mac.type == ixgbe_mac_82599EB) &&
1892 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1893 IXGBE_WRITE_REG(hw, IXGBE_EICR,
1894 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1895 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1899 /* Check for fan failure */
1900 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1901 ixgbe_check_fan_failure(adapter, eicr, true);
1902 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1905 /* External PHY interrupt */
1906 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
1907 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1908 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1909 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1912 /* Re-enable other interrupts */
1913 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1916 } /* ixgbe_msix_link */
1918 /************************************************************************
1919 * ixgbe_media_status - Media Ioctl callback
1921 * Called whenever the user queries the status of
1922 * the interface using ifconfig.
 *
 * Refreshes link state, then translates the physical layer
 * (adapter->phy_layer) and negotiated speed into ifmedia IFM_* bits,
 * finishing with the active flow-control pause flags.
1923 ************************************************************************/
1925 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1927 struct adapter *adapter = ifp->if_softc;
1928 struct ixgbe_hw *hw = &adapter->hw;
 /* (layer local declaration elided from this view) */
1931 INIT_DEBUGOUT("ixgbe_media_status: begin");
1932 IXGBE_CORE_LOCK(adapter);
1933 ixgbe_update_link_status(adapter);
1935 ifmr->ifm_status = IFM_AVALID;
1936 ifmr->ifm_active = IFM_ETHER;
 /* No link: report valid-but-inactive and bail out. */
1938 if (!adapter->link_active) {
1939 IXGBE_CORE_UNLOCK(adapter);
1943 ifmr->ifm_status |= IFM_ACTIVE;
1944 layer = adapter->phy_layer;
 /* Copper (BASE-T) physical layers. */
1946 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1947 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1948 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
1949 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1950 switch (adapter->link_speed) {
1951 case IXGBE_LINK_SPEED_10GB_FULL:
1952 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1954 case IXGBE_LINK_SPEED_1GB_FULL:
1955 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1957 case IXGBE_LINK_SPEED_100_FULL:
1958 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1960 case IXGBE_LINK_SPEED_10_FULL:
1961 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
 /* Direct-attach (twinax) SFP+ cables. */
1964 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1965 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1966 switch (adapter->link_speed) {
1967 case IXGBE_LINK_SPEED_10GB_FULL:
1968 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1971 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1972 switch (adapter->link_speed) {
1973 case IXGBE_LINK_SPEED_10GB_FULL:
1974 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1976 case IXGBE_LINK_SPEED_1GB_FULL:
1977 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1980 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1981 switch (adapter->link_speed) {
1982 case IXGBE_LINK_SPEED_10GB_FULL:
1983 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1985 case IXGBE_LINK_SPEED_1GB_FULL:
1986 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1989 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1990 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1991 switch (adapter->link_speed) {
1992 case IXGBE_LINK_SPEED_10GB_FULL:
1993 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1995 case IXGBE_LINK_SPEED_1GB_FULL:
1996 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1999 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2000 switch (adapter->link_speed) {
2001 case IXGBE_LINK_SPEED_10GB_FULL:
2002 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2006 * XXX: These need to use the proper media types once
 /* Backplane (KR/KX4/KX): older ifmedia lacks the dedicated
  * IFM_*_K* subtypes, so approximate with SR/CX4/CX. */
2009 #ifndef IFM_ETH_XTYPE
2010 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2011 switch (adapter->link_speed) {
2012 case IXGBE_LINK_SPEED_10GB_FULL:
2013 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2015 case IXGBE_LINK_SPEED_2_5GB_FULL:
2016 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2018 case IXGBE_LINK_SPEED_1GB_FULL:
2019 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2022 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2023 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2024 switch (adapter->link_speed) {
2025 case IXGBE_LINK_SPEED_10GB_FULL:
2026 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2028 case IXGBE_LINK_SPEED_2_5GB_FULL:
2029 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2031 case IXGBE_LINK_SPEED_1GB_FULL:
2032 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
 /* #else branch: ifmedia supports proper KR/KX4/KX subtypes. */
2036 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2037 switch (adapter->link_speed) {
2038 case IXGBE_LINK_SPEED_10GB_FULL:
2039 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2041 case IXGBE_LINK_SPEED_2_5GB_FULL:
2042 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2044 case IXGBE_LINK_SPEED_1GB_FULL:
2045 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2048 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2049 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2050 switch (adapter->link_speed) {
2051 case IXGBE_LINK_SPEED_10GB_FULL:
2052 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2054 case IXGBE_LINK_SPEED_2_5GB_FULL:
2055 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2057 case IXGBE_LINK_SPEED_1GB_FULL:
2058 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2063 /* If nothing is recognized... */
2064 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2065 ifmr->ifm_active |= IFM_UNKNOWN;
2067 #if __FreeBSD_version >= 900025
2068 /* Display current flow control setting used on link */
2069 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2070 hw->fc.current_mode == ixgbe_fc_full)
2071 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2072 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2073 hw->fc.current_mode == ixgbe_fc_full)
2074 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2077 IXGBE_CORE_UNLOCK(adapter);
2080 } /* ixgbe_media_status */
2082 /************************************************************************
2083 * ixgbe_media_change - Media Ioctl callback
2085 * Called when the user changes speed/duplex using
2086 * media/mediopt option with ifconfig.
 *
 * Maps the requested IFM_* subtype onto an ixgbe_link_speed bitmask,
 * restarts autonegotiation via setup_link(), and records the choice
 * in adapter->advertise for the advertise sysctl.
2087 ************************************************************************/
2089 ixgbe_media_change(struct ifnet *ifp)
2091 struct adapter *adapter = ifp->if_softc;
2092 struct ifmedia *ifm = &adapter->media;
2093 struct ixgbe_hw *hw = &adapter->hw;
2094 ixgbe_link_speed speed = 0;
2096 INIT_DEBUGOUT("ixgbe_media_change: begin");
 /* Only Ethernet media is valid for this driver. */
2098 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 /* Backplane links do not allow user speed changes. */
2101 if (hw->phy.media_type == ixgbe_media_type_backplane)
2105 * We don't actually need to check against the supported
2106 * media types of the adapter; ifmedia will take care of
 /* IFM_AUTO case (label elided): advertise all supported speeds. */
2109 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2112 speed |= IXGBE_LINK_SPEED_100_FULL;
2113 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2114 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2118 #ifndef IFM_ETH_XTYPE
2119 case IFM_10G_SR: /* KR, too */
2120 case IFM_10G_CX4: /* KX4 */
 /* (additional 10G case labels elided from this view) */
2125 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2126 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2128 #ifndef IFM_ETH_XTYPE
2129 case IFM_1000_CX: /* KX */
 /* (additional 1G case labels elided from this view) */
2135 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2138 speed |= IXGBE_LINK_SPEED_100_FULL;
2139 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2141 case IFM_10G_TWINAX:
2142 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2145 speed |= IXGBE_LINK_SPEED_100_FULL;
2148 speed |= IXGBE_LINK_SPEED_10_FULL;
 /* Apply the selected speed set and restart link setup. */
2154 hw->mac.autotry_restart = TRUE;
2155 hw->mac.ops.setup_link(hw, speed, TRUE);
 /* Encode advertise bits: 1=100M, 2=1G, 4=10G, 8=10M. */
2156 adapter->advertise =
2157 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2158 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2159 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2160 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2165 device_printf(adapter->dev, "Invalid media type!\n");
2168 } /* ixgbe_media_change */
2170 /************************************************************************
2172 ************************************************************************/
/* Program the FCTRL unicast/multicast promiscuous bits from ifp->if_flags. */
2174 ixgbe_set_promisc(struct adapter *adapter)
2176 struct ifnet *ifp = adapter->ifp;
2180 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2181 rctl &= (~IXGBE_FCTRL_UPE);
/* Count joined multicast addresses; IFF_ALLMULTI short-circuits the walk. */
2182 if (ifp->if_flags & IFF_ALLMULTI)
2183 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2185 struct ifmultiaddr *ifma;
2186 #if __FreeBSD_version < 800000
2189 if_maddr_rlock(ifp);
2191 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2192 if (ifma->ifma_addr->sa_family != AF_LINK)
2194 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2198 #if __FreeBSD_version < 800000
2199 IF_ADDR_UNLOCK(ifp);
2201 if_maddr_runlock(ifp);
/* Multicast-promiscuous is only cleared when the filter can hold them all. */
2204 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2205 rctl &= (~IXGBE_FCTRL_MPE);
2206 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2208 if (ifp->if_flags & IFF_PROMISC) {
2209 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2210 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2211 } else if (ifp->if_flags & IFF_ALLMULTI) {
2212 rctl |= IXGBE_FCTRL_MPE;
2213 rctl &= ~IXGBE_FCTRL_UPE;
2214 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2218 } /* ixgbe_set_promisc */
2221 /************************************************************************
2222 * ixgbe_set_multi - Multicast Update
2224 * Called whenever multicast address list is updated.
2225 ************************************************************************/
/*
 * Rebuild the hardware multicast filter table (mta) from the interface's
 * multicast list and program FCTRL promisc bits to match current flags.
 */
2227 ixgbe_set_multi(struct adapter *adapter)
2229 struct ifmultiaddr *ifma;
2230 struct ixgbe_mc_addr *mta;
2231 struct ifnet *ifp = adapter->ifp;
2236 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2239 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2241 #if __FreeBSD_version < 800000
2244 if_maddr_rlock(ifp);
/* Copy each link-layer multicast address into the mta table, tagged
 * with this adapter's VMDq pool, up to the table limit. */
2246 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2247 if (ifma->ifma_addr->sa_family != AF_LINK)
2249 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2251 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2252 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2253 mta[mcnt].vmdq = adapter->pool;
2256 #if __FreeBSD_version < 800000
2257 IF_ADDR_UNLOCK(ifp);
2259 if_maddr_runlock(ifp);
2262 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional UPE|MPE set looks redundant — every
 * branch below either sets or clears these bits again. Verify intended. */
2263 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2264 if (ifp->if_flags & IFF_PROMISC)
2265 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2266 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2267 ifp->if_flags & IFF_ALLMULTI) {
2268 fctrl |= IXGBE_FCTRL_MPE;
2269 fctrl &= ~IXGBE_FCTRL_UPE;
2271 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only push an exact filter list if it fit in the table. */
2275 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2276 update_ptr = (u8 *)mta;
2277 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
2278 ixgbe_mc_array_itr, TRUE);
2282 } /* ixgbe_set_multi */
2284 /************************************************************************
2285 * ixgbe_mc_array_itr
2287 * An iterator function needed by the multicast shared code.
2288 * It feeds the shared code routine the addresses in the
2289 * array of ixgbe_set_multi() one by one.
2290 ************************************************************************/
/* Iterator callback: yields one entry from the mta array built by
 * ixgbe_set_multi(), advancing *update_ptr to the next element. */
2292 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2294 struct ixgbe_mc_addr *mta;
2296 mta = (struct ixgbe_mc_addr *)*update_ptr;
2299 *update_ptr = (u8*)(mta + 1);
2302 } /* ixgbe_mc_array_itr */
2305 /************************************************************************
2306 * ixgbe_local_timer - Timer routine
2308 * Checks for link status, updates statistics,
2309 * and runs the watchdog check.
2310 ************************************************************************/
/*
 * Periodic (1 Hz) callout, run with the core lock held: refreshes link
 * state and stats, detects hung TX queues, and reschedules itself.
 */
2312 ixgbe_local_timer(void *arg)
2314 struct adapter *adapter = arg;
2315 device_t dev = adapter->dev;
2316 struct ix_queue *que = adapter->queues;
2320 mtx_assert(&adapter->core_mtx, MA_OWNED);
2322 /* Check for pluggable optics */
2323 if (adapter->sfp_probe)
2324 if (!ixgbe_sfp_probe(adapter))
2325 goto out; /* Nothing to do */
2327 ixgbe_update_link_status(adapter);
2328 ixgbe_update_stats_counters(adapter);
2331 * Check the TX queues status
2332 * - mark hung queues so we don't schedule on them
2333 * - watchdog only if all queues show hung
2335 for (int i = 0; i < adapter->num_queues; i++, que++) {
2336 /* Keep track of queues with work for soft irq */
2338 queues |= ((u64)1 << que->me);
2340 * Each time txeof runs without cleaning, but there
2341 * are uncleaned descriptors it increments busy. If
2342 * we get to the MAX we declare it hung.
2344 if (que->busy == IXGBE_QUEUE_HUNG) {
2346 /* Mark the queue as inactive */
2347 adapter->active_queues &= ~((u64)1 << que->me);
2350 /* Check if we've come back from hung */
2351 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2352 adapter->active_queues |= ((u64)1 << que->me);
2354 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2356 "Warning queue %d appears to be hung!\n", i);
2357 que->txr->busy = IXGBE_QUEUE_HUNG;
2362 /* Only truly watchdog if all queues show hung */
2363 if (hung == adapter->num_queues)
2365 else if (queues != 0) { /* Force an IRQ on queues with work */
2366 ixgbe_rearm_queues(adapter, queues);
2370 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark the interface down and reinitialize the adapter. */
2374 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2375 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2376 adapter->watchdog_events++;
2377 ixgbe_init_locked(adapter);
2378 } /* ixgbe_local_timer */
2381 /************************************************************************
2382 * ixgbe_update_link_status - Update OS on link state
2384 * Note: Only updates the OS on the cached link state.
2385 * The real check of the hardware only happens with
2387 ************************************************************************/
/* Propagate the cached adapter->link_up state to the OS (if_link_state),
 * acting only on transitions tracked via adapter->link_active. */
2389 ixgbe_update_link_status(struct adapter *adapter)
2391 struct ifnet *ifp = adapter->ifp;
2392 device_t dev = adapter->dev;
2394 if (adapter->link_up) {
2395 if (adapter->link_active == FALSE) {
/* 128 is presumably IXGBE_LINK_SPEED_10GB_FULL — confirm against shared code. */
2397 device_printf(dev, "Link is up %d Gbps %s \n",
2398 ((adapter->link_speed == 128) ? 10 : 1),
2400 adapter->link_active = TRUE;
2401 /* Update any Flow Control changes */
2402 ixgbe_fc_enable(&adapter->hw);
2403 /* Update DMA coalescing config */
2404 ixgbe_config_dmac(adapter);
2405 if_link_state_change(ifp, LINK_STATE_UP);
/* With SR-IOV enabled, VFs are notified of link transitions. */
2406 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2407 ixgbe_ping_all_vfs(adapter);
2409 } else { /* Link down */
2410 if (adapter->link_active == TRUE) {
2412 device_printf(dev, "Link is Down\n");
2413 if_link_state_change(ifp, LINK_STATE_DOWN);
2414 adapter->link_active = FALSE;
2415 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2416 ixgbe_ping_all_vfs(adapter);
2421 } /* ixgbe_update_link_status */
2424 /************************************************************************
2425 * ixgbe_stop - Stop the hardware
2427 * Disables all traffic on the adapter by issuing a
2428 * global reset on the MAC and deallocates TX/RX buffers.
2429 ************************************************************************/
/* Quiesce the adapter: disable interrupts, stop the timer and MAC,
 * turn off optics, and report link-down to the stack. Core lock held. */
2431 ixgbe_stop(void *arg)
2434 struct adapter *adapter = arg;
2435 struct ixgbe_hw *hw = &adapter->hw;
2439 mtx_assert(&adapter->core_mtx, MA_OWNED);
2441 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2442 ixgbe_disable_intr(adapter);
2443 callout_stop(&adapter->timer);
2445 /* Let the stack know...*/
2446 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Clear adapter_stopped first so ixgbe_stop_adapter() actually runs. */
2449 hw->adapter_stopped = FALSE;
2450 ixgbe_stop_adapter(hw);
2451 if (hw->mac.type == ixgbe_mac_82599EB)
2452 ixgbe_stop_mac_link_on_d3_82599(hw);
2453 /* Turn off the laser - noop with no optics */
2454 ixgbe_disable_tx_laser(hw);
2456 /* Update the stack */
2457 adapter->link_up = FALSE;
2458 ixgbe_update_link_status(adapter);
2460 /* reprogram the RAR[0] in case user changed it. */
2461 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2467 /************************************************************************
2468 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
2469 ************************************************************************/
/*
 * Allocate and wire up a single shared legacy/MSI interrupt, plus the
 * taskqueues used for deferred queue and link/SFP/PHY processing.
 */
2471 ixgbe_allocate_legacy(struct adapter *adapter)
2473 device_t dev = adapter->dev;
2474 struct ix_queue *que = adapter->queues;
2475 struct tx_ring *txr = adapter->tx_rings;
2478 /* We allocate a single interrupt resource */
2479 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2480 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
2481 if (adapter->res == NULL) {
2483 "Unable to allocate bus resource: interrupt\n");
2488 * Try allocating a fast interrupt and the associated deferred
2489 * processing contexts.
2491 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2492 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2493 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2494 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2495 taskqueue_thread_enqueue, &que->tq);
2496 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2497 device_get_nameunit(adapter->dev));
2499 /* Tasklets for Link, SFP and Multispeed Fiber */
2500 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2501 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2502 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2503 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2504 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2505 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2506 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2507 taskqueue_thread_enqueue, &adapter->tq);
2508 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2509 device_get_nameunit(adapter->dev));
/* Register the IRQ handler; on failure both taskqueues are torn down. */
2511 if ((error = bus_setup_intr(dev, adapter->res,
2512 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
2513 &adapter->tag)) != 0) {
2515 "Failed to register fast interrupt handler: %d\n", error);
2516 taskqueue_free(que->tq);
2517 taskqueue_free(adapter->tq);
2523 /* For simplicity in the handlers */
2524 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2527 } /* ixgbe_allocate_legacy */
2530 /************************************************************************
2531 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
2532 ************************************************************************/
/*
 * Allocate one MSI-X vector per queue plus one for link events, wire up
 * handlers/taskqueues, and (with RSS) bind each queue to its bucket's CPU.
 */
2534 ixgbe_allocate_msix(struct adapter *adapter)
2536 device_t dev = adapter->dev;
2537 struct ix_queue *que = adapter->queues;
2538 struct tx_ring *txr = adapter->tx_rings;
2539 int error, rid, vector = 0;
2541 unsigned int rss_buckets = 0;
2545 * If we're doing RSS, the number of queues needs to
2546 * match the number of RSS buckets that are configured.
2548 * + If there's more queues than RSS buckets, we'll end
2549 * up with queues that get no traffic.
2551 * + If there's more RSS buckets than queues, we'll end
2552 * up having multiple RSS buckets map to the same queue,
2553 * so there'll be some contention.
2555 rss_buckets = rss_getnumbuckets();
2556 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
2557 (adapter->num_queues != rss_buckets)) {
2558 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
2559 __func__, adapter->num_queues, rss_buckets);
/* Per-queue setup: IRQ resource, handler, CPU binding, taskqueue. */
2562 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2564 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2565 RF_SHAREABLE | RF_ACTIVE);
2566 if (que->res == NULL) {
2567 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2571 /* Set the handler function */
2572 error = bus_setup_intr(dev, que->res,
2573 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
2577 device_printf(dev, "Failed to register QUE handler");
2580 #if __FreeBSD_version >= 800504
2581 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2584 adapter->active_queues |= (u64)(1 << que->msix);
2586 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2588 * The queue ID is used as the RSS layer bucket ID.
2589 * We look up the queue ID -> RSS CPU ID and select
2592 cpu_id = rss_getcpu(i % rss_buckets);
2593 CPU_SETOF(cpu_id, &cpu_mask);
2596 * Bind the msix vector, and thus the
2597 * rings to the corresponding cpu.
2599 * This just happens to match the default RSS
2600 * round-robin bucket -> queue -> CPU allocation.
2602 if (adapter->num_queues > 1)
2605 if (adapter->num_queues > 1)
2606 bus_bind_intr(dev, que->res, cpu_id);
2608 if (adapter->feat_en & IXGBE_FEATURE_RSS)
2609 device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
2612 device_printf(dev, "Bound queue %d to cpu %d\n", i,
2614 #endif /* IXGBE_DEBUG */
2617 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2618 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
2620 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2621 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2622 taskqueue_thread_enqueue, &que->tq);
2623 #if __FreeBSD_version < 1100000
2624 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2625 device_get_nameunit(adapter->dev), i);
2627 if (adapter->feat_en & IXGBE_FEATURE_RSS)
2628 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2629 &cpu_mask, "%s (bucket %d)",
2630 device_get_nameunit(adapter->dev), cpu_id);
2632 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2633 NULL, "%s:q%d", device_get_nameunit(adapter->dev),
/* The link/admin vector follows the last queue vector. */
2639 adapter->link_rid = vector + 1;
2640 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2641 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
2642 if (!adapter->res) {
2644 "Unable to allocate bus resource: Link interrupt [%d]\n",
2648 /* Set the link handler function */
2649 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2650 NULL, ixgbe_msix_link, adapter, &adapter->tag);
2652 adapter->res = NULL;
2653 device_printf(dev, "Failed to register LINK handler");
2656 #if __FreeBSD_version >= 800504
2657 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2659 adapter->vector = vector;
2660 /* Tasklets for Link, SFP and Multispeed Fiber */
2661 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2662 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2663 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2664 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2665 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2666 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2667 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2668 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2669 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2670 taskqueue_thread_enqueue, &adapter->tq);
2671 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2672 device_get_nameunit(adapter->dev));
2675 } /* ixgbe_allocate_msix */
2677 /************************************************************************
2678 * ixgbe_configure_interrupts
2680 * Setup MSI-X, MSI, or legacy interrupts (in that order).
2681 * This will also depend on user settings.
2682 ************************************************************************/
/*
 * Decide the interrupt mode (MSI-X preferred, then MSI, then legacy),
 * compute the queue count, and set the corresponding feat_en bits.
 * Falls through each mode when the previous one cannot be used.
 */
2684 ixgbe_configure_interrupts(struct adapter *adapter)
2686 device_t dev = adapter->dev;
2687 int rid, want, queues, msgs;
2689 /* Default to 1 queue if MSI-X setup fails */
2690 adapter->num_queues = 1;
2692 /* Override by tuneable */
2693 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
2696 /* First try MSI-X */
2697 msgs = pci_msix_count(dev);
/* MSI-X table BAR differs by MAC: try the 82598 BAR first, then +4
 * for 82599-class parts. */
2700 rid = PCIR_BAR(MSIX_82598_BAR);
2701 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2703 if (adapter->msix_mem == NULL) {
2704 rid += 4; /* 82599 maps in higher BAR */
2705 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2708 if (adapter->msix_mem == NULL) {
2709 /* May not be enabled */
2710 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
2714 /* Figure out a reasonable auto config value */
2715 queues = min(mp_ncpus, msgs - 1);
2716 /* If we're doing RSS, clamp at the number of RSS buckets */
2717 if (adapter->feat_en & IXGBE_FEATURE_RSS)
2718 queues = min(queues, rss_getnumbuckets());
2719 if (ixgbe_num_queues > queues) {
2720 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
2721 ixgbe_num_queues = queues;
2724 if (ixgbe_num_queues != 0)
2725 queues = ixgbe_num_queues;
2726 /* Set max queues to 8 when autoconfiguring */
2728 queues = min(queues, 8);
2730 /* reflect correct sysctl value */
2731 ixgbe_num_queues = queues;
2734 * Want one vector (RX/TX pair) per queue
2735 * plus an additional for Link.
2741 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
2745 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2746 device_printf(adapter->dev,
2747 "Using MSI-X interrupts with %d vectors\n", msgs);
2748 adapter->num_queues = queues;
2749 adapter->feat_en |= IXGBE_FEATURE_MSIX;
2753 * MSI-X allocation failed or provided us with
2754 * less vectors than needed. Free MSI-X resources
2755 * and we'll try enabling MSI.
2757 pci_release_msi(dev);
2760 /* Without MSI-X, some features are no longer supported */
2761 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
2762 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
2763 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
2764 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
2766 if (adapter->msix_mem != NULL) {
2767 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2769 adapter->msix_mem = NULL;
/* MSI fallback: single vector, rid 1. */
2772 if (pci_alloc_msi(dev, &msgs) == 0) {
2773 adapter->feat_en |= IXGBE_FEATURE_MSI;
2774 adapter->link_rid = 1;
2775 device_printf(adapter->dev, "Using an MSI interrupt\n");
2779 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
2780 device_printf(adapter->dev,
2781 "Device does not support legacy interrupts.\n");
/* Legacy INTx fallback: rid 0. */
2785 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
2786 adapter->link_rid = 0;
2787 device_printf(adapter->dev, "Using a Legacy interrupt\n");
2790 } /* ixgbe_configure_interrupts */
2793 /************************************************************************
2794 * ixgbe_allocate_pci_resources
2795 ************************************************************************/
/* Map the device register BAR and save the bus-space tag/handle that
 * the IXGBE_READ_REG/IXGBE_WRITE_REG macros use. */
2797 ixgbe_allocate_pci_resources(struct adapter *adapter)
2799 device_t dev = adapter->dev;
2803 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2806 if (!(adapter->pci_mem)) {
2807 device_printf(dev, "Unable to allocate bus resource: memory\n");
2811 /* Save bus_space values for READ/WRITE_REG macros */
2812 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2813 adapter->osdep.mem_bus_space_handle =
2814 rman_get_bushandle(adapter->pci_mem);
2815 /* Set hw values for shared code */
2816 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2819 } /* ixgbe_allocate_pci_resources */
2821 /************************************************************************
2822 * ixgbe_free_pci_resources
2823 ************************************************************************/
/* Release interrupt handlers/resources, MSI(-X) state, and the mapped
 * BARs; tolerates partially-initialized state from a failed attach. */
2825 ixgbe_free_pci_resources(struct adapter *adapter)
2827 struct ix_queue *que = adapter->queues;
2828 device_t dev = adapter->dev;
2831 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2832 memrid = PCIR_BAR(MSIX_82598_BAR);
2834 memrid = PCIR_BAR(MSIX_82599_BAR);
2837 * There is a slight possibility of a failure mode
2838 * in attach that will result in entering this function
2839 * before interrupt resources have been initialized, and
2840 * in that case we do not want to execute the loops below
2841 * We can detect this reliably by the state of the adapter
2844 if (adapter->res == NULL)
2848 * Release all msix queue resources:
2850 for (int i = 0; i < adapter->num_queues; i++, que++) {
2851 rid = que->msix + 1;
2852 if (que->tag != NULL) {
2853 bus_teardown_intr(dev, que->res, que->tag);
2856 if (que->res != NULL)
2857 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2861 /* Clean the Legacy or Link interrupt last */
2862 if (adapter->tag != NULL) {
2863 bus_teardown_intr(dev, adapter->res, adapter->tag);
2864 adapter->tag = NULL;
2866 if (adapter->res != NULL)
2867 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
2871 if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
2872 (adapter->feat_en & IXGBE_FEATURE_MSIX))
2873 pci_release_msi(dev);
2875 if (adapter->msix_mem != NULL)
2876 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
2879 if (adapter->pci_mem != NULL)
2880 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2884 } /* ixgbe_free_pci_resources */
2886 /************************************************************************
2887 * ixgbe_setup_interface
2889 * Setup networking device structure and register an interface.
2890 ************************************************************************/
/*
 * Allocate and populate the ifnet: callbacks, TSO limits, TX path
 * (legacy if_start vs. multiqueue if_transmit), capabilities, and the
 * ifmedia list; then attach the Ethernet interface.
 */
2892 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2896 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2898 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2900 device_printf(dev, "can not allocate ifnet structure\n");
2903 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2904 if_initbaudrate(ifp, IF_Gbps(10));
2905 ifp->if_init = ixgbe_init;
2906 ifp->if_softc = adapter;
2907 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2908 ifp->if_ioctl = ixgbe_ioctl;
2909 #if __FreeBSD_version >= 1100036
2910 if_setgetcounterfn(ifp, ixgbe_get_counter);
2912 #if __FreeBSD_version >= 1100045
2913 /* TSO parameters */
2914 ifp->if_hw_tsomax = 65518;
2915 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2916 ifp->if_hw_tsomaxsegsize = 2048;
/* Select the TX entry points: legacy ifq start vs. multiqueue transmit;
 * the ixgbe_start_locked/ixgbe_ring_empty function pointers follow suit. */
2918 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
2919 ifp->if_start = ixgbe_legacy_start;
2920 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2921 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2922 IFQ_SET_READY(&ifp->if_snd);
2923 ixgbe_start_locked = ixgbe_legacy_start_locked;
2924 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
2926 ifp->if_transmit = ixgbe_mq_start;
2927 ifp->if_qflush = ixgbe_qflush;
2928 ixgbe_start_locked = ixgbe_mq_start_locked;
2929 ixgbe_ring_empty = drbr_empty;
2932 ether_ifattach(ifp, adapter->hw.mac.addr);
2934 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
2937 * Tell the upper layer(s) we support long frames.
2939 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2941 /* Set capability flags */
2942 ifp->if_capabilities |= IFCAP_HWCSUM
2946 | IFCAP_VLAN_HWTAGGING
2953 /* Enable the above capabilities by default */
2954 ifp->if_capenable = ifp->if_capabilities;
2957 * Don't turn this on by default, if vlans are
2958 * created on another pseudo device (eg. lagg)
2959 * then vlan events are not passed thru, breaking
2960 * operation, but with HW FILTER off it works. If
2961 * using vlans directly on the ixgbe driver you can
2962 * enable this and get full hardware tag filtering.
2964 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2967 * Specify the media types supported by this adapter and register
2968 * callbacks to update media and link information
2970 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2971 ixgbe_media_status);
2973 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2974 ixgbe_add_media_types(adapter);
2976 /* Set autoselect media by default */
2977 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2980 } /* ixgbe_setup_interface */
2982 /************************************************************************
2983 * ixgbe_add_media_types
2984 ************************************************************************/
/*
 * Populate the ifmedia list from the PHY's supported physical layers,
 * mapping KR/KX4/KX to SR/CX4/CX equivalents when the running ifmedia
 * headers lack the IFM_ETH_XTYPE definitions.
 */
2986 ixgbe_add_media_types(struct adapter *adapter)
2988 struct ixgbe_hw *hw = &adapter->hw;
2989 device_t dev = adapter->dev;
2992 layer = adapter->phy_layer;
2994 /* Media types with matching FreeBSD media defines */
2995 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2996 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2997 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2998 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2999 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
3000 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
3001 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
3002 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3004 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
3005 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
3006 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
/* Multispeed fiber modules also advertise the 1G fiber subtype. */
3009 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
3010 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
3011 if (hw->phy.multispeed_fiber)
3012 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
3015 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
3016 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
3017 if (hw->phy.multispeed_fiber)
3018 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
3020 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
3021 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
3022 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3023 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
3025 #ifdef IFM_ETH_XTYPE
3026 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3027 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
3028 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
3029 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
3030 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3031 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
3033 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
3034 device_printf(dev, "Media supported: 10GbaseKR\n");
3035 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
3036 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
3038 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
3039 device_printf(dev, "Media supported: 10GbaseKX4\n");
3040 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
3041 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
3043 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
3044 device_printf(dev, "Media supported: 1000baseKX\n");
3045 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
3046 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
/* 1000BASE-BX is only reported, not added as a selectable media. */
3049 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
3050 device_printf(dev, "Media supported: 1000baseBX\n");
3052 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
3053 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
3055 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
3058 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3059 } /* ixgbe_add_media_types */
3061 /************************************************************************
3063 ************************************************************************/
/*
 * Kick off link bring-up: SFP parts are handed to the mod/msf tasklets;
 * non-SFP parts negotiate capabilities and call setup_link directly.
 */
3065 ixgbe_config_link(struct adapter *adapter)
3067 struct ixgbe_hw *hw = &adapter->hw;
3068 u32 autoneg, err = 0;
3069 bool sfp, negotiate;
3071 sfp = ixgbe_is_sfp(hw);
3074 if (hw->phy.multispeed_fiber) {
3075 hw->mac.ops.setup_sfp(hw);
3076 ixgbe_enable_tx_laser(hw);
3077 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3079 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3081 if (hw->mac.ops.check_link)
3082 err = ixgbe_check_link(hw, &adapter->link_speed,
3083 &adapter->link_up, FALSE);
/* Fall back to hardware-reported capabilities when nothing is advertised. */
3086 autoneg = hw->phy.autoneg_advertised;
3087 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3088 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3092 if (hw->mac.ops.setup_link)
3093 err = hw->mac.ops.setup_link(hw, autoneg,
3099 } /* ixgbe_config_link */
3102 /************************************************************************
3103 * ixgbe_initialize_transmit_units - Enable transmit units.
3104 ************************************************************************/
/*
 * Program per-ring TX descriptor base/length/head/tail registers,
 * disable head writeback relaxed ordering, and (non-82598) enable
 * DMA TX with MTQC set under a disabled arbiter.
 */
3106 ixgbe_initialize_transmit_units(struct adapter *adapter)
3108 struct tx_ring *txr = adapter->tx_rings;
3109 struct ixgbe_hw *hw = &adapter->hw;
3111 /* Setup the Base and Length of the Tx Descriptor Ring */
3112 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3113 u64 tdba = txr->txdma.dma_paddr;
3117 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3118 (tdba & 0x00000000ffffffffULL));
3119 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3120 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3121 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3123 /* Setup the HW Tx Head and Tail descriptor pointers */
3124 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3125 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3127 /* Cache the tail address */
3128 txr->tail = IXGBE_TDT(j);
3130 /* Disable Head Writeback */
3132 * Note: for X550 series devices, these registers are actually
3133 * prefixed with TPH_ instead of DCA_, but the addresses and
3134 * fields remain the same.
3136 switch (hw->mac.type) {
3137 case ixgbe_mac_82598EB:
3138 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3141 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3144 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3145 switch (hw->mac.type) {
3146 case ixgbe_mac_82598EB:
3147 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3150 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3156 if (hw->mac.type != ixgbe_mac_82598EB) {
3157 u32 dmatxctl, rttdcs;
3159 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3160 dmatxctl |= IXGBE_DMATXCTL_TE;
3161 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3162 /* Disable arbiter to set MTQC */
3163 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3164 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3165 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3166 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
3167 ixgbe_get_mtqc(adapter->iov_mode));
3168 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3169 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3173 } /* ixgbe_initialize_transmit_units */
3175 /************************************************************************
3176 * ixgbe_initialize_rss_mapping
3177 ************************************************************************/
/*
 * Program the RSS redirection table (RETA/ERETA), the hash key (RSSRK),
 * and the hash-type enables in MRQC. With the kernel RSS option enabled
 * the key and indirection come from the stack; otherwise a random key
 * and a round-robin queue mapping are used.
 */
3179 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3181 struct ixgbe_hw *hw = &adapter->hw;
3182 u32 reta = 0, mrqc, rss_key[10];
3183 int queue_id, table_size, index_mult;
3185 u32 rss_hash_config;
3187 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
3188 /* Fetch the configured RSS key */
3189 rss_getkey((uint8_t *)&rss_key);
3191 /* set up random bits */
3192 arc4rand(&rss_key, sizeof(rss_key), 0);
3195 /* Set multiplier for RETA setup and table size based on MAC */
3198 switch (adapter->hw.mac.type) {
3199 case ixgbe_mac_82598EB:
3202 case ixgbe_mac_X550:
3203 case ixgbe_mac_X550EM_x:
3204 case ixgbe_mac_X550EM_a:
3211 /* Set up the redirection table */
3212 for (i = 0, j = 0; i < table_size; i++, j++) {
3213 if (j == adapter->num_queues)
3216 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
3218 * Fetch the RSS bucket id for the given indirection
3219 * entry. Cap it at the number of configured buckets
3220 * (which is num_queues.)
3222 queue_id = rss_get_indirection_to_bucket(i);
3223 queue_id = queue_id % adapter->num_queues;
3225 queue_id = (j * index_mult);
3228 * The low 8 bits are for hash value (n+0);
3229 * The next 8 bits are for hash value (n+1), etc.
/* Four 8-bit entries are packed per 32-bit register; entries beyond
 * index 128 go into the X550-only ERETA extension registers. */
3232 reta = reta | (((uint32_t)queue_id) << 24);
3235 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3237 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3243 /* Now fill our hash function seeds */
3244 for (i = 0; i < 10; i++)
3245 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3247 /* Perform hash on these packet types */
3248 if (adapter->feat_en & IXGBE_FEATURE_RSS)
3249 rss_hash_config = rss_gethashconfig();
3252 * Disable UDP - IP fragments aren't currently being handled
3253 * and so we end up with a mix of 2-tuple and 4-tuple
3256 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
3257 | RSS_HASHTYPE_RSS_TCP_IPV4
3258 | RSS_HASHTYPE_RSS_IPV6
3259 | RSS_HASHTYPE_RSS_TCP_IPV6
3260 | RSS_HASHTYPE_RSS_IPV6_EX
3261 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
3264 mrqc = IXGBE_MRQC_RSSEN;
3265 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3266 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3267 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3268 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3269 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3270 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3271 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3272 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3273 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3274 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3275 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3276 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3277 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3278 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3279 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3280 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
3282 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3283 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3284 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3285 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3286 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
3287 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3288 } /* ixgbe_initialize_rss_mapping */
3291 /************************************************************************
3292 * ixgbe_initialize_receive_units - Setup receive registers and features.
3293 ************************************************************************/
/* Round-up constant so rx_mbuf_sz is converted to whole SRRCTL buffer units. */
3294 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units - program all RX descriptor rings, frame
 * filtering (FCTRL/HLREG0), per-queue SRRCTL, and RX checksum/RSS registers.
 * NOTE(review): extract is missing interleaved lines (braces, 'else', the
 * 'j' index declaration); comments describe only the visible code.
 */
3297 ixgbe_initialize_receive_units(struct adapter *adapter)
3299 struct rx_ring *rxr = adapter->rx_rings;
3300 struct ixgbe_hw *hw = &adapter->hw;
3301 struct ifnet *ifp = adapter->ifp;
3303 u32 bufsz, fctrl, srrctl, rxcsum;
3307 * Make sure receives are disabled while
3308 * setting up the descriptor ring
3310 ixgbe_disable_rx(hw);
3312 /* Enable broadcasts */
3313 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3314 fctrl |= IXGBE_FCTRL_BAM;
/* 82598 additionally needs discard-pause-frames and pass-MAC-control bits. */
3315 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3316 fctrl |= IXGBE_FCTRL_DPF;
3317 fctrl |= IXGBE_FCTRL_PMCF;
3319 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3321 /* Set for Jumbo Frames? */
3322 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3323 if (ifp->if_mtu > ETHERMTU)
3324 hlreg |= IXGBE_HLREG0_JUMBOEN;
3326 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3329 /* CRC stripping is conditional in Netmap */
3330 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3331 (ifp->if_capenable & IFCAP_NETMAP) &&
3333 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3335 #endif /* DEV_NETMAP */
3336 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3338 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf buffer size to SRRCTL's 1KB-granular packet-buffer units. */
3340 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
3341 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3343 for (i = 0; i < adapter->num_queues; i++, rxr++) {
3344 u64 rdba = rxr->rxdma.dma_paddr;
3347 /* Setup the Base and Length of the Rx Descriptor Ring */
3348 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3349 (rdba & 0x00000000ffffffffULL));
3350 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3351 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3352 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3354 /* Set up the SRRCTL register */
3355 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3356 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3357 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3359 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3362 * Set DROP_EN iff we have no flow control and >1 queue.
3363 * Note that srrctl was cleared shortly before during reset,
3364 * so we do not need to clear the bit, but do it just in case
3365 * this code is moved elsewhere.
3367 if (adapter->num_queues > 1 &&
3368 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3369 srrctl |= IXGBE_SRRCTL_DROP_EN;
3371 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3374 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3376 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3377 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3378 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3380 /* Set the driver rx tail address */
3381 rxr->tail = IXGBE_RDT(rxr->me);
/* Non-82598 MACs: configure packet-split header types (PSRTYPE). */
3384 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3385 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
3386 | IXGBE_PSRTYPE_UDPHDR
3387 | IXGBE_PSRTYPE_IPV4HDR
3388 | IXGBE_PSRTYPE_IPV6HDR;
3389 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3392 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3394 ixgbe_initialize_rss_mapping(adapter);
3396 if (adapter->num_queues > 1) {
3397 /* RSS and RX IPP Checksum are mutually exclusive */
3398 rxcsum |= IXGBE_RXCSUM_PCSD;
3401 if (ifp->if_capenable & IFCAP_RXCSUM)
3402 rxcsum |= IXGBE_RXCSUM_PCSD;
3404 /* This is useful for calculating UDP/IP fragment checksums */
3405 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3406 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3408 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3411 } /* ixgbe_initialize_receive_units */
3414 /************************************************************************
3415 * ixgbe_register_vlan
3417 * Run via vlan config EVENT, it enables us to use the
3418 * HW Filter table since we can get the vlan id. This
3419 * just creates the entry in the soft version of the
3420 * VFTA, init will repopulate the real table.
3421 ************************************************************************/
/*
 * ixgbe_register_vlan - vlan_config event handler: record the VLAN id in the
 * software shadow VFTA (index = vtag bits 5..11, bit = vtag bits 0..4) and
 * re-run VLAN hardware setup so the real table is repopulated.
 */
3423 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3425 struct adapter *adapter = ifp->if_softc;
3428 if (ifp->if_softc != arg) /* Not our event */
3431 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3434 IXGBE_CORE_LOCK(adapter);
3435 index = (vtag >> 5) & 0x7F;
3437 adapter->shadow_vfta[index] |= (1 << bit);
3438 ++adapter->num_vlans;
3439 ixgbe_setup_vlan_hw_support(adapter);
3440 IXGBE_CORE_UNLOCK(adapter);
3441 } /* ixgbe_register_vlan */
3443 /************************************************************************
3444 * ixgbe_unregister_vlan
3446 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
3447 ************************************************************************/
/*
 * ixgbe_unregister_vlan - vlan_unconfig event handler: clear the VLAN id's
 * bit in the shadow VFTA and re-run VLAN hardware setup to apply the change.
 */
3449 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3451 struct adapter *adapter = ifp->if_softc;
3454 if (ifp->if_softc != arg)
3457 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3460 IXGBE_CORE_LOCK(adapter);
3461 index = (vtag >> 5) & 0x7F;
3463 adapter->shadow_vfta[index] &= ~(1 << bit);
3464 --adapter->num_vlans;
3465 /* Re-init to load the changes */
3466 ixgbe_setup_vlan_hw_support(adapter);
3467 IXGBE_CORE_UNLOCK(adapter);
3468 } /* ixgbe_unregister_vlan */
3470 /************************************************************************
3471 * ixgbe_setup_vlan_hw_support
3472 ************************************************************************/
/*
 * ixgbe_setup_vlan_hw_support - restore VLAN tag stripping and the hardware
 * VLAN filter table (VFTA) after a soft reset, from the software shadow copy.
 * NOTE(review): extract is missing interleaved lines; comments cover
 * only the visible code.
 */
3474 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3476 struct ifnet *ifp = adapter->ifp;
3477 struct ixgbe_hw *hw = &adapter->hw;
3478 struct rx_ring *rxr;
3484 * We get here thru init_locked, meaning
3485 * a soft reset, this has already cleared
3486 * the VFTA and other state, so if there
3487 * have been no vlan's registered do nothing.
3489 if (adapter->num_vlans == 0)
3492 /* Setup the queues for vlans */
3493 for (i = 0; i < adapter->num_queues; i++) {
3494 rxr = &adapter->rx_rings[i];
3495 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3496 if (hw->mac.type != ixgbe_mac_82598EB) {
3497 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3498 ctrl |= IXGBE_RXDCTL_VME;
3499 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3501 rxr->vtag_strip = TRUE;
/* If hardware VLAN filtering is disabled there is nothing more to program. */
3504 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3507 * A soft reset zero's out the VFTA, so
3508 * we need to repopulate it now.
3510 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
3511 if (adapter->shadow_vfta[i] != 0)
3512 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3513 adapter->shadow_vfta[i]);
3515 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3516 /* Enable the Filter Table if enabled */
3517 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3518 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3519 ctrl |= IXGBE_VLNCTRL_VFE;
/* 82598 uses a global VME bit in VLNCTRL instead of per-queue RXDCTL. */
3521 if (hw->mac.type == ixgbe_mac_82598EB)
3522 ctrl |= IXGBE_VLNCTRL_VME;
3523 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3524 } /* ixgbe_setup_vlan_hw_support */
3526 /************************************************************************
3528 ************************************************************************/
/*
 * ixgbe_enable_intr - build the MAC-specific EIMS interrupt-enable mask,
 * set EIAC auto-clear for MSI-X, then enable every queue vector.
 * NOTE(review): extract is missing interleaved lines ('break's, fwsm/mask
 * declarations); comments describe only the visible code.
 */
3530 ixgbe_enable_intr(struct adapter *adapter)
3532 struct ixgbe_hw *hw = &adapter->hw;
3533 struct ix_queue *que = adapter->queues;
/* Start from everything enabled except the per-queue RTX bits (set below). */
3536 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3538 switch (adapter->hw.mac.type) {
3539 case ixgbe_mac_82599EB:
3540 mask |= IXGBE_EIMS_ECC;
3541 /* Temperature sensor on some adapters */
3542 mask |= IXGBE_EIMS_GPI_SDP0;
3543 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3544 mask |= IXGBE_EIMS_GPI_SDP1;
3545 mask |= IXGBE_EIMS_GPI_SDP2;
3547 case ixgbe_mac_X540:
3548 /* Detect if Thermal Sensor is enabled */
3549 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3550 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3551 mask |= IXGBE_EIMS_TS;
3552 mask |= IXGBE_EIMS_ECC;
3554 case ixgbe_mac_X550:
3555 /* MAC thermal sensor is automatically enabled */
3556 mask |= IXGBE_EIMS_TS;
3557 mask |= IXGBE_EIMS_ECC;
3559 case ixgbe_mac_X550EM_x:
3560 case ixgbe_mac_X550EM_a:
3561 /* Some devices use SDP0 for important information */
3562 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3563 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3564 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3565 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3566 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3567 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3568 mask |= IXGBE_EICR_GPI_SDP0_X540;
3569 mask |= IXGBE_EIMS_ECC;
3575 /* Enable Fan Failure detection */
3576 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3577 mask |= IXGBE_EIMS_GPI_SDP1;
3579 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3580 mask |= IXGBE_EIMS_MAILBOX;
3581 /* Enable Flow Director */
3582 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3583 mask |= IXGBE_EIMS_FLOW_DIR;
3585 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3587 /* With MSI-X we use auto clear */
3588 if (adapter->msix_mem) {
3589 mask = IXGBE_EIMS_ENABLE_MASK;
3590 /* Don't autoclear Link */
3591 mask &= ~IXGBE_EIMS_OTHER;
3592 mask &= ~IXGBE_EIMS_LSC;
3593 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3594 mask &= ~IXGBE_EIMS_MAILBOX;
3595 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3599 * Now enable all queues, this is done separately to
3600 * allow for handling the extended (beyond 32) MSI-X
3601 * vectors that can be used by 82599
3603 for (int i = 0; i < adapter->num_queues; i++, que++)
3604 ixgbe_enable_queue(adapter, que->msix);
3606 IXGBE_WRITE_FLUSH(hw);
3609 } /* ixgbe_enable_intr */
3611 /************************************************************************
3612 * ixgbe_disable_intr
3613 ************************************************************************/
/*
 * ixgbe_disable_intr - mask all device interrupts. Clears MSI-X auto-clear
 * (EIAC), then masks EIMC; non-82598 MACs need the extended EIMC_EX
 * registers for queue vectors beyond the first 16.
 */
3615 ixgbe_disable_intr(struct adapter *adapter)
3617 if (adapter->msix_mem)
3618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3619 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3620 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3626 IXGBE_WRITE_FLUSH(&adapter->hw);
3629 } /* ixgbe_disable_intr */
3631 /************************************************************************
3632 * ixgbe_get_slot_info
3634 * Get the width and transaction speed of
3635 * the slot this adapter is plugged into.
3636 ************************************************************************/
/*
 * ixgbe_get_slot_info - determine and report the PCIe speed/width of the
 * slot the adapter occupies; quad-port devices behind an internal bridge
 * require walking up the PCI tree to read the real slot's link status.
 * NOTE(review): extract is missing interleaved lines (labels, braces,
 * 'link'/'offset' declarations); comments cover only the visible code.
 */
3638 ixgbe_get_slot_info(struct adapter *adapter)
3640 device_t dev = adapter->dev;
3641 struct ixgbe_hw *hw = &adapter->hw;
3644 int bus_info_valid = TRUE;
3646 /* Some devices are behind an internal bridge */
3647 switch (hw->device_id) {
3648 case IXGBE_DEV_ID_82599_SFP_SF_QP:
3649 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
3650 goto get_parent_info;
3655 ixgbe_get_bus_info(hw);
3658 * Some devices don't use PCI-E, but there is no need
3659 * to display "Unknown" for bus speed and width.
3661 switch (hw->mac.type) {
3662 case ixgbe_mac_X550EM_x:
3663 case ixgbe_mac_X550EM_a:
3671 * For the Quad port adapter we need to parse back
3672 * up the PCI tree to find the speed of the expansion
3673 * slot into which this adapter is plugged. A bit more work.
3675 dev = device_get_parent(device_get_parent(dev));
3677 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
3678 pci_get_slot(dev), pci_get_function(dev));
3680 dev = device_get_parent(device_get_parent(dev));
3682 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
3683 pci_get_slot(dev), pci_get_function(dev));
3685 /* Now get the PCI Express Capabilities offset */
3686 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
3688 * Hmm...can't get PCI-Express capabilities.
3689 * Falling back to default method.
3691 bus_info_valid = FALSE;
3692 ixgbe_get_bus_info(hw);
3695 /* ...and read the Link Status Register */
3696 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3697 ixgbe_set_pci_config_data_generic(hw, link);
3700 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3701 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
3702 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
3703 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
3705 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3706 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3707 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the detected slot bandwidth is below what the card needs. */
3710 if (bus_info_valid) {
3711 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3712 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3713 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3714 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
3715 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
3717 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3718 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3719 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3720 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
3721 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
3724 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
3727 } /* ixgbe_get_slot_info */
3730 /************************************************************************
3733 * Setup the correct IVAR register for a particular MSI-X interrupt
3734 * (yes this is all very magic and confusing :)
3735 * - entry is the register array entry
3736 * - vector is the MSI-X vector for this queue
3737 * - type is RX/TX/MISC
3738 ************************************************************************/
/*
 * ixgbe_set_ivar - route one interrupt cause to an MSI-X vector via the
 * IVAR registers. 'entry' is the cause index, 'vector' the MSI-X vector,
 * 'type' selects RX (0), TX (1) or MISC (-1); layout differs by MAC.
 * NOTE(review): extract is missing interleaved lines ('break's, the
 * index/ivar declarations); comments cover only the visible code.
 */
3740 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3742 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the valid bit set alongside the vector number. */
3745 vector |= IXGBE_IVAR_ALLOC_VAL;
3747 switch (hw->mac.type) {
3749 case ixgbe_mac_82598EB:
3751 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
/* 82598: four 8-bit cause slots per 32-bit IVAR register. */
3753 entry += (type * 64);
3754 index = (entry >> 2) & 0x1F;
3755 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3756 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3757 ivar |= (vector << (8 * (entry & 0x3)));
3758 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3761 case ixgbe_mac_82599EB:
3762 case ixgbe_mac_X540:
3763 case ixgbe_mac_X550:
3764 case ixgbe_mac_X550EM_x:
3765 case ixgbe_mac_X550EM_a:
3766 if (type == -1) { /* MISC IVAR */
3767 index = (entry & 1) * 8;
3768 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3769 ivar &= ~(0xFF << index);
3770 ivar |= (vector << index);
3771 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3772 } else { /* RX/TX IVARS */
3773 index = (16 * (entry & 1)) + (8 * type);
3774 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3775 ivar &= ~(0xFF << index);
3776 ivar |= (vector << index);
3777 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3783 } /* ixgbe_set_ivar */
3785 /************************************************************************
3786 * ixgbe_configure_ivars
3787 ************************************************************************/
/*
 * ixgbe_configure_ivars - map each queue's RX and TX causes to its MSI-X
 * vector, set an initial EITR moderation value derived from
 * ixgbe_max_interrupt_rate, and route the link cause to the admin vector.
 * NOTE(review): extract is missing the 'newitr' declaration/else branch.
 */
3789 ixgbe_configure_ivars(struct adapter *adapter)
3791 struct ix_queue *que = adapter->queues;
/* EITR interval field: 4us units; 4000000/rate yields the register value. */
3794 if (ixgbe_max_interrupt_rate > 0)
3795 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3798 * Disable DMA coalescing if interrupt moderation is
3805 for (int i = 0; i < adapter->num_queues; i++, que++) {
3806 struct rx_ring *rxr = &adapter->rx_rings[i];
3807 struct tx_ring *txr = &adapter->tx_rings[i];
3808 /* First the RX queue entry */
3809 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3810 /* ... and the TX */
3811 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3812 /* Set an Initial EITR value */
3813 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3816 /* For the Link interrupt */
3817 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3818 } /* ixgbe_configure_ivars */
3820 /************************************************************************
3823 * Determine if a port had optics inserted.
3824 ************************************************************************/
/*
 * ixgbe_sfp_probe - poll for newly inserted optics on NL-PHY ports that
 * previously had no SFP. Returns TRUE (per the 'result' flag) once a
 * supported module is identified; unsupported modules are reported.
 * NOTE(review): extract is missing interleaved lines; comments cover
 * only the visible code.
 */
3826 ixgbe_sfp_probe(struct adapter *adapter)
3828 struct ixgbe_hw *hw = &adapter->hw;
3829 device_t dev = adapter->dev;
3830 bool result = FALSE;
3832 if ((hw->phy.type == ixgbe_phy_nl) &&
3833 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3834 s32 ret = hw->phy.ops.identify_sfp(hw);
3837 ret = hw->phy.ops.reset(hw);
/* Stop probing either way: a module was found (good or unsupported). */
3838 adapter->sfp_probe = FALSE;
3839 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3840 device_printf(dev, "Unsupported SFP+ module detected!");
3842 "Reload driver with supported module.\n");
3845 device_printf(dev, "SFP+ module detected!\n");
3846 /* We now have supported optics */
3852 } /* ixgbe_sfp_probe */
3854 /************************************************************************
3855 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
3857 * Done outside of interrupt context since the driver might sleep
3858 ************************************************************************/
/*
 * ixgbe_handle_link - MSI-X link-interrupt tasklet: refresh link state,
 * propagate it to the stack, then re-arm the link-status-change interrupt.
 * Runs outside interrupt context because ixgbe_check_link may sleep.
 */
3860 ixgbe_handle_link(void *context, int pending)
3862 struct adapter *adapter = context;
3863 struct ixgbe_hw *hw = &adapter->hw;
3865 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
3866 ixgbe_update_link_status(adapter);
3868 /* Re-enable link interrupts */
3869 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3870 } /* ixgbe_handle_link */
3872 /************************************************************************
3873 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3874 ************************************************************************/
/*
 * ixgbe_handle_mod - SFP module-insertion tasklet: on crosstalk-affected
 * parts first confirm the cage is occupied via ESDP, then identify the
 * module, run MAC SFP setup, and queue the multispeed-fiber (MSF) task.
 * NOTE(review): extract is missing interleaved lines ('break's, error
 * branches); comments cover only the visible code.
 */
3876 ixgbe_handle_mod(void *context, int pending)
3878 struct adapter *adapter = context;
3879 struct ixgbe_hw *hw = &adapter->hw;
3880 device_t dev = adapter->dev;
3881 u32 err, cage_full = 0;
3883 if (adapter->hw.need_crosstalk_fix) {
3884 switch (hw->mac.type) {
3885 case ixgbe_mac_82599EB:
3886 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3889 case ixgbe_mac_X550EM_x:
3890 case ixgbe_mac_X550EM_a:
3891 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3902 err = hw->phy.ops.identify_sfp(hw);
3903 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3905 "Unsupported SFP+ module type was detected.\n");
3909 err = hw->mac.ops.setup_sfp(hw);
3910 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3912 "Setup failure - unsupported SFP+ module type.\n");
/* Hand off link (re)negotiation to the multispeed-fiber tasklet. */
3915 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3918 } /* ixgbe_handle_mod */
3921 /************************************************************************
3922 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3923 ************************************************************************/
/*
 * ixgbe_handle_msf - multispeed-fiber tasklet: re-identify the PHY layer,
 * (re)negotiate and set up the link, and rebuild the ifmedia list so
 * ifconfig reflects the media supported by the new optics.
 * NOTE(review): extract is missing interleaved lines (autoneg/negotiate
 * declarations, the CORE_LOCK acquire); comments cover visible code only.
 */
3925 ixgbe_handle_msf(void *context, int pending)
3927 struct adapter *adapter = context;
3928 struct ixgbe_hw *hw = &adapter->hw;
3932 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3933 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3935 autoneg = hw->phy.autoneg_advertised;
3936 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3937 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3938 if (hw->mac.ops.setup_link)
3939 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3941 /* Adjust media types shown in ifconfig */
3942 ifmedia_removeall(&adapter->media);
3943 ixgbe_add_media_types(adapter);
3944 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3945 IXGBE_CORE_UNLOCK(adapter);
3947 } /* ixgbe_handle_msf */
3949 /************************************************************************
3950 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3951 ************************************************************************/
/*
 * ixgbe_handle_phy - external-PHY interrupt tasklet: service the PHY's
 * LASI (link alarm status interrupt), warning loudly on over-temperature.
 * NOTE(review): extract is missing the 'int error' declaration/else line.
 */
3953 ixgbe_handle_phy(void *context, int pending)
3955 struct adapter *adapter = context;
3956 struct ixgbe_hw *hw = &adapter->hw;
3959 error = hw->phy.ops.handle_lasi(hw);
3960 if (error == IXGBE_ERR_OVERTEMP)
3961 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3963 device_printf(adapter->dev,
3964 "Error handling LASI interrupt: %d\n", error);
3967 } /* ixgbe_handle_phy */
3969 /************************************************************************
3970 * ixgbe_config_dmac - Configure DMA Coalescing
3971 ************************************************************************/
/*
 * ixgbe_config_dmac - configure DMA coalescing (X550-class only).
 * Reprograms the hardware only when the requested watchdog timer or the
 * current link speed differs from what is already configured.
 */
3973 ixgbe_config_dmac(struct adapter *adapter)
3975 struct ixgbe_hw *hw = &adapter->hw;
3976 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* Pre-X550 MACs have no DMA-coalescing support; nothing to do. */
3978 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR as "differs": update only when the timer or link speed changed. */
3981 if (dcfg->watchdog_timer ^ adapter->dmac ||
3982 dcfg->link_speed ^ adapter->link_speed) {
3983 dcfg->watchdog_timer = adapter->dmac;
3984 dcfg->fcoe_en = false;
3985 dcfg->link_speed = adapter->link_speed;
3988 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3989 dcfg->watchdog_timer, dcfg->link_speed);
3991 hw->mac.ops.dmac_config(hw);
3993 } /* ixgbe_config_dmac */
3995 /************************************************************************
3996 * ixgbe_check_wol_support
3998 * Checks whether the adapter's ports are capable of
3999 * Wake On LAN by reading the adapter's NVM.
4001 * Sets each port's hw->wol_enabled value depending
4002 * on the value read here.
4003 ************************************************************************/
/*
 * ixgbe_check_wol_support - read the NVM device capabilities to decide
 * whether this port supports Wake On LAN; sets adapter->wol_support and
 * hw->wol_enabled, and snapshots the current WUFC wake-up filter config.
 * NOTE(review): extract is missing the 'dev_caps' declaration and part of
 * the per-port (PORT0 vs PORT0_1) condition.
 */
4005 ixgbe_check_wol_support(struct adapter *adapter)
4007 struct ixgbe_hw *hw = &adapter->hw;
4010 /* Find out WoL support for port */
4011 adapter->wol_support = hw->wol_enabled = 0;
4012 ixgbe_get_device_caps(hw, &dev_caps);
4013 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
4014 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
4016 adapter->wol_support = hw->wol_enabled = 1;
4018 /* Save initial wake up filter configuration */
4019 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
4022 } /* ixgbe_check_wol_support */
4024 /************************************************************************
4025 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
4027 * Prepare the adapter/port for LPLU and/or WoL
4028 ************************************************************************/
/*
 * ixgbe_setup_low_power_mode - prepare the port for suspend: on X550EM
 * baseT parts program WoL registers and enter low-power link-up (LPLU);
 * all other adapters are simply stopped. Caller must hold the core lock.
 * NOTE(review): extract is missing interleaved lines (the 'error'
 * declaration, else branch); comments cover only the visible code.
 */
4030 ixgbe_setup_low_power_mode(struct adapter *adapter)
4032 struct ixgbe_hw *hw = &adapter->hw;
4033 device_t dev = adapter->dev;
4036 mtx_assert(&adapter->core_mtx, MA_OWNED);
4038 /* Limit power management flow to X550EM baseT */
4039 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4040 hw->phy.ops.enter_lplu) {
4041 /* Turn off support for APM wakeup. (Using ACPI instead) */
4042 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4043 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4046 * Clear Wake Up Status register to prevent any previous wakeup
4047 * events from waking us up immediately after we suspend.
4049 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4052 * Program the Wakeup Filter Control register with user filter
4055 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4057 /* Enable wakeups and power management in Wakeup Control */
4058 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4059 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4061 /* X550EM baseT adapters need a special LPLU flow */
/* Disable PHY resets so the stop path doesn't undo the LPLU state. */
4062 hw->phy.reset_disable = true;
4063 ixgbe_stop(adapter);
4064 error = hw->phy.ops.enter_lplu(hw);
4066 device_printf(dev, "Error entering LPLU: %d\n", error);
4067 hw->phy.reset_disable = false;
4069 /* Just stop for other adapters */
4070 ixgbe_stop(adapter);
4074 } /* ixgbe_setup_low_power_mode */
4076 /************************************************************************
4077 * ixgbe_update_stats_counters - Update board statistics counters.
4078 ************************************************************************/
/*
 * ixgbe_update_stats_counters - accumulate the MAC's clear-on-read
 * hardware statistics registers into adapter->stats_pf and mirror the
 * totals into the OS ifnet counters.
 * NOTE(review): extract is missing interleaved lines (missed_rx
 * accumulation, some braces); comments cover only the visible code.
 */
4080 ixgbe_update_stats_counters(struct adapter *adapter)
4082 struct ixgbe_hw *hw = &adapter->hw;
4083 struct ixgbe_hw_stats *stats = &adapter->stats_pf;
4084 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4085 u64 total_missed_rx = 0;
4087 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4088 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4089 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4090 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4091 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
/* Per-queue packet/drop counters; hardware exposes 16 queue slots. */
4093 for (int i = 0; i < 16; i++) {
4094 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4095 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4096 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4098 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4099 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4100 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4102 /* Hardware workaround, gprc counts missed packets */
4103 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4104 stats->gprc -= missed_rx;
/* 64-bit byte counters: non-82598 MACs split them into low/high halves. */
4106 if (hw->mac.type != ixgbe_mac_82598EB) {
4107 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4108 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4109 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4110 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4111 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4112 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4113 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4114 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4116 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4117 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4118 /* 82598 only has a counter in the high register */
4119 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4120 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4121 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4125 * Workaround: mprc hardware is incorrectly counting
4126 * broadcasts, so for now we subtract those.
4128 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4129 stats->bprc += bprc;
4130 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4131 if (hw->mac.type == ixgbe_mac_82598EB)
4132 stats->mprc -= bprc;
4134 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4135 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4136 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4137 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4138 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4139 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4141 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4142 stats->lxontxc += lxon;
4143 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4144 stats->lxofftxc += lxoff;
4145 total = lxon + lxoff;
/* TX counters include transmitted flow-control frames; back them out. */
4147 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4148 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4149 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4150 stats->gptc -= total;
4151 stats->mptc -= total;
4152 stats->ptc64 -= total;
4153 stats->gotc -= total * ETHER_MIN_LEN;
4155 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4156 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4157 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4158 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4159 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4160 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4161 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4162 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4163 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4164 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4165 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4166 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4167 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4168 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4169 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4170 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4171 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4172 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4173 /* Only read FCOE on 82599 */
4174 if (hw->mac.type != ixgbe_mac_82598EB) {
4175 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4176 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4177 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4178 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4179 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4182 /* Fill out the OS statistics structure */
4183 IXGBE_SET_IPACKETS(adapter, stats->gprc);
4184 IXGBE_SET_OPACKETS(adapter, stats->gptc);
4185 IXGBE_SET_IBYTES(adapter, stats->gorc);
4186 IXGBE_SET_OBYTES(adapter, stats->gotc);
4187 IXGBE_SET_IMCASTS(adapter, stats->mprc);
4188 IXGBE_SET_OMCASTS(adapter, stats->mptc);
4189 IXGBE_SET_COLLISIONS(adapter, 0);
4190 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4191 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
4192 } /* ixgbe_update_stats_counters */
4194 #if __FreeBSD_version >= 1100036
4195 /************************************************************************
4197 ************************************************************************/
/*
 * ixgbe_get_counter - if_get_counter method (FreeBSD >= 1100036): return
 * the requested interface statistic from the adapter's cached totals;
 * OQDROPS is summed live across all TX buf-ring drop counters.
 * NOTE(review): extract is missing interleaved lines ('rv' declaration,
 * switch brace, returns); comments cover only the visible code.
 */
4199 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4201 struct adapter *adapter;
4202 struct tx_ring *txr;
4205 adapter = if_getsoftc(ifp);
4208 case IFCOUNTER_IPACKETS:
4209 return (adapter->ipackets);
4210 case IFCOUNTER_OPACKETS:
4211 return (adapter->opackets);
4212 case IFCOUNTER_IBYTES:
4213 return (adapter->ibytes);
4214 case IFCOUNTER_OBYTES:
4215 return (adapter->obytes);
4216 case IFCOUNTER_IMCASTS:
4217 return (adapter->imcasts);
4218 case IFCOUNTER_OMCASTS:
4219 return (adapter->omcasts);
4220 case IFCOUNTER_COLLISIONS:
4222 case IFCOUNTER_IQDROPS:
4223 return (adapter->iqdrops);
4224 case IFCOUNTER_OQDROPS:
4226 txr = adapter->tx_rings;
4227 for (int i = 0; i < adapter->num_queues; i++, txr++)
4228 rv += txr->br->br_drops;
4230 case IFCOUNTER_IERRORS:
4231 return (adapter->ierrors);
/* Anything not tracked locally falls through to the stack default. */
4233 return (if_get_counter_default(ifp, cnt));
4235 } /* ixgbe_get_counter */
4238 /************************************************************************
4239 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
4241 * Retrieves the TDH value from the hardware
4242 ************************************************************************/
/*
 * ixgbe_sysctl_tdh_handler - read-only sysctl: report the hardware TX
 * Descriptor Head (TDH) register for the ring passed via oid_arg1.
 */
4244 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4246 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4253 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4254 error = sysctl_handle_int(oidp, &val, 0, req);
4255 if (error || !req->newptr)
4259 } /* ixgbe_sysctl_tdh_handler */
4261 /************************************************************************
4262 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
4264 * Retrieves the TDT value from the hardware
4265 ************************************************************************/
/*
 * ixgbe_sysctl_tdt_handler - read-only sysctl: report the hardware TX
 * Descriptor Tail (TDT) register for the ring passed via oid_arg1.
 */
4267 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4269 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4276 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4277 error = sysctl_handle_int(oidp, &val, 0, req);
4278 if (error || !req->newptr)
4282 } /* ixgbe_sysctl_tdt_handler */
4284 /************************************************************************
4285 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
4287 * Retrieves the RDH value from the hardware
4288 ************************************************************************/
/*
 * ixgbe_sysctl_rdh_handler - read-only sysctl: report the hardware RX
 * Descriptor Head (RDH) register for the ring passed via oid_arg1.
 */
4290 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4292 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4299 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4300 error = sysctl_handle_int(oidp, &val, 0, req);
4301 if (error || !req->newptr)
4305 } /* ixgbe_sysctl_rdh_handler */
4307 /************************************************************************
4308 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
4310 * Retrieves the RDT value from the hardware
4311 ************************************************************************/
/*
 * NOTE(review): interior lines are missing from this extract; comments
 * annotate only the visible statements.
 */
4313 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
/* The RX ring to query was registered as this sysctl's oid_arg1. */
4315 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the ring's current tail index from the RDT register. */
4322 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
/* Report the value through the sysctl framework. */
4323 error = sysctl_handle_int(oidp, &val, 0, req);
/* Bail out on error, or when the request carried no new value. */
4324 if (error || !req->newptr)
4328 } /* ixgbe_sysctl_rdt_handler */
4330 /************************************************************************
4331 * ixgbe_sysctl_interrupt_rate_handler
4332 ************************************************************************/
/*
 * Sysctl handler for a queue's interrupt moderation rate.  Reads the
 * current rate out of the queue's EITR register, and on write converts
 * the requested interrupts/sec back into an EITR interval.
 * NOTE(review): interior lines (error handling, returns) are missing
 * from this extract.
 */
4334 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
/* The queue was registered as this sysctl's oid_arg1. */
4336 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4338 unsigned int reg, usec, rate;
4340 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* Extract the interval field (bits 3..11) from the EITR value. */
4341 usec = ((reg & 0x0FF8) >> 3);
/* Convert the hardware interval into an interrupts/sec figure. */
4343 rate = 500000 / usec;
4346 error = sysctl_handle_int(oidp, &rate, 0, req);
4347 if (error || !req->newptr)
/* Writing: start from "no limit", then apply the requested rate. */
4349 reg &= ~0xfff; /* default, no limitation */
4350 ixgbe_max_interrupt_rate = 0;
4351 if (rate > 0 && rate < 500000) {
4354 ixgbe_max_interrupt_rate = rate;
/* Encode the new rate into the EITR interval field. */
4355 reg |= ((4000000/rate) & 0xff8);
4357 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4360 } /* ixgbe_sysctl_interrupt_rate_handler */
4362 /************************************************************************
4363 * ixgbe_add_device_sysctls
4364 ************************************************************************/
/*
 * Registers the per-device sysctl nodes under the device's sysctl tree:
 * flow control, interrupt moderation, advertised speed, power state and
 * RSS debug for all parts, plus DMA coalescing (X550+), Wake-on-LAN and
 * external-PHY sensors (X550EM_X_10G_T), and EEE where the feature
 * capability bit is set.
 */
4366 ixgbe_add_device_sysctls(struct adapter *adapter)
4368 device_t dev = adapter->dev;
4369 struct ixgbe_hw *hw = &adapter->hw;
4370 struct sysctl_oid_list *child;
4371 struct sysctl_ctx_list *ctx;
4373 ctx = device_get_sysctl_ctx(dev);
4374 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4376 /* Sysctls for all devices */
4377 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4378 adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC)
4380 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
4381 &ixgbe_enable_aim, 1, "Interrupt Moderation");
4383 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4384 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
4385 IXGBE_SYSCTL_DESC_ADV_SPEED);
4388 /* testing sysctls (for all devices) */
4389 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4390 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
4391 "I", "PCI Power State");
4393 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4394 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4395 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4397 /* for X550 series devices */
4398 if (hw->mac.type >= ixgbe_mac_X550)
4399 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4400 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
4401 "I", "DMA Coalesce");
4403 /* for WoL-capable devices */
4404 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4405 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4406 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4407 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
4409 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4410 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
4411 "I", "Enable/Disable Wake Up Filters");
4414 /* for X552/X557-AT devices */
4415 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4416 struct sysctl_oid *phy_node;
4417 struct sysctl_oid_list *phy_list;
/* External PHY sensors get their own "phy" sub-node. */
4419 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4420 CTLFLAG_RD, NULL, "External PHY sysctls");
4421 phy_list = SYSCTL_CHILDREN(phy_node);
4423 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4424 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
4425 "I", "Current External PHY Temperature (Celsius)");
4427 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4428 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4429 ixgbe_sysctl_phy_overtemp_occurred, "I",
4430 "External PHY High Temperature Event Occurred");
/* Only expose EEE control when the hardware is capable of it. */
4433 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
4434 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
4435 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4436 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
4438 } /* ixgbe_add_device_sysctls */
4440 /************************************************************************
4441 * ixgbe_add_hw_stats
4443 * Add sysctl variables, one per statistic, to the system.
4444 ************************************************************************/
/*
 * Builds the statistics sysctl tree: driver-level counters at the top,
 * one "queue%d" node per TX and per RX ring, and a "mac_stats" node
 * mirroring the hardware statistics registers in adapter->stats_pf.
 */
4446 ixgbe_add_hw_stats(struct adapter *adapter)
4448 device_t dev = adapter->dev;
4449 struct tx_ring *txr = adapter->tx_rings;
4450 struct rx_ring *rxr = adapter->rx_rings;
4451 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4452 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4453 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4454 struct ixgbe_hw_stats *stats = &adapter->stats_pf;
4455 struct sysctl_oid *stat_node, *queue_node;
4456 struct sysctl_oid_list *stat_list, *queue_list;
4458 #define QUEUE_NAME_LEN 32
4459 char namebuf[QUEUE_NAME_LEN];
4461 /* Driver Statistics */
4462 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4463 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
4464 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4465 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
4466 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4467 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
4468 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4469 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-ring statistics, one sub-node per queue. */
4471 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4472 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4473 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4474 CTLFLAG_RD, NULL, "Queue Name");
4475 queue_list = SYSCTL_CHILDREN(queue_node);
4477 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4478 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4479 sizeof(&adapter->queues[i]),
4480 ixgbe_sysctl_interrupt_rate_handler, "IU",
4482 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4483 CTLFLAG_RD, &(adapter->queues[i].irqs),
4484 "irqs on this queue");
/* Head/tail registers go through PROC handlers (live HW reads). */
4485 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4486 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4487 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
4488 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4489 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4490 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
4491 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4492 CTLFLAG_RD, &txr->tso_tx, "TSO");
4493 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4494 CTLFLAG_RD, &txr->no_tx_dma_setup,
4495 "Driver tx dma failure in xmit");
4496 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4497 CTLFLAG_RD, &txr->no_desc_avail,
4498 "Queue No Descriptor Available");
4499 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4500 CTLFLAG_RD, &txr->total_packets,
4501 "Queue Packets Transmitted");
4502 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4503 CTLFLAG_RD, &txr->br->br_drops,
4504 "Packets dropped in buf_ring");
/* Per-RX-ring statistics; reuses the same "queue%d" node names. */
4507 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4508 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4509 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4510 CTLFLAG_RD, NULL, "Queue Name");
4511 queue_list = SYSCTL_CHILDREN(queue_node);
4513 struct lro_ctrl *lro = &rxr->lro;
4515 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4516 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4517 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
4518 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4519 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4520 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
4521 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4522 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
4523 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4524 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
4525 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4526 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
4527 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
4528 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
/* LRO counter types changed width at FreeBSD 11; pick macro to match. */
4529 #if __FreeBSD_version < 1100000
4530 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4531 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4532 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4533 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4535 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4536 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4537 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4538 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4542 /* MAC stats get their own sub node */
4544 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4545 CTLFLAG_RD, NULL, "MAC Statistics");
4546 stat_list = SYSCTL_CHILDREN(stat_node);
/* Error counters mirrored from the hardware statistics registers. */
4548 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4549 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
4550 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4551 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
4552 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4553 CTLFLAG_RD, &stats->errbc, "Byte Errors");
4554 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4555 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
4556 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4557 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
4558 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4559 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
4560 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4561 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
4562 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
4563 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
4565 /* Flow Control stats */
4566 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4567 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
4568 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4569 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
4570 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4571 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
4572 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4573 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
4575 /* Packet Reception Stats */
4576 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4577 CTLFLAG_RD, &stats->tor, "Total Octets Received");
4578 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4579 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
4580 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4581 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
4582 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4583 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
4584 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4585 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
4586 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4587 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
4588 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4589 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
4590 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4591 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
4592 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4593 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
4594 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4595 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
4596 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4597 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
4598 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4599 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4601 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
4602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4603 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
4604 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4605 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4607 CTLFLAG_RD, &stats->rjc, "Received Jabber");
4608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4609 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
4610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4611 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4613 CTLFLAG_RD, &stats->xec, "Checksum Errors");
4615 /* Packet Transmission Stats */
4616 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4617 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4619 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4621 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
4622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4623 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4625 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
4626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4627 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
4628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4629 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4631 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4633 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
4634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4635 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
4636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4637 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4639 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
4640 } /* ixgbe_add_hw_stats */
4642 /************************************************************************
4643 * ixgbe_set_sysctl_value
4644 ************************************************************************/
/*
 * Convenience wrapper: registers a single read-write integer sysctl
 * named 'name' under the device's sysctl tree, backed by *limit and
 * seeded with 'value'.
 */
4646 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4647 const char *description, int *limit, int value)
4650 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4651 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4652 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4653 } /* ixgbe_set_sysctl_value */
4655 /************************************************************************
4656 * ixgbe_sysctl_flowcntl
4658 * SYSCTL wrapper around setting Flow Control
4659 ************************************************************************/
/*
 * Reports the current flow-control mode, and on write delegates the
 * change to ixgbe_set_flowcntl().  NOTE(review): interior lines
 * (declarations and early returns) are missing from this extract.
 */
4661 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4663 struct adapter *adapter;
4666 adapter = (struct adapter *)arg1;
/* Start from the mode currently programmed in hardware state. */
4667 fc = adapter->hw.fc.current_mode;
4669 error = sysctl_handle_int(oidp, &fc, 0, req);
4670 if ((error) || (req->newptr == NULL))
4673 /* Don't bother if it's not changed */
4674 if (fc == adapter->hw.fc.current_mode)
/* Apply the new mode; ixgbe_set_flowcntl validates the value. */
4677 return ixgbe_set_flowcntl(adapter, fc);
4678 } /* ixgbe_sysctl_flowcntl */
4680 /************************************************************************
4681 * ixgbe_set_flowcntl - Set flow control
4683 * Flow control values:
4688 ************************************************************************/
/*
 * Programs the requested flow-control mode into the adapter.  With
 * multiple queues, RX drop is disabled while pause frames are in use
 * (so flow control can backpressure the link) and re-enabled when flow
 * control is off.  NOTE(review): switch header, some case labels and
 * the final return are missing from this extract.
 */
4690 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4693 case ixgbe_fc_rx_pause:
4694 case ixgbe_fc_tx_pause:
4696 adapter->hw.fc.requested_mode = fc;
/* Pause frames and per-queue RX drop don't mix. */
4697 if (adapter->num_queues > 1)
4698 ixgbe_disable_rx_drop(adapter);
4701 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4702 if (adapter->num_queues > 1)
4703 ixgbe_enable_rx_drop(adapter);
4709 /* Don't autoneg if forcing a value */
4710 adapter->hw.fc.disable_fc_autoneg = TRUE;
4711 ixgbe_fc_enable(&adapter->hw);
4714 } /* ixgbe_set_flowcntl */
4716 /************************************************************************
4717 * ixgbe_sysctl_advertise
4719 * SYSCTL wrapper around setting advertised speed
4720 ************************************************************************/
/*
 * Reports the cached advertised-speed bitmask and, on write, hands the
 * new value to ixgbe_set_advertise() which validates and applies it.
 */
4722 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4724 struct adapter *adapter;
4725 int error, advertise;
4727 adapter = (struct adapter *)arg1;
4728 advertise = adapter->advertise;
4730 error = sysctl_handle_int(oidp, &advertise, 0, req);
4731 if ((error) || (req->newptr == NULL))
4734 return ixgbe_set_advertise(adapter, advertise);
4735 } /* ixgbe_sysctl_advertise */
4737 /************************************************************************
4738 * ixgbe_set_advertise - Control advertised link speed
4741 * 0x1 - advertise 100 Mb
4742 * 0x2 - advertise 1G
4743 * 0x4 - advertise 10G
4744 * 0x8 - advertise 10 Mb (yes, Mb)
4745 ************************************************************************/
/*
 * Validates and applies a new advertised-speed bitmask (see the bit
 * legend above).  Only meaningful for copper and multispeed-fiber
 * media; each requested bit must also be present in the link
 * capabilities reported by the MAC.  On success the mask is cached in
 * adapter->advertise and setup_link() is invoked.  NOTE(review):
 * interior lines (declarations, error returns) are missing from this
 * extract.
 */
4747 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4750 struct ixgbe_hw *hw;
4751 ixgbe_link_speed speed = 0;
4752 ixgbe_link_speed link_caps = 0;
4753 s32 err = IXGBE_NOT_IMPLEMENTED;
4754 bool negotiate = FALSE;
4756 /* Checks to validate new value */
4757 if (adapter->advertise == advertise) /* no change */
4763 /* No speed changes for backplane media */
4764 if (hw->phy.media_type == ixgbe_media_type_backplane)
4767 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4768 (hw->phy.multispeed_fiber))) {
4769 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n")
/* Mask must be within the 4 defined bits. */
4773 if (advertise < 0x1 || advertise > 0xF) {
4774 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
/* Ask the MAC what this part can actually do. */
4778 if (hw->mac.ops.get_link_capabilities) {
4779 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4781 if (err != IXGBE_SUCCESS) {
4782 device_printf(dev, "Unable to determine supported advertise speeds\n");
4787 /* Set new value and report new advertised mode */
4788 if (advertise & 0x1) {
4789 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4790 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4793 speed |= IXGBE_LINK_SPEED_100_FULL;
4795 if (advertise & 0x2) {
4796 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4797 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4800 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4802 if (advertise & 0x4) {
4803 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4804 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4807 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4809 if (advertise & 0x8) {
4810 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4811 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4814 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Kick the link with the validated speed set and cache the mask. */
4817 hw->mac.autotry_restart = TRUE;
4818 hw->mac.ops.setup_link(hw, speed, TRUE);
4819 adapter->advertise = advertise;
4822 } /* ixgbe_set_advertise */
4824 /************************************************************************
4825 * ixgbe_get_advertise - Get current advertised speed settings
4827 * Formatted for sysctl usage.
4829 * 0x1 - advertise 100 Mb
4830 * 0x2 - advertise 1G
4831 * 0x4 - advertise 10G
4832 * 0x8 - advertise 10 Mb (yes, Mb)
4833 ************************************************************************/
/*
 * Translates the MAC's reported link capabilities into the sysctl
 * bitmask format documented above.  NOTE(review): interior lines
 * (declarations, early returns) are missing from this extract.
 */
4835 ixgbe_get_advertise(struct adapter *adapter)
4837 struct ixgbe_hw *hw = &adapter->hw;
4839 ixgbe_link_speed link_caps = 0;
4841 bool negotiate = FALSE;
4844 * Advertised speed means nothing unless it's copper or
4847 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4848 !(hw->phy.multispeed_fiber))
4851 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4852 if (err != IXGBE_SUCCESS)
/* Map each capability bit to its sysctl-facing flag. */
4856 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4857 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4858 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4859 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4862 } /* ixgbe_get_advertise */
4864 /************************************************************************
4865 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4867 * For X552/X557-AT devices using an external PHY
4868 ************************************************************************/
/*
 * Read-only sysctl: fetches the current temperature from the external
 * PHY via MDIO and reports it through sysctl_handle_int.  Only the
 * X550EM_X_10G_T (X552/X557-AT) has the supported external sensor.
 * FIX(review): the third argument to phy.ops.read_reg had been
 * mojibake'd from "&reg" into the single character '\u00ae' (the HTML
 * entity &reg; mis-decoded), which is not valid C; restored the
 * address-of expression so 'reg' (used below at the sysctl call)
 * receives the MDIO read result.
 */
4870 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4872 struct adapter *adapter = (struct adapter *)arg1;
4873 struct ixgbe_hw *hw = &adapter->hw;
/* Only this device id carries the supported external sensor. */
4876 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4877 device_printf(adapter->dev,
4878 "Device has no supported external thermal sensor.\n");
/* MDIO read of the PHY's current-temperature register into 'reg'. */
4882 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4883 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4884 device_printf(adapter->dev,
4885 "Error reading from PHY's current temperature register\n");
4889 /* Shift temp for output */
4892 return (sysctl_handle_int(oidp, NULL, reg, req));
4893 } /* ixgbe_sysctl_phy_temp */
4895 /************************************************************************
4896 * ixgbe_sysctl_phy_overtemp_occurred
4898 * Reports (directly from the PHY) whether the current PHY
4899 * temperature is over the overtemp threshold.
4900 ************************************************************************/
/*
 * Read-only sysctl: reads the external PHY's overtemp status register
 * over MDIO and reports bit 0x4000 as a 0/1 flag.  Only the
 * X550EM_X_10G_T (X552/X557-AT) has the supported external sensor.
 * FIX(review): the third argument to phy.ops.read_reg had been
 * mojibake'd from "&reg" into the single character '\u00ae' (the HTML
 * entity &reg; mis-decoded), which is not valid C; restored the
 * address-of expression so 'reg' (masked and reported below) receives
 * the MDIO read result.
 */
4902 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4904 struct adapter *adapter = (struct adapter *)arg1;
4905 struct ixgbe_hw *hw = &adapter->hw;
/* Only this device id carries the supported external sensor. */
4908 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4909 device_printf(adapter->dev,
4910 "Device has no supported external thermal sensor.\n");
/* MDIO read of the PHY's overtemp status register into 'reg'. */
4914 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4915 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4916 device_printf(adapter->dev,
4917 "Error reading from PHY's temperature status register\n");
4921 /* Get occurrence bit */
4922 reg = !!(reg & 0x4000);
4924 return (sysctl_handle_int(oidp, 0, reg, req));
4925 } /* ixgbe_sysctl_phy_overtemp_occurred */
4927 /************************************************************************
4928 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4931 * 0/1 - off / on (use default value of 1000)
4933 * Legal timer values are:
4934 * 50,100,250,500,1000,2000,5000,10000
4936 * Turning off interrupt moderation will also turn this off.
4937 ************************************************************************/
/*
 * Sysctl handler for the DMA Coalescing timer (see legend above).
 * Accepted values are stored in adapter->dmac; the interface is
 * re-initialized if it is already running so the new setting takes
 * effect.  NOTE(review): the switch statement, several case labels and
 * returns are missing from this extract.
 */
4939 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4941 struct adapter *adapter = (struct adapter *)arg1;
4942 struct ifnet *ifp = adapter->ifp;
4946 newval = adapter->dmac;
4947 error = sysctl_handle_int(oidp, &newval, 0, req);
4948 if ((error) || (req->newptr == NULL))
4957 /* Enable and use default */
4958 adapter->dmac = 1000;
4968 /* Legal values - allow */
4969 adapter->dmac = newval;
4972 /* Do nothing, illegal value */
4976 /* Re-initialize hardware if it's already running */
4977 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4978 ixgbe_init(adapter);
4981 } /* ixgbe_sysctl_dmac */
4984 /************************************************************************
4985 * ixgbe_sysctl_power_state
4987 * Sysctl to test power states
4989 * 0 - set device to D0
4990 * 3 - set device to D3
4991 * (none) - get current device power state
4992 ************************************************************************/
/*
 * Test sysctl: reads the PCI power state, and on write triggers a
 * DEVICE_SUSPEND (D0->D3) or DEVICE_RESUME (D3->D0) transition, then
 * prints the resulting state.
 */
4994 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4996 struct adapter *adapter = (struct adapter *)arg1;
4997 device_t dev = adapter->dev;
4998 int curr_ps, new_ps, error = 0;
5000 curr_ps = new_ps = pci_get_powerstate(dev);
5002 error = sysctl_handle_int(oidp, &new_ps, 0, req);
5003 if ((error) || (req->newptr == NULL))
/* No transition requested. */
5006 if (new_ps == curr_ps)
/* Only the D0<->D3 transitions are supported. */
5009 if (new_ps == 3 && curr_ps == 0)
5010 error = DEVICE_SUSPEND(dev);
5011 else if (new_ps == 0 && curr_ps == 3)
5012 error = DEVICE_RESUME(dev);
5016 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5019 } /* ixgbe_sysctl_power_state */
5022 /************************************************************************
5023 * ixgbe_sysctl_eee_state
5025 * Sysctl to set EEE power saving feature
5029 * (none) - get current device EEE state
5030 ************************************************************************/
/*
 * Sysctl handler for Energy Efficient Ethernet.  Reports whether the
 * feature-enable bit is set; on write (0/1) it calls the MAC's
 * setup_eee op, restarts the interface to renegotiate the link, and
 * caches the new state in adapter->feat_en.  NOTE(review): interior
 * lines (returns, error paths) are missing from this extract.
 */
5032 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5034 struct adapter *adapter = (struct adapter *)arg1;
5035 device_t dev = adapter->dev;
5036 int curr_eee, new_eee, error = 0;
/* Normalize the feature-enable bit to 0/1 for reporting. */
5039 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5041 error = sysctl_handle_int(oidp, &new_eee, 0, req);
5042 if ((error) || (req->newptr == NULL))
/* Nothing to do if unchanged. */
5046 if (new_eee == curr_eee)
/* Reject the request when the hardware isn't EEE-capable. */
5050 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5053 /* Bounds checking */
5054 if ((new_eee < 0) || (new_eee > 1))
5057 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5059 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5063 /* Restart auto-neg */
5064 ixgbe_init(adapter);
5066 device_printf(dev, "New EEE state: %d\n", new_eee);
5068 /* Cache new value */
5070 adapter->feat_en |= IXGBE_FEATURE_EEE;
5072 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5075 } /* ixgbe_sysctl_eee_state */
5077 /************************************************************************
5078 * ixgbe_sysctl_wol_enable
5080 * Sysctl to enable/disable the WoL capability,
5081 * if supported by the adapter.
5086 ************************************************************************/
/*
 * Reports hw->wol_enabled and, on write, normalizes the input to 0/1,
 * rejects enabling when the adapter lacks WoL support, and stores the
 * new state.  NOTE(review): interior lines (returns) are missing from
 * this extract.
 */
5088 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5090 struct adapter *adapter = (struct adapter *)arg1;
5091 struct ixgbe_hw *hw = &adapter->hw;
5092 int new_wol_enabled;
5095 new_wol_enabled = hw->wol_enabled;
5096 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5097 if ((error) || (req->newptr == NULL))
/* Collapse any nonzero input to 1. */
5099 new_wol_enabled = !!(new_wol_enabled);
5100 if (new_wol_enabled == hw->wol_enabled)
/* Can't enable WoL on hardware that doesn't support it. */
5103 if (new_wol_enabled > 0 && !adapter->wol_support)
5106 hw->wol_enabled = new_wol_enabled;
5109 } /* ixgbe_sysctl_wol_enable */
5111 /************************************************************************
5112 * ixgbe_sysctl_wufc - Wake Up Filter Control
5114 * Sysctl to enable/disable the types of packets that the
5115 * adapter will wake up on upon receipt.
5117 * 0x1 - Link Status Change
5118 * 0x2 - Magic Packet
5119 * 0x4 - Direct Exact
5120 * 0x8 - Directed Multicast
5122 * 0x20 - ARP/IPv4 Request Packet
5123 * 0x40 - Direct IPv4 Packet
5124 * 0x80 - Direct IPv6 Packet
5126 * Settings not listed above will cause the sysctl to return an error.
5127 ************************************************************************/
/*
 * Validates a new wake-up filter bitmask (see legend above) and stores
 * it in adapter->wufc.  Bits above the low byte are rejected.
 * NOTE(review): interior lines (returns) are missing from this
 * extract.
 */
5129 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5131 struct adapter *adapter = (struct adapter *)arg1;
5135 new_wufc = adapter->wufc;
5137 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5138 if ((error) || (req->newptr == NULL))
5140 if (new_wufc == adapter->wufc)
/* Only the low byte carries valid filter bits. */
5143 if (new_wufc & 0xffffff00)
/* Preserve any upper filter state already held in adapter->wufc. */
5147 new_wufc |= (0xffffff & adapter->wufc);
5148 adapter->wufc = new_wufc;
5151 } /* ixgbe_sysctl_wufc */
5154 /************************************************************************
5155 * ixgbe_sysctl_print_rss_config
5156 ************************************************************************/
/*
 * Debug sysctl (string): dumps the RSS redirection table (RETA, plus
 * ERETA entries past index 31) into an sbuf for the caller to read.
 * NOTE(review): interior lines (reta_size assignments per MAC type,
 * the i<32 branch selector, returns) are missing from this extract.
 */
5158 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5160 struct adapter *adapter = (struct adapter *)arg1;
5161 struct ixgbe_hw *hw = &adapter->hw;
5162 device_t dev = adapter->dev;
5164 int error = 0, reta_size;
/* Auto-draining sbuf tied to this sysctl request. */
5167 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5169 device_printf(dev, "Could not allocate sbuf for output.\n");
5173 // TODO: use sbufs to make a string to print out
5174 /* Set multiplier for RETA setup and table size based on MAC */
5175 switch (adapter->hw.mac.type) {
5176 case ixgbe_mac_X550:
5177 case ixgbe_mac_X550EM_x:
5178 case ixgbe_mac_X550EM_a:
5186 /* Print out the redirection table */
5187 sbuf_cat(buf, "\n");
5188 for (int i = 0; i < reta_size; i++) {
5190 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5191 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
/* Entries beyond the base table live in the extended ERETA regs. */
5193 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5194 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5198 // TODO: print more config
5200 error = sbuf_finish(buf);
5202 device_printf(dev, "Error finishing sbuf: %d\n", error);
5207 } /* ixgbe_sysctl_print_rss_config */
5208 #endif /* IXGBE_DEBUG */
5210 /************************************************************************
5211 * ixgbe_enable_rx_drop
5213 * Enable the hardware to drop packets when the buffer is
5214 * full. This is useful with multiqueue, so that no single
5215 * queue being full stalls the entire RX engine. We only
5216 * enable this when Multiqueue is enabled AND Flow Control
5218 ************************************************************************/
/*
 * Sets SRRCTL.DROP_EN on every RX ring, then sets the drop-enable bit
 * in QDE for each VF queue via indexed writes.
 */
5220 ixgbe_enable_rx_drop(struct adapter *adapter)
5222 struct ixgbe_hw *hw = &adapter->hw;
5223 struct rx_ring *rxr;
/* Read-modify-write SRRCTL for each PF ring. */
5226 for (int i = 0; i < adapter->num_queues; i++) {
5227 rxr = &adapter->rx_rings[i];
5228 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5229 srrctl |= IXGBE_SRRCTL_DROP_EN;
5230 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5233 /* enable drop for each vf */
5234 for (int i = 0; i < adapter->num_vfs; i++) {
5235 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5236 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5239 } /* ixgbe_enable_rx_drop */
5241 /************************************************************************
5242 * ixgbe_disable_rx_drop
5243 ************************************************************************/
/*
 * Mirror of ixgbe_enable_rx_drop(): clears SRRCTL.DROP_EN on every RX
 * ring and writes QDE entries without the drop bit for each VF queue.
 * Used when flow control is active, since pause frames should
 * backpressure the link instead of dropping.
 */
5245 ixgbe_disable_rx_drop(struct adapter *adapter)
5247 struct ixgbe_hw *hw = &adapter->hw;
5248 struct rx_ring *rxr;
/* Read-modify-write SRRCTL for each PF ring. */
5251 for (int i = 0; i < adapter->num_queues; i++) {
5252 rxr = &adapter->rx_rings[i];
5253 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5254 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5255 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5258 /* disable drop for each vf */
5259 for (int i = 0; i < adapter->num_vfs; i++) {
5260 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5261 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5263 } /* ixgbe_disable_rx_drop */
5265 /************************************************************************
5266 * ixgbe_rearm_queues
5267 ************************************************************************/
/*
 * Software-triggers interrupts for the queues in the 'queues' bitmask
 * by writing the interrupt cause set registers.  82598 has a single
 * EICS register; later MACs split the 64-bit mask across EICS_EX(0)
 * (low 32 bits) and EICS_EX(1) (high 32 bits).
 */
5269 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5273 switch (adapter->hw.mac.type) {
5274 case ixgbe_mac_82598EB:
5275 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5276 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5278 case ixgbe_mac_82599EB:
5279 case ixgbe_mac_X540:
5280 case ixgbe_mac_X550:
5281 case ixgbe_mac_X550EM_x:
5282 case ixgbe_mac_X550EM_a:
/* Low half of the queue mask. */
5283 mask = (queues & 0xFFFFFFFF);
5284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
/* High half of the queue mask. */
5285 mask = (queues >> 32);
5286 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5291 } /* ixgbe_rearm_queues */