1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
40 #include "ixgbe_sriov.h"
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
46 /************************************************************************
48 ************************************************************************/
/* Driver version string, exported to iflib via isc_driver_version below. */
49 char ixgbe_driver_version[] = "4.0.1-k";
51 /************************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 ************************************************************************/
/*
 * PCI PNP match table consumed by iflib_device_probe() (exported through
 * IFLIB_PNP_INFO and isc_vendor_info).  Each PVID() entry maps a
 * vendor/device ID pair to a human-readable adapter name.
 */
60 static pci_vendor_info_t ixgbe_vendor_info_array[] =
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
106 /* required last entry */
110 static void *ixgbe_register(device_t);
111 static int ixgbe_if_attach_pre(if_ctx_t);
112 static int ixgbe_if_attach_post(if_ctx_t);
113 static int ixgbe_if_detach(if_ctx_t);
114 static int ixgbe_if_shutdown(if_ctx_t);
115 static int ixgbe_if_suspend(if_ctx_t);
116 static int ixgbe_if_resume(if_ctx_t);
118 static void ixgbe_if_stop(if_ctx_t);
119 void ixgbe_if_enable_intr(if_ctx_t);
120 static void ixgbe_if_disable_intr(if_ctx_t);
121 static void ixgbe_link_intr_enable(if_ctx_t);
122 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
124 static int ixgbe_if_media_change(if_ctx_t);
125 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
128 static void ixgbe_if_multi_set(if_ctx_t);
129 static int ixgbe_if_promisc_set(if_ctx_t, int);
130 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
131 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
132 static void ixgbe_if_queues_free(if_ctx_t);
133 static void ixgbe_if_timer(if_ctx_t, uint16_t);
134 static void ixgbe_if_update_admin_status(if_ctx_t);
135 static void ixgbe_if_vlan_register(if_ctx_t, u16);
136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
137 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
139 int ixgbe_intr(void *);
141 /************************************************************************
142 * Function prototypes
143 ************************************************************************/
144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
148 static void ixgbe_add_device_sysctls(if_ctx_t);
149 static int ixgbe_allocate_pci_resources(if_ctx_t);
150 static int ixgbe_setup_low_power_mode(if_ctx_t);
152 static void ixgbe_config_dmac(struct ixgbe_softc *);
153 static void ixgbe_configure_ivars(struct ixgbe_softc *);
154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
155 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
156 static bool ixgbe_sfp_probe(if_ctx_t);
158 static void ixgbe_free_pci_resources(if_ctx_t);
160 static int ixgbe_msix_link(void *);
161 static int ixgbe_msix_que(void *);
162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
163 static void ixgbe_initialize_receive_units(if_ctx_t);
164 static void ixgbe_initialize_transmit_units(if_ctx_t);
166 static int ixgbe_setup_interface(if_ctx_t);
167 static void ixgbe_init_device_features(struct ixgbe_softc *);
168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
170 static void ixgbe_print_fw_version(if_ctx_t);
171 static void ixgbe_add_media_types(if_ctx_t);
172 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
173 static void ixgbe_config_link(if_ctx_t);
174 static void ixgbe_get_slot_info(struct ixgbe_softc *);
175 static void ixgbe_check_wol_support(struct ixgbe_softc *);
176 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
177 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
179 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
180 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
181 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
182 static int ixgbe_get_default_advertise(struct ixgbe_softc *);
183 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
184 static void ixgbe_config_gpie(struct ixgbe_softc *);
185 static void ixgbe_config_delay_values(struct ixgbe_softc *);
187 /* Sysctl handlers */
188 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
200 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
201 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
207 /* Deferred interrupt tasklets */
208 static void ixgbe_handle_msf(void *);
209 static void ixgbe_handle_mod(void *);
210 static void ixgbe_handle_phy(void *);
212 /************************************************************************
213 * FreeBSD Device Interface Entry Points
214 ************************************************************************/
215 static device_method_t ix_methods[] = {
216 /* Device interface */
/* device_register returns the if_shared_ctx; all other newbus entry
 * points are delegated straight to the generic iflib implementations. */
217 DEVMETHOD(device_register, ixgbe_register),
218 DEVMETHOD(device_probe, iflib_device_probe),
219 DEVMETHOD(device_attach, iflib_device_attach),
220 DEVMETHOD(device_detach, iflib_device_detach),
221 DEVMETHOD(device_shutdown, iflib_device_shutdown),
222 DEVMETHOD(device_suspend, iflib_device_suspend),
223 DEVMETHOD(device_resume, iflib_device_resume),
/* SR-IOV VF management is likewise routed through iflib. */
225 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
226 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
227 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/* Newbus driver glue: attach "ix" on the pci bus with the method table above. */
232 static driver_t ix_driver = {
233 "ix", ix_methods, sizeof(struct ixgbe_softc),
236 DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
/* Export the PNP table so devmatch(8)/kldload can auto-load the module. */
237 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
238 MODULE_DEPEND(ix, pci, 1, 1, 1);
239 MODULE_DEPEND(ix, ether, 1, 1, 1);
240 MODULE_DEPEND(ix, iflib, 1, 1, 1);
/*
 * iflib ifdi_* callback table: maps generic interface-driver operations
 * onto this driver's ixgbe_if_* implementations.
 */
242 static device_method_t ixgbe_if_methods[] = {
243 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
244 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
245 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
246 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
247 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
248 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
249 DEVMETHOD(ifdi_init, ixgbe_if_init),
250 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
251 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
252 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
253 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
254 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
/* NOTE(review): TX queue interrupt enable is wired to the RX handler;
 * presumably both re-enable the same per-queue EIMS bit -- confirm. */
255 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
256 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
258 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
259 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
260 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
261 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
262 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
263 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
264 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
265 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
266 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
267 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
268 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
269 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
270 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
271 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
272 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
/* SR-IOV hooks (implemented in ixgbe_sriov.c). */
274 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
275 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
276 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
282 * TUNABLE PARAMETERS:
/* hw.ix sysctl tree root for all of the loader tunables below. */
285 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
286 "IXGBE driver parameters");
/* Logical iflib driver: pairs the ifdi_* method table with the softc size. */
287 static driver_t ixgbe_if_driver = {
288 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
/* Upper bound on per-vector interrupt rate (interrupts/second). */
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 * Smart speed setting, default to on
307 * this only works as a compile option
308 * right now as its during attach, set
309 * this to 'ixgbe_smart_speed_off' to
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 * MSI-X should be the default for best performance,
316 * but this allows it to be forced off for testing.
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320 "Enable MSI-X interrupts");
323 * Defining this on will allow the use
324 * of unsupported SFP+ modules, note that
325 * doing so you are on your own :)
327 static int allow_unsupported_sfp = false;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329 &allow_unsupported_sfp, 0,
330 "Allow unsupported SFP modules...use at your own risk");
333 * Not sure if Flow Director is fully baked,
334 * so we'll default to turning it off.
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338 "Enable Flow Director");
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343 "Enable Receive-Side Scaling (RSS)");
346 * AIM: Adaptive Interrupt Moderation
347 * which means that the interrupt rate
348 * is varied over time based on the
349 * traffic for that interrupt vector
351 static int ixgbe_enable_aim = false;
352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
353 "Enable adaptive interrupt moderation");
356 /* Keep running tab on them for sanity check */
357 static int ixgbe_total_ports;
/* Malloc tag for all allocations made by this driver (queue arrays, etc.). */
360 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
363 * For Flow Director: this is the number of TX packets we sample
364 * for the filter pool, this means every 20th packet will be probed.
366 * This feature can be disabled by setting this to 0.
368 static int atr_sample_rate = 20;
/* TX/RX fast-path function table, defined in ix_txrx.c. */
370 extern struct if_txrx ixgbe_txrx;
/*
 * Shared context template handed to iflib by ixgbe_register(): describes
 * DMA alignment, TSO/segment size limits, descriptor-ring bounds and the
 * PNP/driver hookup for this device class.
 */
372 static struct if_shared_ctx ixgbe_sctx_init = {
373 .isc_magic = IFLIB_MAGIC,
374 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
375 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
376 .isc_tx_maxsegsize = PAGE_SIZE,
377 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
378 .isc_tso_maxsegsize = PAGE_SIZE,
379 .isc_rx_maxsize = PAGE_SIZE*4,
380 .isc_rx_nsegments = 1,
381 .isc_rx_maxsegsize = PAGE_SIZE*4,
/* One admin (link/other-cause) interrupt vector on top of the queue vectors. */
386 .isc_admin_intrcnt = 1,
387 .isc_vendor_info = ixgbe_vendor_info_array,
388 .isc_driver_version = ixgbe_driver_version,
389 .isc_driver = &ixgbe_if_driver,
390 .isc_flags = IFLIB_TSO_INIT_IP,
/* Descriptor count limits/defaults; single-element arrays (one qset type). */
392 .isc_nrxd_min = {MIN_RXD},
393 .isc_ntxd_min = {MIN_TXD},
394 .isc_nrxd_max = {MAX_RXD},
395 .isc_ntxd_max = {MAX_TXD},
396 .isc_nrxd_default = {DEFAULT_RXD},
397 .isc_ntxd_default = {DEFAULT_TXD},
400 /************************************************************************
401 * ixgbe_if_tx_queues_alloc
402 ************************************************************************/
/*
 * ixgbe_if_tx_queues_alloc - iflib callback to allocate software state for
 * the TX queues.  Allocates the ix_tx_queue array plus each ring's report
 * status (tx_rsq) array, and records the descriptor-ring addresses that
 * iflib already DMA-allocated (vaddrs/paddrs).  On failure the partially
 * built state is torn down via ixgbe_if_queues_free().
 */
404 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
405 int ntxqs, int ntxqsets)
407 struct ixgbe_softc *sc = iflib_get_softc(ctx);
408 if_softc_ctx_t scctx = sc->shared;
409 struct ix_tx_queue *que;
412 MPASS(sc->num_tx_queues > 0);
413 MPASS(sc->num_tx_queues == ntxqsets);
416 /* Allocate queue structure memory */
418 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
419 M_IXGBE, M_NOWAIT | M_ZERO);
420 if (!sc->tx_queues) {
421 device_printf(iflib_get_dev(ctx),
422 "Unable to allocate TX ring memory\n");
426 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
427 struct tx_ring *txr = &que->txr;
429 /* In case SR-IOV is enabled, align the index properly */
430 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
433 txr->sc = que->sc = sc;
435 /* Allocate report status array */
436 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
437 if (txr->tx_rsq == NULL) {
/* Mark every report-status slot unused until descriptors are queued. */
441 for (j = 0; j < scctx->isc_ntxd[0]; j++)
442 txr->tx_rsq[j] = QIDX_INVALID;
443 /* get the virtual and physical address of the hardware queues */
444 txr->tail = IXGBE_TDT(txr->me);
445 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
446 txr->tx_paddr = paddrs[i];
449 txr->total_packets = 0;
451 /* Set the rate at which we sample packets */
452 if (sc->feat_en & IXGBE_FEATURE_FDIR)
453 txr->atr_sample = atr_sample_rate;
457 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
/* Error path: release anything allocated above before returning. */
463 ixgbe_if_queues_free(ctx);
466 } /* ixgbe_if_tx_queues_alloc */
468 /************************************************************************
469 * ixgbe_if_rx_queues_alloc
470 ************************************************************************/
/*
 * ixgbe_if_rx_queues_alloc - iflib callback to allocate software state for
 * the RX queues.  Mirrors the TX path: allocates the ix_rx_queue array and
 * records the iflib-provided descriptor-ring addresses for each ring.
 */
472 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
473 int nrxqs, int nrxqsets)
475 struct ixgbe_softc *sc = iflib_get_softc(ctx);
476 struct ix_rx_queue *que;
479 MPASS(sc->num_rx_queues > 0);
480 MPASS(sc->num_rx_queues == nrxqsets);
483 /* Allocate queue structure memory */
485 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
486 M_IXGBE, M_NOWAIT | M_ZERO);
487 if (!sc->rx_queues) {
488 device_printf(iflib_get_dev(ctx),
/* NOTE(review): message says "TX ring memory" on the RX path --
 * looks like a copy/paste from the TX allocator. */
489 "Unable to allocate TX ring memory\n");
493 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
494 struct rx_ring *rxr = &que->rxr;
496 /* In case SR-IOV is enabled, align the index properly */
497 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
500 rxr->sc = que->sc = sc;
502 /* get the virtual and physical address of the hw queues */
503 rxr->tail = IXGBE_RDT(rxr->me);
504 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
505 rxr->rx_paddr = paddrs[i];
510 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
514 } /* ixgbe_if_rx_queues_alloc */
516 /************************************************************************
517 * ixgbe_if_queues_free
518 ************************************************************************/
/*
 * ixgbe_if_queues_free - iflib callback to release the software queue state
 * allocated by the two routines above.  Frees each TX ring's tx_rsq array,
 * then the TX and RX queue arrays themselves, NULLing the softc pointers so
 * a repeat call is harmless.  Also used as the error-unwind path of
 * ixgbe_if_tx_queues_alloc(), so it must tolerate partial allocation.
 */
520 ixgbe_if_queues_free(if_ctx_t ctx)
522 struct ixgbe_softc *sc = iflib_get_softc(ctx);
523 struct ix_tx_queue *tx_que = sc->tx_queues;
524 struct ix_rx_queue *rx_que = sc->rx_queues;
527 if (tx_que != NULL) {
528 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
529 struct tx_ring *txr = &tx_que->txr;
530 if (txr->tx_rsq == NULL)
533 free(txr->tx_rsq, M_IXGBE);
537 free(sc->tx_queues, M_IXGBE);
538 sc->tx_queues = NULL;
540 if (rx_que != NULL) {
541 free(sc->rx_queues, M_IXGBE);
542 sc->rx_queues = NULL;
544 } /* ixgbe_if_queues_free */
546 /************************************************************************
547 * ixgbe_initialize_rss_mapping
548 ************************************************************************/
/*
 * ixgbe_initialize_rss_mapping - program the RSS redirection table (RETA),
 * hash key (RSSRK) and hash field selection (MRQC).  With the kernel RSS
 * option enabled (IXGBE_FEATURE_RSS) the stack's key/indirection/hash
 * configuration is used; otherwise a random key and a simple round-robin
 * queue mapping are generated locally.
 */
550 ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
552 struct ixgbe_hw *hw = &sc->hw;
553 u32 reta = 0, mrqc, rss_key[10];
554 int queue_id, table_size, index_mult;
558 if (sc->feat_en & IXGBE_FEATURE_RSS) {
559 /* Fetch the configured RSS key */
560 rss_getkey((uint8_t *)&rss_key);
562 /* set up random bits */
563 arc4rand(&rss_key, sizeof(rss_key), 0);
566 /* Set multiplier for RETA setup and table size based on MAC */
569 switch (sc->hw.mac.type) {
570 case ixgbe_mac_82598EB:
574 case ixgbe_mac_X550EM_x:
575 case ixgbe_mac_X550EM_a:
582 /* Set up the redirection table */
/* j cycles through the RX queues as i walks every RETA entry. */
583 for (i = 0, j = 0; i < table_size; i++, j++) {
584 if (j == sc->num_rx_queues)
587 if (sc->feat_en & IXGBE_FEATURE_RSS) {
589 * Fetch the RSS bucket id for the given indirection
590 * entry. Cap it at the number of configured buckets
591 * (which is num_rx_queues.)
593 queue_id = rss_get_indirection_to_bucket(i);
594 queue_id = queue_id % sc->num_rx_queues;
596 queue_id = (j * index_mult);
599 * The low 8 bits are for hash value (n+0);
600 * The next 8 bits are for hash value (n+1), etc.
603 reta = reta | (((uint32_t)queue_id) << 24);
/* Four 8-bit entries are packed per register; RETA covers the first
 * 128 entries, ERETA the remainder on the larger-table MACs. */
606 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
608 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
614 /* Now fill our hash function seeds */
615 for (i = 0; i < 10; i++)
616 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
618 /* Perform hash on these packet types */
619 if (sc->feat_en & IXGBE_FEATURE_RSS)
620 rss_hash_config = rss_gethashconfig();
623 * Disable UDP - IP fragments aren't currently being handled
624 * and so we end up with a mix of 2-tuple and 4-tuple
627 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
628 | RSS_HASHTYPE_RSS_TCP_IPV4
629 | RSS_HASHTYPE_RSS_IPV6
630 | RSS_HASHTYPE_RSS_TCP_IPV6
631 | RSS_HASHTYPE_RSS_IPV6_EX
632 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
/* Translate the stack's hash-type bits into MRQC field-enable bits. */
635 mrqc = IXGBE_MRQC_RSSEN;
636 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
637 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
638 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
639 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
640 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
641 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
642 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
643 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
644 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
645 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
646 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
647 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
648 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
649 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
650 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
651 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
652 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
653 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
/* Fold in the multiqueue mode required by the current SR-IOV pool layout. */
654 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
655 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
656 } /* ixgbe_initialize_rss_mapping */
658 /************************************************************************
659 * ixgbe_initialize_receive_units - Setup receive registers and features.
660 ************************************************************************/
/* Round an mbuf size up to the next SRRCTL buffer-size granule. */
661 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * ixgbe_initialize_receive_units - program the RX side of the MAC:
 * broadcast/filter control, jumbo-frame enable, per-ring descriptor base,
 * length, SRRCTL buffer sizing, head/tail pointers, RSS and RX checksum
 * offload.  Called with receives disabled; the caller re-enables RX.
 */
664 ixgbe_initialize_receive_units(if_ctx_t ctx)
666 struct ixgbe_softc *sc = iflib_get_softc(ctx);
667 if_softc_ctx_t scctx = sc->shared;
668 struct ixgbe_hw *hw = &sc->hw;
669 struct ifnet *ifp = iflib_get_ifp(ctx);
670 struct ix_rx_queue *que;
672 u32 bufsz, fctrl, srrctl, rxcsum;
676 * Make sure receives are disabled while
677 * setting up the descriptor ring
679 ixgbe_disable_rx(hw);
681 /* Enable broadcasts */
682 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
683 fctrl |= IXGBE_FCTRL_BAM;
684 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
685 fctrl |= IXGBE_FCTRL_DPF;
686 fctrl |= IXGBE_FCTRL_PMCF;
688 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
690 /* Set for Jumbo Frames? */
691 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
692 if (ifp->if_mtu > ETHERMTU)
693 hlreg |= IXGBE_HLREG0_JUMBOEN;
695 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
696 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Receive buffer size in SRRCTL units (1KB granules). */
698 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
699 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
701 /* Setup the Base and Length of the Rx Descriptor Ring */
702 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
703 struct rx_ring *rxr = &que->rxr;
704 u64 rdba = rxr->rx_paddr;
708 /* Setup the Base and Length of the Rx Descriptor Ring */
709 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
710 (rdba & 0x00000000ffffffffULL));
711 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
712 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
713 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
715 /* Set up the SRRCTL register */
716 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
717 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
718 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
720 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
723 * Set DROP_EN iff we have no flow control and >1 queue.
724 * Note that srrctl was cleared shortly before during reset,
725 * so we do not need to clear the bit, but do it just in case
726 * this code is moved elsewhere.
728 if (sc->num_rx_queues > 1 &&
729 sc->hw.fc.requested_mode == ixgbe_fc_none) {
730 srrctl |= IXGBE_SRRCTL_DROP_EN;
732 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
735 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
737 /* Setup the HW Rx Head and Tail Descriptor Pointers */
738 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
739 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
741 /* Set the driver rx tail address */
742 rxr->tail = IXGBE_RDT(rxr->me);
/* Packet-split receive type only exists on 82599 and later MACs. */
745 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
746 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
747 | IXGBE_PSRTYPE_UDPHDR
748 | IXGBE_PSRTYPE_IPV4HDR
749 | IXGBE_PSRTYPE_IPV6HDR;
750 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
753 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
755 ixgbe_initialize_rss_mapping(sc);
757 if (sc->num_rx_queues > 1) {
758 /* RSS and RX IPP Checksum are mutually exclusive */
759 rxcsum |= IXGBE_RXCSUM_PCSD;
762 if (ifp->if_capenable & IFCAP_RXCSUM)
763 rxcsum |= IXGBE_RXCSUM_PCSD;
765 /* This is useful for calculating UDP/IP fragment checksums */
766 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
767 rxcsum |= IXGBE_RXCSUM_IPPCSE;
769 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
771 } /* ixgbe_initialize_receive_units */
773 /************************************************************************
774 * ixgbe_initialize_transmit_units - Enable transmit units.
775 ************************************************************************/
/*
 * ixgbe_initialize_transmit_units - program the TX side of the MAC: per-ring
 * descriptor base/length and head/tail pointers, reset the software
 * report-status tracking, disable head writeback (relaxed descriptor write
 * ordering), then on 82599+ enable DMA TX and set MTQC with the arbiter
 * temporarily disabled.
 */
777 ixgbe_initialize_transmit_units(if_ctx_t ctx)
779 struct ixgbe_softc *sc = iflib_get_softc(ctx);
780 struct ixgbe_hw *hw = &sc->hw;
781 if_softc_ctx_t scctx = sc->shared;
782 struct ix_tx_queue *que;
785 /* Setup the Base and Length of the Tx Descriptor Ring */
786 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
788 struct tx_ring *txr = &que->txr;
789 u64 tdba = txr->tx_paddr;
793 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
794 (tdba & 0x00000000ffffffffULL));
795 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
796 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
797 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
799 /* Setup the HW Tx Head and Tail descriptor pointers */
800 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
801 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
803 /* Cache the tail address */
804 txr->tail = IXGBE_TDT(txr->me);
/* Reset report-status bookkeeping for a freshly zeroed ring. */
806 txr->tx_rs_cidx = txr->tx_rs_pidx;
807 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
808 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
809 txr->tx_rsq[k] = QIDX_INVALID;
811 /* Disable Head Writeback */
813 * Note: for X550 series devices, these registers are actually
814 * prefixed with TPH_ instead of DCA_, but the addresses and
815 * fields remain the same.
817 switch (hw->mac.type) {
818 case ixgbe_mac_82598EB:
819 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
822 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
825 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
826 switch (hw->mac.type) {
827 case ixgbe_mac_82598EB:
828 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
831 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
/* 82599 and later: enable the TX DMA engine and program MTQC. */
837 if (hw->mac.type != ixgbe_mac_82598EB) {
838 u32 dmatxctl, rttdcs;
840 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
841 dmatxctl |= IXGBE_DMATXCTL_TE;
842 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
843 /* Disable arbiter to set MTQC */
844 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
845 rttdcs |= IXGBE_RTTDCS_ARBDIS;
846 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
847 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
848 ixgbe_get_mtqc(sc->iov_mode));
849 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
850 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
853 } /* ixgbe_initialize_transmit_units */
855 /************************************************************************
857 ************************************************************************/
/*
 * ixgbe_register - device_register method: hand iflib the shared context
 * template so it can drive probe/attach for this device class.
 */
859 ixgbe_register(device_t dev)
861 return (&ixgbe_sctx_init);
862 } /* ixgbe_register */
864 /************************************************************************
865 * ixgbe_if_attach_pre - Device initialization routine, part 1
867 * Called when the driver is being loaded.
868 * Identifies the type of hardware, initializes the hardware,
869 * and initializes iflib structures.
871 * return 0 on success, positive on failure
872 ************************************************************************/
/*
 * NOTE(review): this excerpt is gapped — several declarations (dev, hw,
 * ctrl_ext, error), the `hw = &sc->hw` assignment, error-path gotos and
 * closing braces are excised. Comments below describe only what the
 * visible lines do.
 */
874 ixgbe_if_attach_pre(if_ctx_t ctx)
876 struct ixgbe_softc *sc;
878 if_softc_ctx_t scctx;
883 INIT_DEBUGOUT("ixgbe_attach: begin");
885 /* Allocate, clear, and link in our adapter structure */
886 dev = iflib_get_dev(ctx);
887 sc = iflib_get_softc(ctx);
891 scctx = sc->shared = iflib_get_softc_ctx(ctx);
892 sc->media = iflib_get_media(ctx);
895 /* Determine hardware revision */
/* Cache PCI IDs in the shared-code hw struct for later type dispatch. */
896 hw->vendor_id = pci_get_vendor(dev);
897 hw->device_id = pci_get_device(dev);
898 hw->revision_id = pci_get_revid(dev);
899 hw->subsystem_vendor_id = pci_get_subvendor(dev);
900 hw->subsystem_device_id = pci_get_subdevice(dev);
902 /* Do base PCI setup - map BAR0 */
903 if (ixgbe_allocate_pci_resources(ctx)) {
904 device_printf(dev, "Allocation of PCI resources failed\n");
915 /* let hardware know driver is loaded */
909 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
910 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
911 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
914 * Initialize the shared code
916 if (ixgbe_init_shared_code(hw) != 0) {
917 device_printf(dev, "Unable to initialize the shared code\n");
/* Mailbox ops are only populated for SR-IOV capable parts. */
922 if (hw->mbx.ops.init_params)
923 hw->mbx.ops.init_params(hw);
925 hw->allow_unsupported_sfp = allow_unsupported_sfp;
/* SmartSpeed is not applicable to 82598-class MACs. */
927 if (hw->mac.type != ixgbe_mac_82598EB)
928 hw->phy.smart_speed = ixgbe_smart_speed;
930 ixgbe_init_device_features(sc);
932 /* Enable WoL (if supported) */
933 ixgbe_check_wol_support(sc);
935 /* Verify adapter fan is still functional (if applicable) */
936 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
937 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
938 ixgbe_check_fan_failure(sc, esdp, false);
941 /* Ensure SW/FW semaphore is free */
942 ixgbe_init_swfw_semaphore(hw);
944 /* Set an initial default flow control value */
945 hw->fc.requested_mode = ixgbe_flow_control;
/* Allow reset to proceed even on over-temperature, then restore. */
947 hw->phy.reset_if_overtemp = true;
948 error = ixgbe_reset_hw(hw);
949 hw->phy.reset_if_overtemp = false;
950 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
952 * No optics in this port, set up
953 * so the timer routine will probe
954 * for later insertion.
956 sc->sfp_probe = true;
958 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
959 device_printf(dev, "Unsupported SFP+ module detected!\n");
963 device_printf(dev, "Hardware initialization failed\n");
968 /* Make sure we have a good EEPROM before we read from it */
969 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
970 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
/* NOTE(review): switch statement header for this error dispatch is excised. */
975 error = ixgbe_start_hw(hw);
977 case IXGBE_ERR_EEPROM_VERSION:
978 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
980 case IXGBE_ERR_SFP_NOT_SUPPORTED:
981 device_printf(dev, "Unsupported SFP+ Module\n");
984 case IXGBE_ERR_SFP_NOT_PRESENT:
985 device_printf(dev, "No SFP+ Module found\n");
991 /* Most of the iflib initialization... */
993 iflib_set_mac(ctx, hw->mac.addr);
/* Queue/RSS limits scale with MAC generation. */
994 switch (sc->hw.mac.type) {
996 case ixgbe_mac_X550EM_x:
997 case ixgbe_mac_X550EM_a:
998 scctx->isc_rss_table_size = 512;
999 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1002 scctx->isc_rss_table_size = 128;
1003 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1006 /* Allow legacy interrupts */
1007 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
/* Descriptor ring sizes, aligned for DMA; TX adds room for a tail u32. */
1009 scctx->isc_txqsizes[0] =
1010 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1011 sizeof(u32), DBA_ALIGN),
1012 scctx->isc_rxqsizes[0] =
1013 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1017 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1018 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
/* 82598 lacks SCTP checksum offload and has a smaller scatter limit. */
1019 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1020 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1022 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1023 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1026 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1028 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1029 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1030 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1032 scctx->isc_txrx = &ixgbe_txrx;
1034 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
/* Error unwind: drop the DRV_LOAD hint and release PCI resources. */
1039 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1040 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1041 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1042 ixgbe_free_pci_resources(ctx);
1045 } /* ixgbe_if_attach_pre */
1047 /*********************************************************************
1048 * ixgbe_if_attach_post - Device initialization routine, part 2
1050 * Called during driver load, but after interrupts and
1051 * resources have been allocated and configured.
1052 * Sets up some data structures not relevant to iflib.
1054 * return 0 on success, positive on failure
1055 *********************************************************************/
/*
 * NOTE(review): excerpt is gapped — declarations of dev/error, the
 * `hw = &sc->hw` assignment, error gotos and some braces are excised.
 */
1057 ixgbe_if_attach_post(if_ctx_t ctx)
1060 struct ixgbe_softc *sc;
1061 struct ixgbe_hw *hw;
1064 dev = iflib_get_dev(ctx);
1065 sc = iflib_get_softc(ctx);
/* Reject legacy (INTx) interrupts on parts that do not support them. */
1069 if (sc->intr_type == IFLIB_INTR_LEGACY &&
1070 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1071 device_printf(dev, "Device does not support legacy interrupts");
1076 /* Allocate multicast array memory. */
1077 sc->mta = malloc(sizeof(*sc->mta) *
1078 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1079 if (sc->mta == NULL) {
1080 device_printf(dev, "Can not allocate multicast setup array\n");
1085 /* hw.ix defaults init */
1086 ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1088 /* Enable the optics for 82599 SFP+ fiber */
1089 ixgbe_enable_tx_laser(hw);
1091 /* Enable power to the phy. */
1092 ixgbe_set_phy_power(hw, true);
1094 ixgbe_initialize_iov(sc);
1096 error = ixgbe_setup_interface(ctx);
1098 device_printf(dev, "Interface setup failed: %d\n", error);
/* Kick the admin task once so link state is evaluated at attach. */
1102 ixgbe_if_update_admin_status(ctx);
1104 /* Initialize statistics */
1105 ixgbe_update_stats_counters(sc);
1106 ixgbe_add_hw_stats(sc);
1108 /* Check PCIE slot type/speed/width */
1109 ixgbe_get_slot_info(sc);
1112 * Do time init and sysctl init here, but
1113 * only on the first port of a bypass sc.
1115 ixgbe_bypass_init(sc);
1117 /* Display NVM and Option ROM versions */
1118 ixgbe_print_fw_version(ctx);
1120 /* Set an initial dmac value */
1122 /* Set initial advertised speeds (if applicable) */
1123 sc->advertise = ixgbe_get_default_advertise(sc);
1125 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1126 ixgbe_define_iov_schemas(dev, &error);
1129 ixgbe_add_device_sysctls(ctx);
1134 } /* ixgbe_if_attach_post */
1136 /************************************************************************
1137 * ixgbe_check_wol_support
1139 * Checks whether the adapter's ports are capable of
1140 * Wake On LAN by reading the adapter's NVM.
1142 * Sets each port's hw->wol_enabled value depending
1143 * on the value read here.
1144 ************************************************************************/
/*
 * NOTE(review): return-type line, dev_caps declaration and closing
 * brace of the condition are excised from this excerpt.
 */
1146 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1148 struct ixgbe_hw *hw = &sc->hw;
1151 /* Find out WoL support for port */
/* Default to "no WoL", then enable if the NVM device caps permit it. */
1152 sc->wol_support = hw->wol_enabled = 0;
1153 ixgbe_get_device_caps(hw, &dev_caps);
1154 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1155 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1157 sc->wol_support = hw->wol_enabled = 1;
1159 /* Save initial wake up filter configuration */
1160 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1163 } /* ixgbe_check_wol_support */
1165 /************************************************************************
1166 * ixgbe_setup_interface
1168 * Setup networking device structure and register an interface.
1169 ************************************************************************/
/*
 * NOTE(review): return-type line, opening brace and the `return (0);`
 * are excised from this excerpt.
 */
1171 ixgbe_setup_interface(if_ctx_t ctx)
1173 struct ifnet *ifp = iflib_get_ifp(ctx);
1174 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1176 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
/* Advertise a nominal 10 Gb/s baudrate on the ifnet. */
1178 if_setbaudrate(ifp, IF_Gbps(10));
/* Max frame = MTU plus Ethernet header and CRC. */
1180 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1182 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1184 ixgbe_add_media_types(ctx);
1186 /* Autoselect media by default */
1187 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1190 } /* ixgbe_setup_interface */
1192 /************************************************************************
1193 * ixgbe_if_get_counter
1194 ************************************************************************/
/*
 * ixgbe_if_get_counter - report softc-cached interface statistics.
 *
 * Dispatches on the requested counter and returns the value cached in
 * the softc by ixgbe_update_stats_counters(); anything unhandled falls
 * through to if_get_counter_default().
 * NOTE(review): the `switch (cnt)` line and the return expressions for
 * IFCOUNTER_COLLISIONS / IFCOUNTER_OQDROPS are excised from this excerpt.
 */
1196 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1198 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1199 if_t ifp = iflib_get_ifp(ctx);
1202 case IFCOUNTER_IPACKETS:
1203 return (sc->ipackets);
1204 case IFCOUNTER_OPACKETS:
1205 return (sc->opackets);
1206 case IFCOUNTER_IBYTES:
1207 return (sc->ibytes);
1208 case IFCOUNTER_OBYTES:
1209 return (sc->obytes);
1210 case IFCOUNTER_IMCASTS:
1211 return (sc->imcasts);
1212 case IFCOUNTER_OMCASTS:
1213 return (sc->omcasts);
1214 case IFCOUNTER_COLLISIONS:
1216 case IFCOUNTER_IQDROPS:
1217 return (sc->iqdrops);
1218 case IFCOUNTER_OQDROPS:
1220 case IFCOUNTER_IERRORS:
1221 return (sc->ierrors);
1223 return (if_get_counter_default(ifp, cnt));
1225 } /* ixgbe_if_get_counter */
1227 /************************************************************************
1229 ************************************************************************/
/*
 * ixgbe_if_i2c_req - service an ifi2creq by reading SFP module bytes.
 *
 * Reads req->len bytes starting at req->offset from the module at
 * req->dev_addr via the PHY's read_i2c_byte op; bails out early when
 * the MAC/PHY provides no I2C read op.
 * NOTE(review): return-type line, loop index declaration and return
 * statements are excised from this excerpt.
 */
1231 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1233 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1234 struct ixgbe_hw *hw = &sc->hw;
1238 if (hw->phy.ops.read_i2c_byte == NULL)
1240 for (i = 0; i < req->len; i++)
1241 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1242 req->dev_addr, &req->data[i]);
1244 } /* ixgbe_if_i2c_req */
1246 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1247 * @ctx: iflib context
1248 * @event: event code to check
1250 * Defaults to returning true for unknown events.
1252 * @returns true if iflib needs to reinit the interface
/*
 * NOTE(review): function body is truncated by extraction — the switch
 * header, return statements and closing brace are not visible here.
 * Per the comment above, unknown events default to "needs restart".
 */
1255 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1258 case IFLIB_RESTART_VLAN_CONFIG:
1265 /************************************************************************
1266 * ixgbe_add_media_types
1267 ************************************************************************/
/*
 * NOTE(review): excerpt is gapped — the `layer` declaration, some
 * continuation lines (e.g. trailing NULL arguments), `#else`/`#endif`
 * of the IFM_ETH_XTYPE conditional and several closing braces are
 * excised. Comments describe only the visible lines.
 */
1269 ixgbe_add_media_types(if_ctx_t ctx)
1271 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1272 struct ixgbe_hw *hw = &sc->hw;
1273 device_t dev = iflib_get_dev(ctx);
1276 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1278 /* Media types with matching FreeBSD media defines */
1279 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1280 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1281 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1282 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1283 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1284 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1285 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1286 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
/* X550 copper additionally supports 2.5G/5G (NBASE-T) rates. */
1288 if (hw->mac.type == ixgbe_mac_X550) {
1289 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1290 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1293 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1294 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1295 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
/* Multispeed fiber modules also get the 1G fiber media entry. */
1298 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1299 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1300 if (hw->phy.multispeed_fiber)
1301 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1304 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1305 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1306 if (hw->phy.multispeed_fiber)
1307 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1309 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1310 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1311 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1312 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
/* Newer ifmedia has native backplane (KR/KX4/KX) media words... */
1314 #ifdef IFM_ETH_XTYPE
1315 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1316 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1317 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1318 ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1319 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1320 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1321 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1322 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
/* ...otherwise map backplane types onto the closest legacy media word. */
1324 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1325 device_printf(dev, "Media supported: 10GbaseKR\n");
1326 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1327 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1329 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1330 device_printf(dev, "Media supported: 10GbaseKX4\n");
1331 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1332 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1334 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1335 device_printf(dev, "Media supported: 1000baseKX\n");
1336 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1337 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1339 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1340 device_printf(dev, "Media supported: 2500baseKX\n");
1341 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1342 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
/* 1000baseBX has no media word; only announce it. */
1345 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1346 device_printf(dev, "Media supported: 1000baseBX\n");
1348 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1349 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1351 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1354 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1355 } /* ixgbe_add_media_types */
1357 /************************************************************************
1359 ************************************************************************/
/*
 * ixgbe_is_sfp - decide whether this MAC's media is a pluggable module.
 *
 * Dispatches on MAC type: 82598 checks for the NL PHY, 82599 checks
 * the media type for fiber/QSFP, X550EM variants check for fiber.
 * NOTE(review): return-type line, return statements and several
 * break/default lines are excised from this excerpt.
 */
1361 ixgbe_is_sfp(struct ixgbe_hw *hw)
1363 switch (hw->mac.type) {
1364 case ixgbe_mac_82598EB:
1365 if (hw->phy.type == ixgbe_phy_nl)
1368 case ixgbe_mac_82599EB:
1369 switch (hw->mac.ops.get_media_type(hw)) {
1370 case ixgbe_media_type_fiber:
1371 case ixgbe_media_type_fiber_qsfp:
1376 case ixgbe_mac_X550EM_x:
1377 case ixgbe_mac_X550EM_a:
1378 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1384 } /* ixgbe_is_sfp */
1386 /************************************************************************
1388 ************************************************************************/
/*
 * NOTE(review): excerpt is gapped — the if/else around the SFP path,
 * continuation arguments of get_link_capabilities/setup_link and some
 * braces are excised.
 */
1390 ixgbe_config_link(if_ctx_t ctx)
1392 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1393 struct ixgbe_hw *hw = &sc->hw;
1394 u32 autoneg, err = 0;
1395 bool sfp, negotiate;
1397 sfp = ixgbe_is_sfp(hw);
/* SFP media: defer module setup to the admin task (MOD request). */
1400 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1401 iflib_admin_intr_deferred(ctx);
/* Non-SFP media: query link, then negotiate and program the link. */
1403 if (hw->mac.ops.check_link)
1404 err = ixgbe_check_link(hw, &sc->link_speed,
1405 &sc->link_up, false);
1408 autoneg = hw->phy.autoneg_advertised;
1409 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1410 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1415 if (hw->mac.type == ixgbe_mac_X550 &&
1416 hw->phy.autoneg_advertised == 0) {
1418 * 2.5G and 5G autonegotiation speeds on X550
1419 * are disabled by default due to reported
1420 * interoperability issues with some switches.
1422 * The second condition checks if any operations
1423 * involving setting autonegotiation speeds have
1424 * been performed prior to this ixgbe_config_link()
1427 * If hw->phy.autoneg_advertised does not
1428 * equal 0, this means that the user might have
1429 * set autonegotiation speeds via the sysctl
1430 * before bringing the interface up. In this
1431 * case, we should not disable 2.5G and 5G
1432 * since that speeds might be selected by the
1435 * Otherwise (i.e. if hw->phy.autoneg_advertised
1436 * is set to 0), it is the first time we set
1437 * autonegotiation preferences and the default
1438 * set of speeds should exclude 2.5G and 5G.
1440 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1441 IXGBE_LINK_SPEED_5GB_FULL);
1444 if (hw->mac.ops.setup_link)
1445 err = hw->mac.ops.setup_link(hw, autoneg,
1448 } /* ixgbe_config_link */
1450 /************************************************************************
1451 * ixgbe_update_stats_counters - Update board statistics counters.
1452 ************************************************************************/
/*
 * NOTE(review): excerpt is gapped — the lxoffrxc declaration, the
 * missed-rx accumulation loop, the 82598 `else` branch header and a
 * few continuation lines are excised. Counters are read-to-clear, so
 * each read is accumulated (+=) into the softc stats.
 */
1454 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1456 struct ixgbe_hw *hw = &sc->hw;
1457 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1458 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1460 u64 total_missed_rx = 0;
1462 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1463 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1464 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1465 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1466 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
/* Per-queue packet/drop counters, first 16 queues. */
1468 for (int i = 0; i < 16; i++) {
1469 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1470 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1471 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1473 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1474 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1475 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1477 /* Hardware workaround, gprc counts missed packets */
1478 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1479 stats->gprc -= missed_rx;
/* Non-82598 MACs have 64-bit octet counters split low/high. */
1481 if (hw->mac.type != ixgbe_mac_82598EB) {
1482 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1483 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1484 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1485 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1486 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1487 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1488 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1489 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1490 stats->lxoffrxc += lxoffrxc;
1492 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1493 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1494 stats->lxoffrxc += lxoffrxc;
1495 /* 82598 only has a counter in the high register */
1496 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1497 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1498 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1502 * For watchdog management we need to know if we have been paused
1503 * during the last interval, so capture that here.
1506 sc->shared->isc_pause_frames = 1;
1509 * Workaround: mprc hardware is incorrectly counting
1510 * broadcasts, so for now we subtract those.
1512 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1513 stats->bprc += bprc;
1514 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1515 if (hw->mac.type == ixgbe_mac_82598EB)
1516 stats->mprc -= bprc;
/* RX size-bucket histograms. */
1518 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1519 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1520 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1521 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1522 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1523 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1525 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1526 stats->lxontxc += lxon;
1527 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1528 stats->lxofftxc += lxoff;
1529 total = lxon + lxoff;
/* Pause frames are counted as TX packets by hw; back them out. */
1531 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1532 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1533 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1534 stats->gptc -= total;
1535 stats->mptc -= total;
1536 stats->ptc64 -= total;
1537 stats->gotc -= total * ETHER_MIN_LEN;
1539 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1540 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1541 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1542 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1543 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1544 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1545 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1546 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1547 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1548 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1549 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1550 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1551 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1552 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1553 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1554 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1555 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1556 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1557 /* Only read FCOE on 82599 */
1558 if (hw->mac.type != ixgbe_mac_82598EB) {
1559 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1560 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1561 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1562 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1563 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1566 /* Fill out the OS statistics structure */
1567 IXGBE_SET_IPACKETS(sc, stats->gprc);
1568 IXGBE_SET_OPACKETS(sc, stats->gptc);
1569 IXGBE_SET_IBYTES(sc, stats->gorc);
1570 IXGBE_SET_OBYTES(sc, stats->gotc);
1571 IXGBE_SET_IMCASTS(sc, stats->mprc);
1572 IXGBE_SET_OMCASTS(sc, stats->mptc);
1573 IXGBE_SET_COLLISIONS(sc, 0);
1574 IXGBE_SET_IQDROPS(sc, total_missed_rx);
1577 * Aggregate following types of errors as RX errors:
1578 * - CRC error count,
1579 * - illegal byte error count,
1580 * - checksum error count,
1581 * - missed packets count,
1582 * - length error count,
1583 * - undersized packets count,
1584 * - fragmented packets count,
1585 * - oversized packets count,
1588 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec +
1589 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1591 } /* ixgbe_update_stats_counters */
1593 /************************************************************************
1594 * ixgbe_add_hw_stats
1596 * Add sysctl variables, one per statistic, to the system.
1597 ************************************************************************/
/*
 * NOTE(review): excerpt is gapped — return-type line, the loop index
 * declaration, some continuation lines and closing braces of the
 * per-queue loops are excised.
 */
1599 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1601 device_t dev = iflib_get_dev(sc->ctx);
1602 struct ix_rx_queue *rx_que;
1603 struct ix_tx_queue *tx_que;
1604 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1605 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1606 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1607 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1608 struct sysctl_oid *stat_node, *queue_node;
1609 struct sysctl_oid_list *stat_list, *queue_list;
1612 #define QUEUE_NAME_LEN 32
1613 char namebuf[QUEUE_NAME_LEN];
1615 /* Driver Statistics */
1616 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1617 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1618 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1619 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1620 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1621 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
/* One sysctl node per TX queue: head/tail registers and packet counts. */
1623 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1624 struct tx_ring *txr = &tx_que->txr;
1625 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1626 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1627 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1628 queue_list = SYSCTL_CHILDREN(queue_node);
1630 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1631 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1632 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1633 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1634 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1635 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1636 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1637 CTLFLAG_RD, &txr->tso_tx, "TSO");
1638 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1639 CTLFLAG_RD, &txr->total_packets,
1640 "Queue Packets Transmitted");
/* One sysctl node per RX queue: interrupt rate, irqs, ring counters. */
1643 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1644 struct rx_ring *rxr = &rx_que->rxr;
1645 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1646 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1647 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1648 queue_list = SYSCTL_CHILDREN(queue_node);
1650 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1651 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1652 &sc->rx_queues[i], 0,
1653 ixgbe_sysctl_interrupt_rate_handler, "IU",
1655 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1656 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1657 "irqs on this queue");
1658 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1659 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1660 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1661 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1662 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1663 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1664 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1665 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1666 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1667 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1668 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1669 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1670 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1671 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1674 /* MAC stats get their own sub node */
1676 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1677 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1678 stat_list = SYSCTL_CHILDREN(stat_node);
1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1681 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1683 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1685 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1687 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1689 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1690 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1691 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1693 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1695 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1696 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1697 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1699 /* Flow Control stats */
1700 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1701 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1702 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1703 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1704 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1705 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1707 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1709 /* Packet Reception Stats */
1710 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1711 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1713 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1715 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1716 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1717 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1719 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1721 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1722 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1723 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1725 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1727 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1728 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1729 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1731 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
/* NOTE(review): description says "1023-1522"; bucket is 1024-1522. */
1732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1733 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1734 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1735 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1737 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1739 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1741 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1743 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
/*
 * NOTE(review): "management_pkts_drpd" is bound to &stats->mngptc
 * (management packets *transmitted*); &stats->mngpdc looks like the
 * intended counter — confirm against the stats struct before changing.
 */
1744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1745 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1747 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1749 /* Packet Transmission Stats */
1750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1751 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1752 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1753 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1755 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1756 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1757 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1759 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1761 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1762 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1763 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1764 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1765 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1766 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1767 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1768 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1769 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1771 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1772 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1773 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1774 } /* ixgbe_add_hw_stats */
1776 /************************************************************************
1777 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1779 * Retrieves the TDH value from the hardware
1780 ************************************************************************/
/*
 * Read-only sysctl: reports the hardware TDH (TX descriptor head)
 * register for the queue bound to this OID (oid_arg1 = tx_ring).
 * NOTE(review): return-type line, val/error declarations, the NULL
 * check on txr and final return are excised from this excerpt.
 */
1782 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1784 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1791 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1792 error = sysctl_handle_int(oidp, &val, 0, req);
1793 if (error || !req->newptr)
1797 } /* ixgbe_sysctl_tdh_handler */
1799 /************************************************************************
1800 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1802 * Retrieves the TDT value from the hardware
1803 ************************************************************************/
/*
 * Read-only sysctl: reports the hardware TDT (TX descriptor tail)
 * register for the queue bound to this OID (oid_arg1 = tx_ring).
 * NOTE(review): declarations and return statements are excised from
 * this excerpt.
 */
1805 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1807 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1814 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1815 error = sysctl_handle_int(oidp, &val, 0, req);
1816 if (error || !req->newptr)
1820 } /* ixgbe_sysctl_tdt_handler */
1822 /************************************************************************
1823 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1825 * Retrieves the RDH value from the hardware
1826 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and local
 * declarations are not visible here. */
1828 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
/* oid_arg1 carries the rx_ring pointer registered with this node. */
1830 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live Receive Descriptor Head register for this queue. */
1837 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1838 error = sysctl_handle_int(oidp, &val, 0, req);
1839 if (error || !req->newptr)
1843 } /* ixgbe_sysctl_rdh_handler */
1845 /************************************************************************
1846 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1848 * Retrieves the RDT value from the hardware
1849 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and local
 * declarations are not visible here. */
1851 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
/* oid_arg1 carries the rx_ring pointer registered with this node. */
1853 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live Receive Descriptor Tail register for this queue. */
1860 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1861 error = sysctl_handle_int(oidp, &val, 0, req);
1862 if (error || !req->newptr)
1866 } /* ixgbe_sysctl_rdt_handler */
1868 /************************************************************************
1869 * ixgbe_if_vlan_register
1871 * Run via vlan config EVENT, it enables us to use the
1872 * HW Filter table since we can get the vlan id. This
1873 * just creates the entry in the soft version of the
1874 * VFTA, init will repopulate the real table.
1875 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and the
 * declarations of 'index'/'bit' (and the computation of 'bit') are not
 * visible here. */
1877 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1879 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* shadow_vfta is a bitmap of 32-bit words: word = vtag >> 5,
 * bit = low 5 bits (computation of 'bit' elided from this view). */
1882 index = (vtag >> 5) & 0x7F;
1884 sc->shadow_vfta[index] |= (1 << bit);
/* Push the soft VFTA state into the hardware filter table. */
1886 ixgbe_setup_vlan_hw_support(ctx);
1887 } /* ixgbe_if_vlan_register */
1889 /************************************************************************
1890 * ixgbe_if_vlan_unregister
1892 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1893 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and the
 * declarations of 'index'/'bit' are not visible here. */
1895 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1897 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* Clear the matching bit in the soft VFTA bitmap. */
1900 index = (vtag >> 5) & 0x7F;
1902 sc->shadow_vfta[index] &= ~(1 << bit);
1904 /* Re-init to load the changes */
1905 ixgbe_setup_vlan_hw_support(ctx);
1906 } /* ixgbe_if_vlan_unregister */
1908 /************************************************************************
1909 * ixgbe_setup_vlan_hw_support
1910 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * declarations of 'i'/'ctrl', and several early-return/closing-brace
 * lines are not visible here. */
1912 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1914 struct ifnet *ifp = iflib_get_ifp(ctx);
1915 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1916 struct ixgbe_hw *hw = &sc->hw;
1917 struct rx_ring *rxr;
1923 * We get here thru init_locked, meaning
1924 * a soft reset, this has already cleared
1925 * the VFTA and other state, so if there
1926 * have been no vlan's registered do nothing.
1928 if (sc->num_vlans == 0)
1931 /* Setup the queues for vlans */
/* Enable per-queue VLAN tag stripping when HWTAGGING is on. */
1932 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1933 for (i = 0; i < sc->num_rx_queues; i++) {
1934 rxr = &sc->rx_queues[i].rxr;
1935 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1936 if (hw->mac.type != ixgbe_mac_82598EB) {
1937 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1938 ctrl |= IXGBE_RXDCTL_VME;
1939 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1941 rxr->vtag_strip = true;
/* Without HWFILTER there is no VFTA to repopulate (body of the
 * early return elided from this view). */
1945 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1948 * A soft reset zero's out the VFTA, so
1949 * we need to repopulate it now.
1951 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1952 if (sc->shadow_vfta[i] != 0)
1953 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1954 sc->shadow_vfta[i]);
1956 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1957 /* Enable the Filter Table if enabled */
1958 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1959 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1960 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL.VME), not per queue. */
1962 if (hw->mac.type == ixgbe_mac_82598EB)
1963 ctrl |= IXGBE_VLNCTRL_VME;
1964 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1965 } /* ixgbe_setup_vlan_hw_support */
1967 /************************************************************************
1968 * ixgbe_get_slot_info
1970 * Get the width and transaction speed of
1971 * the slot this adapter is plugged into.
1972 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * declarations of 'offset'/'link', several labels (e.g. the target of
 * 'goto get_parent_info'), and string-table tails of the printf are not
 * visible here. */
1974 ixgbe_get_slot_info(struct ixgbe_softc *sc)
1976 device_t dev = iflib_get_dev(sc->ctx);
1977 struct ixgbe_hw *hw = &sc->hw;
1978 int bus_info_valid = true;
1982 /* Some devices are behind an internal bridge */
1983 switch (hw->device_id) {
1984 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1985 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1986 goto get_parent_info;
/* Default path: let shared code fill hw->bus from this function. */
1991 ixgbe_get_bus_info(hw);
1994 * Some devices don't use PCI-E, but there is no need
1995 * to display "Unknown" for bus speed and width.
1997 switch (hw->mac.type) {
1998 case ixgbe_mac_X550EM_x:
1999 case ixgbe_mac_X550EM_a:
2007 * For the Quad port adapter we need to parse back
2008 * up the PCI tree to find the speed of the expansion
2009 * slot into which this adapter is plugged. A bit more work.
/* Walk two levels up twice: device -> parent bridge -> slot bridge. */
2011 dev = device_get_parent(device_get_parent(dev));
2013 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2014 pci_get_slot(dev), pci_get_function(dev));
2016 dev = device_get_parent(device_get_parent(dev));
2018 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2019 pci_get_slot(dev), pci_get_function(dev));
2021 /* Now get the PCI Express Capabilities offset */
2022 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2024 * Hmm...can't get PCI-Express capabilities.
2025 * Falling back to default method.
2027 bus_info_valid = false;
2028 ixgbe_get_bus_info(hw);
2031 /* ...and read the Link Status Register */
2032 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode the Link Status value into hw->bus.speed / hw->bus.width. */
2033 ixgbe_set_pci_config_data_generic(hw, link);
2036 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2037 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2038 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2039 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2041 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2042 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2043 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the slot cannot feed the adapter at full rate. */
2046 if (bus_info_valid) {
2047 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2048 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2049 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2050 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2051 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2053 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2054 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2055 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2056 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2057 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2060 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2063 } /* ixgbe_get_slot_info */
2065 /************************************************************************
2066 * ixgbe_if_msix_intr_assign
2068 * Setup MSI-X Interrupt resources and handlers
2069 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the 'buf'
 * declaration, the computation of 'rid', error-path 'goto' lines, and
 * the return statements are not visible here. */
2071 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2073 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2074 struct ix_rx_queue *rx_que = sc->rx_queues;
2075 struct ix_tx_queue *tx_que;
2076 int error, rid, vector = 0;
2079 /* Admin Que is vector 0*/
/* One MSI-X vector per RX queue; rid presumably vector+1 (the
 * assignment is elided from this view — TODO confirm). */
2081 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2084 snprintf(buf, sizeof(buf), "rxq%d", i);
2085 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2086 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2089 device_printf(iflib_get_dev(ctx),
2090 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count so cleanup only frees what was set up. */
2091 sc->num_rx_queues = i + 1;
2095 rx_que->msix = vector;
/* TX queues have no vectors of their own: each shares (round-robin)
 * the IRQ of an RX queue via a softirq. */
2097 for (int i = 0; i < sc->num_tx_queues; i++) {
2098 snprintf(buf, sizeof(buf), "txq%d", i);
2099 tx_que = &sc->tx_queues[i];
2100 tx_que->msix = i % sc->num_rx_queues;
2101 iflib_softirq_alloc_generic(ctx,
2102 &sc->rx_queues[tx_que->msix].que_irq,
2103 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Admin/link interrupt ("aq") gets its own vector. */
2106 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2107 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2109 device_printf(iflib_get_dev(ctx),
2110 "Failed to register admin handler");
2114 sc->vector = vector;
/* Error unwind: free the admin IRQ then every queue IRQ. */
2118 iflib_irq_free(ctx, &sc->irq);
2119 rx_que = sc->rx_queues;
2120 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2121 iflib_irq_free(ctx, &rx_que->que_irq);
2124 } /* ixgbe_if_msix_intr_assign */
/* NOTE(review): elided listing — return type, opening brace, the EITR
 * write argument, the reset of rxr->bytes/packets, and the closing
 * brace are not visible here. */
2127 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2129 uint32_t newitr = 0;
2130 struct rx_ring *rxr = &que->rxr;
2133 * Do Adaptive Interrupt Moderation:
2134 * - Write out last calculated setting
2135 * - Calculate based on average size over
2136 * the last interval.
/* First flush the setting computed on the previous interrupt. */
2138 if (que->eitr_setting) {
2139 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2143 que->eitr_setting = 0;
2144 /* Idle, do nothing */
2145 if (rxr->bytes == 0) {
/* Average frame size over the last interval drives the new rate. */
2149 if ((rxr->bytes) && (rxr->packets)) {
2150 newitr = (rxr->bytes / rxr->packets);
2153 newitr += 24; /* account for hardware frame, crc */
2154 /* set an upper boundary */
2155 newitr = min(newitr, 3000);
2157 /* Be nice to the mid range */
2158 if ((newitr > 300) && (newitr < 1200)) {
2159 newitr = (newitr / 3);
2161 newitr = (newitr / 2);
/* 82598 packs the interval into both halves; newer MACs set the
 * counter-write-disable bit instead. */
2164 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2165 newitr |= newitr << 16;
2167 newitr |= IXGBE_EITR_CNT_WDIS;
2170 /* save for next interrupt */
2171 que->eitr_setting = newitr;
2180 /*********************************************************************
2181 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2182 **********************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and the
 * reset of the ring byte/packet counters are not visible here. */
2184 ixgbe_msix_que(void *arg)
2186 struct ix_rx_queue *que = arg;
2187 struct ixgbe_softc *sc = que->sc;
2188 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx);
2190 /* Protect against spurious interrupts */
2191 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2192 return (FILTER_HANDLED);
/* Mask this queue's interrupt until the threaded handler re-enables. */
2194 ixgbe_disable_queue(sc, que->msix);
2198 if (sc->enable_aim) {
2199 ixgbe_perform_aim(sc, que);
2202 return (FILTER_SCHEDULE_THREAD);
2203 } /* ixgbe_msix_que */
2205 /************************************************************************
2206 * ixgbe_if_media_status - Media Ioctl callback
2208 * Called whenever the user queries the status of
2209 * the interface using ifconfig.
2210 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'layer' declaration, the switch braces, and every 'break'/'return'
 * are not visible here; source line numbers jump. */
2212 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2214 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2215 struct ixgbe_hw *hw = &sc->hw;
2218 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2220 ifmr->ifm_status = IFM_AVALID;
2221 ifmr->ifm_active = IFM_ETHER;
2223 if (!sc->link_active)
2226 ifmr->ifm_status |= IFM_ACTIVE;
2227 layer = sc->phy_layer;
/* Map (physical layer, link speed) onto an ifmedia subtype; each
 * physical-layer family below has its own speed switch. */
2229 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2230 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2231 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2232 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2233 switch (sc->link_speed) {
2234 case IXGBE_LINK_SPEED_10GB_FULL:
2235 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2237 case IXGBE_LINK_SPEED_1GB_FULL:
2238 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2240 case IXGBE_LINK_SPEED_100_FULL:
2241 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2243 case IXGBE_LINK_SPEED_10_FULL:
2244 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* X550 adds NBASE-T rates (2.5G/5G) on copper. */
2247 if (hw->mac.type == ixgbe_mac_X550)
2248 switch (sc->link_speed) {
2249 case IXGBE_LINK_SPEED_5GB_FULL:
2250 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2252 case IXGBE_LINK_SPEED_2_5GB_FULL:
2253 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2256 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2257 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2258 switch (sc->link_speed) {
2259 case IXGBE_LINK_SPEED_10GB_FULL:
2260 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2263 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2264 switch (sc->link_speed) {
2265 case IXGBE_LINK_SPEED_10GB_FULL:
2266 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2268 case IXGBE_LINK_SPEED_1GB_FULL:
2269 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2272 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2273 switch (sc->link_speed) {
2274 case IXGBE_LINK_SPEED_10GB_FULL:
2275 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2277 case IXGBE_LINK_SPEED_1GB_FULL:
2278 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2281 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2282 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2283 switch (sc->link_speed) {
2284 case IXGBE_LINK_SPEED_10GB_FULL:
2285 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2287 case IXGBE_LINK_SPEED_1GB_FULL:
2288 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2291 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2292 switch (sc->link_speed) {
2293 case IXGBE_LINK_SPEED_10GB_FULL:
2294 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2298 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX) media: older ifmedia headers lack the KR/KX
 * subtypes, so map to the nearest optical/copper subtype instead. */
2301 #ifndef IFM_ETH_XTYPE
2302 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2303 switch (sc->link_speed) {
2304 case IXGBE_LINK_SPEED_10GB_FULL:
2305 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2307 case IXGBE_LINK_SPEED_2_5GB_FULL:
2308 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2310 case IXGBE_LINK_SPEED_1GB_FULL:
2311 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2314 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2315 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2316 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2317 switch (sc->link_speed) {
2318 case IXGBE_LINK_SPEED_10GB_FULL:
2319 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2321 case IXGBE_LINK_SPEED_2_5GB_FULL:
2322 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2324 case IXGBE_LINK_SPEED_1GB_FULL:
2325 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* Newer headers: use the real KR/KX4/KX subtypes. */
2329 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2330 switch (sc->link_speed) {
2331 case IXGBE_LINK_SPEED_10GB_FULL:
2332 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2334 case IXGBE_LINK_SPEED_2_5GB_FULL:
2335 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2337 case IXGBE_LINK_SPEED_1GB_FULL:
2338 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2341 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2342 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2343 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2344 switch (sc->link_speed) {
2345 case IXGBE_LINK_SPEED_10GB_FULL:
2346 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2348 case IXGBE_LINK_SPEED_2_5GB_FULL:
2349 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2351 case IXGBE_LINK_SPEED_1GB_FULL:
2352 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2357 /* If nothing is recognized... */
2358 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2359 ifmr->ifm_active |= IFM_UNKNOWN;
2361 /* Display current flow control setting used on link */
2362 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2363 hw->fc.current_mode == ixgbe_fc_full)
2364 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2365 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2366 hw->fc.current_mode == ixgbe_fc_full)
2367 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2368 } /* ixgbe_if_media_status */
2370 /************************************************************************
2371 * ixgbe_if_media_change - Media Ioctl callback
2373 * Called when the user changes speed/duplex using
2374 * media/mediaopt options with ifconfig.
2375 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, case
 * labels at several elided line numbers (e.g. IFM_AUTO, IFM_10G_LRM,
 * backplane register write target), 'break's and the return statements
 * are not visible here. */
2377 ixgbe_if_media_change(if_ctx_t ctx)
2379 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2380 struct ifmedia *ifm = iflib_get_media(ctx);
2381 struct ixgbe_hw *hw = &sc->hw;
2382 ixgbe_link_speed speed = 0;
2384 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2386 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2389 if (hw->phy.media_type == ixgbe_media_type_backplane)
2393 * We don't actually need to check against the supported
2394 * media types of the adapter; ifmedia will take care of
/* Build the requested speed mask from the selected ifmedia subtype. */
2397 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2400 speed |= IXGBE_LINK_SPEED_100_FULL;
2401 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2402 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2406 #ifndef IFM_ETH_XTYPE
2407 case IFM_10G_SR: /* KR, too */
2408 case IFM_10G_CX4: /* KX4 */
2413 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2414 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2416 #ifndef IFM_ETH_XTYPE
2417 case IFM_1000_CX: /* KX */
2423 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2426 speed |= IXGBE_LINK_SPEED_100_FULL;
2427 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2429 case IFM_10G_TWINAX:
2430 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2433 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2436 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2439 speed |= IXGBE_LINK_SPEED_100_FULL;
2442 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Push the new speed mask into the PHY/MAC link setup. */
2448 hw->mac.autotry_restart = true;
2449 hw->mac.ops.setup_link(hw, speed, true);
/* Encode the advertised-speed sysctl bitmask (assignment target is
 * elided here — presumably sc->advertise; TODO confirm). */
2451 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2452 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2453 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2454 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2455 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2456 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
2461 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2464 } /* ixgbe_if_media_change */
2466 /************************************************************************
2468 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'rctl'/'mcnt' declarations, and the return are not visible here.
 * The 'flags' parameter is not referenced in the visible lines;
 * ifp->if_flags is consulted instead. */
2470 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2472 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2473 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from the current filter control with unicast-promisc off. */
2477 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2478 rctl &= (~IXGBE_FCTRL_UPE);
2479 if (ifp->if_flags & IFF_ALLMULTI)
2480 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2482 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
/* Only run multicast-promiscuous when the filter table overflows. */
2484 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2485 rctl &= (~IXGBE_FCTRL_MPE);
2486 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2488 if (ifp->if_flags & IFF_PROMISC) {
2489 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2490 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2491 } else if (ifp->if_flags & IFF_ALLMULTI) {
2492 rctl |= IXGBE_FCTRL_MPE;
2493 rctl &= ~IXGBE_FCTRL_UPE;
2494 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2497 } /* ixgbe_if_promisc_set */
2499 /************************************************************************
2500 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2501 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'retval' declaration, several case labels/'break's inside the
 * over-temp switch, and some closing braces are not visible here. */
2503 ixgbe_msix_link(void *arg)
2505 struct ixgbe_softc *sc = arg;
2506 struct ixgbe_hw *hw = &sc->hw;
2507 u32 eicr, eicr_mask;
2512 /* Pause other interrupts */
2513 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2515 /* First get the cause */
2516 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2517 /* Be sure the queue bits are not cleared */
2518 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2519 /* Clear interrupt with write */
2520 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2522 /* Link status change */
2523 if (eicr & IXGBE_EICR_LSC) {
2524 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2525 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* Causes below only exist on post-82598 MACs. */
2528 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2529 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2530 (eicr & IXGBE_EICR_FLOW_DIR)) {
2531 /* This is probably overkill :) */
2532 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2533 return (FILTER_HANDLED);
2534 /* Disable the interrupt */
2535 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2536 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2538 if (eicr & IXGBE_EICR_ECC) {
2539 device_printf(iflib_get_dev(sc->ctx),
2540 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2541 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2544 /* Check for over temp condition */
2545 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2546 switch (sc->hw.mac.type) {
2547 case ixgbe_mac_X550EM_a:
2548 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
/* X550EM_a reports over-temp via GPI SDP0; mask and ack it
 * before querying the PHY. */
2550 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2551 IXGBE_EICR_GPI_SDP0_X550EM_a);
2552 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2553 IXGBE_EICR_GPI_SDP0_X550EM_a);
2554 retval = hw->phy.ops.check_overtemp(hw);
2555 if (retval != IXGBE_ERR_OVERTEMP)
2557 device_printf(iflib_get_dev(sc->ctx),
2558 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2559 device_printf(iflib_get_dev(sc->ctx),
2560 "System shutdown required!\n");
/* Other MACs report over-temp via the TS bit. */
2563 if (!(eicr & IXGBE_EICR_TS))
2565 retval = hw->phy.ops.check_overtemp(hw);
2566 if (retval != IXGBE_ERR_OVERTEMP)
2568 device_printf(iflib_get_dev(sc->ctx),
2569 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2570 device_printf(iflib_get_dev(sc->ctx),
2571 "System shutdown required!\n");
2572 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2577 /* Check for VF message */
2578 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2579 (eicr & IXGBE_EICR_MAILBOX))
2580 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2583 if (ixgbe_is_sfp(hw)) {
2584 /* Pluggable optics-related interrupt */
2585 if (hw->mac.type >= ixgbe_mac_X540)
2586 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2588 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2590 if (eicr & eicr_mask) {
2591 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2592 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2595 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2596 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2597 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2598 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2599 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2603 /* Check for fan failure */
2604 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2605 ixgbe_check_fan_failure(sc, eicr, true);
2606 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2609 /* External PHY interrupt */
2610 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2611 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2612 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2613 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Only wake the admin task if at least one request was queued. */
2616 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2617 } /* ixgbe_msix_link */
2619 /************************************************************************
2620 * ixgbe_sysctl_interrupt_rate_handler
2621 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'error' declaration, guards around the division, and the returns are
 * not visible here. */
2623 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2625 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2627 unsigned int reg, usec, rate;
/* EITR interval field lives in bits [11:3]. */
2629 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2630 usec = ((reg & 0x0FF8) >> 3);
2632 rate = 500000 / usec;
2635 error = sysctl_handle_int(oidp, &rate, 0, req);
2636 if (error || !req->newptr)
2638 reg &= ~0xfff; /* default, no limitation */
2639 ixgbe_max_interrupt_rate = 0;
2640 if (rate > 0 && rate < 500000) {
/* Encode the requested rate back into the interval field. */
2643 ixgbe_max_interrupt_rate = rate;
2644 reg |= ((4000000/rate) & 0xff8);
2646 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2649 } /* ixgbe_sysctl_interrupt_rate_handler */
2651 /************************************************************************
2652 * ixgbe_add_device_sysctls
2653 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, and a few
 * closing braces are not visible here. Registers the driver's sysctl
 * tree: flow control, advertised speed, AIM, FW version, power state,
 * RSS config, plus conditional nodes for X550/WoL/external-PHY/EEE. */
2655 ixgbe_add_device_sysctls(if_ctx_t ctx)
2657 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2658 device_t dev = iflib_get_dev(ctx);
2659 struct ixgbe_hw *hw = &sc->hw;
2660 struct sysctl_oid_list *child;
2661 struct sysctl_ctx_list *ctx_list;
2663 ctx_list = device_get_sysctl_ctx(dev);
2664 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2666 /* Sysctls for all devices */
2667 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2668 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2669 sc, 0, ixgbe_sysctl_flowcntl, "I",
2670 IXGBE_SYSCTL_DESC_SET_FC);
2672 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2673 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2674 sc, 0, ixgbe_sysctl_advertise, "I",
2675 IXGBE_SYSCTL_DESC_ADV_SPEED);
/* Seed the runtime AIM toggle from the loader tunable. */
2677 sc->enable_aim = ixgbe_enable_aim;
2678 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2679 &sc->enable_aim, 0, "Interrupt Moderation");
2681 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2682 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2683 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2686 /* testing sysctls (for all devices) */
2687 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2688 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2689 sc, 0, ixgbe_sysctl_power_state,
2690 "I", "PCI Power State");
2692 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2693 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2694 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2696 /* for X550 series devices */
2697 if (hw->mac.type >= ixgbe_mac_X550)
2698 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2699 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2700 sc, 0, ixgbe_sysctl_dmac,
2701 "I", "DMA Coalesce");
2703 /* for WoL-capable devices */
2704 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2705 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2706 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2707 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2709 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2710 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2711 sc, 0, ixgbe_sysctl_wufc,
2712 "I", "Enable/Disable Wake Up Filters");
2715 /* for X552/X557-AT devices */
2716 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2717 struct sysctl_oid *phy_node;
2718 struct sysctl_oid_list *phy_list;
/* External PHY stats get their own "phy" subtree. */
2720 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2721 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2722 phy_list = SYSCTL_CHILDREN(phy_node);
2724 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2725 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2726 sc, 0, ixgbe_sysctl_phy_temp,
2727 "I", "Current External PHY Temperature (Celsius)");
2729 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2730 "overtemp_occurred",
2731 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2732 ixgbe_sysctl_phy_overtemp_occurred, "I",
2733 "External PHY High Temperature Event Occurred");
2736 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2737 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2738 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2739 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2741 } /* ixgbe_add_device_sysctls */
2743 /************************************************************************
2744 * ixgbe_allocate_pci_resources
2745 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the 'rid'
 * declaration/initialization, the resource flags argument, and the
 * return statements are not visible here. */
2747 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2749 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2750 device_t dev = iflib_get_dev(ctx);
/* Map the device's register BAR. */
2754 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2757 if (!(sc->pci_mem)) {
2758 device_printf(dev, "Unable to allocate bus resource: memory\n");
2762 /* Save bus_space values for READ/WRITE_REG macros */
2763 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2764 sc->osdep.mem_bus_space_handle =
2765 rman_get_bushandle(sc->pci_mem);
2766 /* Set hw values for shared code */
2767 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2770 } /* ixgbe_allocate_pci_resources */
2772 /************************************************************************
2773 * ixgbe_detach - Device removal routine
2775 * Called when the driver is being removed.
2776 * Stops the adapter and deallocates all the resources
2777 * that were allocated for driver operation.
2779 * return 0 on success, positive on failure
2780 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'ctrl_ext' declaration, and the return statements are not visible
 * here. */
2782 ixgbe_if_detach(if_ctx_t ctx)
2784 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2785 device_t dev = iflib_get_dev(ctx);
2788 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Refuse to detach while VFs are still attached. */
2790 if (ixgbe_pci_iov_detach(dev) != 0) {
2791 device_printf(dev, "SR-IOV in use; detach first.\n");
2795 ixgbe_setup_low_power_mode(ctx);
2797 /* let hardware know driver is unloading */
2798 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2799 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2800 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2802 ixgbe_free_pci_resources(ctx);
2803 free(sc->mta, M_IXGBE);
2806 } /* ixgbe_if_detach */
2808 /************************************************************************
2809 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2811 * Prepare the adapter/port for LPLU and/or WoL
2812 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'error' declaration, the non-X550EM "else" stop path, and the return
 * are not visible here. */
2814 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2816 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2817 struct ixgbe_hw *hw = &sc->hw;
2818 device_t dev = iflib_get_dev(ctx);
/* If WoL is off there is no reason to keep the PHY powered. */
2821 if (!hw->wol_enabled)
2822 ixgbe_set_phy_power(hw, false);
2824 /* Limit power management flow to X550EM baseT */
2825 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2826 hw->phy.ops.enter_lplu) {
2827 /* Turn off support for APM wakeup. (Using ACPI instead) */
2828 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2829 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2832 * Clear Wake Up Status register to prevent any previous wakeup
2833 * events from waking us up immediately after we suspend.
2835 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2838 * Program the Wakeup Filter Control register with user filter
2841 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2843 /* Enable wakeups and power management in Wakeup Control */
2844 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2845 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2847 /* X550EM baseT adapters need a special LPLU flow */
2848 hw->phy.reset_disable = true;
2850 error = hw->phy.ops.enter_lplu(hw);
2852 device_printf(dev, "Error entering LPLU: %d\n", error);
2853 hw->phy.reset_disable = false;
2855 /* Just stop for other adapters */
2860 } /* ixgbe_setup_low_power_mode */
2862 /************************************************************************
2863 * ixgbe_shutdown - Shutdown entry point
2864 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'error' declaration, and the return are not visible here. Simply
 * delegates shutdown to the low-power-mode preparation. */
2866 ixgbe_if_shutdown(if_ctx_t ctx)
2870 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2872 error = ixgbe_setup_low_power_mode(ctx);
2875 } /* ixgbe_if_shutdown */
2877 /************************************************************************
2881 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'error' declaration, and the return are not visible here. Suspend
 * uses the same low-power-mode preparation as shutdown. */
2883 ixgbe_if_suspend(if_ctx_t ctx)
2887 INIT_DEBUGOUT("ixgbe_suspend: begin");
2889 error = ixgbe_setup_low_power_mode(ctx);
2892 } /* ixgbe_if_suspend */
2894 /************************************************************************
2898 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the 'wus'
 * declaration, the guard on the "Woken up by" printf, and the return
 * (plus the init call under IFF_UP) are not visible here. */
2900 ixgbe_if_resume(if_ctx_t ctx)
2902 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2903 device_t dev = iflib_get_dev(ctx);
2904 struct ifnet *ifp = iflib_get_ifp(ctx);
2905 struct ixgbe_hw *hw = &sc->hw;
2908 INIT_DEBUGOUT("ixgbe_resume: begin");
2910 /* Read & clear WUS register */
2911 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2913 device_printf(dev, "Woken up by (WUS): %#010x\n",
2914 IXGBE_READ_REG(hw, IXGBE_WUS));
2915 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2916 /* And clear WUFC until next low-power transition */
2917 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2920 * Required after D3->D0 transition;
2921 * will re-advertise all previous advertised speeds
2923 if (ifp->if_flags & IFF_UP)
2927 } /* ixgbe_if_resume */
2929 /************************************************************************
2930 * ixgbe_if_mtu_set - Ioctl mtu entry point
2932 * Return 0 on success, EINVAL on failure
2933 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * EINVAL/0 return statements are not visible here. */
2935 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2937 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2940 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject MTUs beyond the hardware jumbo limit. */
2942 if (mtu > IXGBE_MAX_MTU) {
/* Frame size = MTU plus Ethernet/VLAN header overhead. */
2945 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
2949 } /* ixgbe_if_mtu_set */
2951 /************************************************************************
2952 * ixgbe_if_crcstrip_set
2953 ************************************************************************/
/* NOTE(review): elided listing — return type, opening brace, the
 * 'hl'/'rxc' declarations, and the netmap-verbose guards around the
 * D() calls are not visible here. */
2955 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2957 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2958 struct ixgbe_hw *hw = &sc->hw;
2959 /* crc stripping is set in two places:
2960 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2961 * IXGBE_RDRXCTL (set by the original driver in
2962 * ixgbe_setup_hw_rsc() called in init_locked.
2963 * We disable the setting when netmap is compiled in).
2964 * We update the values here, but also in ixgbe.c because
2965 * init_locked sometimes is called outside our control.
2969 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2970 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2973 D("%s read HLREG 0x%x rxc 0x%x",
2974 onoff ? "enter" : "exit", hl, rxc);
2976 /* hw requirements ... */
2977 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2978 rxc |= IXGBE_RDRXCTL_RSCACKC;
/* netmap entering with CRC keep: fastest RX path. */
2979 if (onoff && !crcstrip) {
2980 /* keep the crc. Fast rx */
2981 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2982 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2984 /* reset default mode */
2985 hl |= IXGBE_HLREG0_RXCRCSTRP;
2986 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2990 D("%s write HLREG 0x%x rxc 0x%x",
2991 onoff ? "enter" : "exit", hl, rxc);
2993 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2994 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2995 } /* ixgbe_if_crcstrip_set */
2997 /*********************************************************************
2998  * ixgbe_if_init - Init entry point
3000  *   Used in two ways: It is used by the stack as an init
3001  *   entry point in network interface structure. It is also
3002  *   used by the driver as a hw/sw initialization routine to
3003  *   get to a consistent state.
3005  *   Return 0 on success, positive on failure
3006  **********************************************************************/
3008 ixgbe_if_init(if_ctx_t ctx)
3010 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3011 struct ifnet *ifp = iflib_get_ifp(ctx);
3012 device_t dev = iflib_get_dev(ctx);
3013 struct ixgbe_hw *hw = &sc->hw;
3014 struct ix_rx_queue *rx_que;
3015 struct ix_tx_queue *tx_que;
3022 INIT_DEBUGOUT("ixgbe_if_init: begin");
3024 /* Queue indices may change with IOV mode */
3025 ixgbe_align_all_queue_indices(sc);
3027 /* reprogram the RAR[0] in case user changed it. */
3028 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3030 /* Get the latest mac address, User can use a LAA */
3031 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3032 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3033 hw->addr_ctrl.rar_used_count = 1;
3037 ixgbe_initialize_iov(sc);
3039 ixgbe_initialize_transmit_units(ctx);
3041 /* Setup Multicast table */
3042 ixgbe_if_multi_set(ctx);
3044 /* Determine the correct mbuf pool, based on frame size */
3045 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3047 /* Configure RX settings */
3048 ixgbe_initialize_receive_units(ctx);
3051 * Initialize variable holding task enqueue requests
3052 * from MSI-X interrupts
3054 sc->task_requests = 0;
3056 /* Enable SDP & MSI-X interrupts based on adapter */
3057 ixgbe_config_gpie(sc);
/* Program the max frame size (MHADD) only for jumbo frames. */
3060 if (ifp->if_mtu > ETHERMTU) {
3061 /* aka IXGBE_MAXFRS on 82599 and newer */
3062 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3063 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3064 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3065 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3068 /* Now enable all the queues */
3069 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
3070 struct tx_ring *txr = &tx_que->txr;
3072 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3073 txdctl |= IXGBE_TXDCTL_ENABLE;
3074 /* Set WTHRESH to 8, burst writeback */
3075 txdctl |= (8 << 16);
3077 * When the internal queue falls below PTHRESH (32),
3078 * start prefetching as long as there are at least
3079 * HTHRESH (1) buffers ready. The values are taken
3080 * from the Intel linux driver 3.8.21.
3081 * Prefetching enables tx line rate even with 1 queue.
3083 txdctl |= (32 << 0) | (1 << 8);
3084 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX ring and poll (up to 10 iterations) until the
 * hardware acknowledges RXDCTL.ENABLE. */
3087 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
3088 struct rx_ring *rxr = &rx_que->rxr;
3090 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3091 if (hw->mac.type == ixgbe_mac_82598EB) {
3097 rxdctl &= ~0x3FFFFF;
3100 rxdctl |= IXGBE_RXDCTL_ENABLE;
3101 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3102 for (j = 0; j < 10; j++) {
3103 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3104 IXGBE_RXDCTL_ENABLE)
3112 /* Enable Receive engine */
3113 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3114 if (hw->mac.type == ixgbe_mac_82598EB)
3115 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3116 rxctrl |= IXGBE_RXCTRL_RXEN;
3117 ixgbe_enable_rx_dma(hw, rxctrl);
3119 /* Set up MSI/MSI-X routing */
3120 if (ixgbe_enable_msix) {
3121 ixgbe_configure_ivars(sc);
3122 /* Set up auto-mask */
3123 if (hw->mac.type == ixgbe_mac_82598EB)
3124 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3126 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3127 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3129 } else { /* Simple settings for Legacy/MSI */
3130 ixgbe_set_ivar(sc, 0, 0, 0);
3131 ixgbe_set_ivar(sc, 0, 0, 1);
3132 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3135 ixgbe_init_fdir(sc);
3138 * Check on any SFP devices that
3139 * need to be kick-started
3141 if (hw->phy.type == ixgbe_phy_none) {
3142 err = hw->phy.ops.identify(hw);
3143 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3145 "Unsupported SFP+ module type was detected.\n");
3150 /* Set moderation on the Link interrupt */
3151 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3153 /* Enable power to the phy. */
3154 ixgbe_set_phy_power(hw, true);
3156 /* Config/Enable Link */
3157 ixgbe_config_link(ctx);
3159 /* Hardware Packet Buffer & Flow Control setup */
3160 ixgbe_config_delay_values(sc);
3162 /* Initialize the FC settings */
3165 /* Set up VLAN support and filter */
3166 ixgbe_setup_vlan_hw_support(ctx);
3168 /* Setup DMA Coalescing */
3169 ixgbe_config_dmac(sc);
3171 /* And now turn on interrupts */
3172 ixgbe_if_enable_intr(ctx);
3174 /* Enable the use of the MBX by the VF's */
3175 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3176 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3177 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3178 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3181 } /* ixgbe_init_locked */
3183 /************************************************************************
3186  *   Setup the correct IVAR register for a particular MSI-X interrupt
3187  *     (yes this is all very magic and confusing :)
3188  *    - entry is the register array entry
3189  *    - vector is the MSI-X vector for this queue
3190  *    - type is RX/TX/MISC
3191  ************************************************************************/
/* type: 0 = RX, 1 = TX, -1 = MISC (other causes).  The layout of the
 * IVAR registers differs between 82598 and the later MACs, hence the
 * switch on hw->mac.type. */
3193 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3195 struct ixgbe_hw *hw = &sc->hw;
/* Every valid IVAR entry must have the ALLOC bit set. */
3198 vector |= IXGBE_IVAR_ALLOC_VAL;
3200 switch (hw->mac.type) {
3201 case ixgbe_mac_82598EB:
3203 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
/* 82598: four 8-bit entries per 32-bit IVAR register; TX entries
 * live 64 slots above RX. */
3205 entry += (type * 64);
3206 index = (entry >> 2) & 0x1F;
3207 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3208 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3209 ivar |= (vector << (8 * (entry & 0x3)));
3210 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3212 case ixgbe_mac_82599EB:
3213 case ixgbe_mac_X540:
3214 case ixgbe_mac_X550:
3215 case ixgbe_mac_X550EM_x:
3216 case ixgbe_mac_X550EM_a:
3217 if (type == -1) { /* MISC IVAR */
3218 index = (entry & 1) * 8;
3219 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3220 ivar &= ~(0xFF << index);
3221 ivar |= (vector << index);
3222 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3223 } else { /* RX/TX IVARS */
3224 index = (16 * (entry & 1)) + (8 * type);
3225 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3226 ivar &= ~(0xFF << index);
3227 ivar |= (vector << index);
3228 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3233 } /* ixgbe_set_ivar */
3235 /************************************************************************
3236  * ixgbe_configure_ivars
3237  ************************************************************************/
/* Map every RX and TX queue, plus the link interrupt, to its MSI-X
 * vector via ixgbe_set_ivar(), and program an initial EITR moderation
 * value derived from the ixgbe_max_interrupt_rate tunable. */
3239 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3241 struct ix_rx_queue *rx_que = sc->rx_queues;
3242 struct ix_tx_queue *tx_que = sc->tx_queues;
3245 if (ixgbe_max_interrupt_rate > 0)
/* EITR interval field: 4 usec granularity, masked to valid bits. */
3246 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3249 * Disable DMA coalescing if interrupt moderation is
3256 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3257 struct rx_ring *rxr = &rx_que->rxr;
3259 /* First the RX queue entry */
3260 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3262 /* Set an Initial EITR value */
3263 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3265 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3266 struct tx_ring *txr = &tx_que->txr;
3268 /* ... and the TX */
3269 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3271 /* For the Link interrupt */
3272 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3273 } /* ixgbe_configure_ivars */
3275 /************************************************************************
3277  ************************************************************************/
/* ixgbe_config_gpie - program the General Purpose Interrupt Enable
 * register: enhanced MSI-X mode when applicable, plus SDP interrupt
 * enables for fan-failure, thermal sensor, and link/module detection
 * depending on the MAC type and enabled features. */
3279 ixgbe_config_gpie(struct ixgbe_softc *sc)
3281 struct ixgbe_hw *hw = &sc->hw;
3284 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3286 if (sc->intr_type == IFLIB_INTR_MSIX) {
3287 /* Enable Enhanced MSI-X mode */
3288 gpie |= IXGBE_GPIE_MSIX_MODE
3290 | IXGBE_GPIE_PBA_SUPPORT
3294 /* Fan Failure Interrupt */
3295 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3296 gpie |= IXGBE_SDP1_GPIEN;
3298 /* Thermal Sensor Interrupt */
3299 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3300 gpie |= IXGBE_SDP0_GPIEN_X540;
3302 /* Link detection */
3303 switch (hw->mac.type) {
3304 case ixgbe_mac_82599EB:
3305 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3307 case ixgbe_mac_X550EM_x:
3308 case ixgbe_mac_X550EM_a:
3309 gpie |= IXGBE_SDP0_GPIEN_X540;
3315 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3317 } /* ixgbe_config_gpie */
3319 /************************************************************************
3320  * ixgbe_config_delay_values
3322  *   Requires sc->max_frame_size to be set.
3323  ************************************************************************/
/* Compute flow-control high/low water marks (in KB) from the max frame
 * size, using the X540-family delay formulas on newer MACs and the
 * generic IXGBE_DV/IXGBE_LOW_DV formulas otherwise.  Only traffic
 * class 0 ([0]) is programmed here. */
3325 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3327 struct ixgbe_hw *hw = &sc->hw;
3328 u32 rxpb, frame, size, tmp;
3330 frame = sc->max_frame_size;
3332 /* Calculate High Water */
3333 switch (hw->mac.type) {
3334 case ixgbe_mac_X540:
3335 case ixgbe_mac_X550:
3336 case ixgbe_mac_X550EM_x:
3337 case ixgbe_mac_X550EM_a:
3338 tmp = IXGBE_DV_X540(frame, frame);
3341 tmp = IXGBE_DV(frame, frame);
/* High water = RX packet buffer size (KB) minus the delay value. */
3344 size = IXGBE_BT2KB(tmp);
3345 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3346 hw->fc.high_water[0] = rxpb - size;
3348 /* Now calculate Low Water */
3349 switch (hw->mac.type) {
3350 case ixgbe_mac_X540:
3351 case ixgbe_mac_X550:
3352 case ixgbe_mac_X550EM_x:
3353 case ixgbe_mac_X550EM_a:
3354 tmp = IXGBE_LOW_DV_X540(frame);
3357 tmp = IXGBE_LOW_DV(frame);
3360 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3362 hw->fc.pause_time = IXGBE_FC_PAUSE;
3363 hw->fc.send_xon = true;
3364 } /* ixgbe_config_delay_values */
3366 /************************************************************************
3367  * ixgbe_set_multi - Multicast Update
3369  *   Called whenever multicast address list is updated.
3370  ************************************************************************/
/* if_foreach_llmaddr() callback: copy multicast address 'idx' into the
 * softc's mta[] table (tagged with the PF pool).  Stops contributing
 * once MAX_NUM_MULTICAST_ADDRESSES entries have been collected. */
3372 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3374 struct ixgbe_softc *sc = arg;
3375 struct ixgbe_mc_addr *mta = sc->mta;
3377 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3379 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3380 mta[idx].vmdq = sc->pool;
3383 } /* ixgbe_mc_filter_apply */
/* ixgbe_if_multi_set - iflib multicast-update entry point.
 * Rebuilds the mta[] table from the interface's link-layer multicast
 * list, pushes it to hardware when it fits, and programs FCTRL
 * promiscuous/all-multicast bits based on interface flags and overflow. */
3386 ixgbe_if_multi_set(if_ctx_t ctx)
3388 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3389 struct ixgbe_mc_addr *mta;
3390 struct ifnet *ifp = iflib_get_ifp(ctx);
3395 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3398 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3400 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
3402 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3403 update_ptr = (u8 *)mta;
3404 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3405 ixgbe_mc_array_itr, true);
3408 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3410 if (ifp->if_flags & IFF_PROMISC)
3411 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
/* Table overflow or IFF_ALLMULTI: accept all multicast only. */
3412 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3413 ifp->if_flags & IFF_ALLMULTI) {
3414 fctrl |= IXGBE_FCTRL_MPE;
3415 fctrl &= ~IXGBE_FCTRL_UPE;
3417 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3419 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3420 } /* ixgbe_if_multi_set */
3422 /************************************************************************
3423  * ixgbe_mc_array_itr
3425  *   An iterator function needed by the multicast shared code.
3426  *   It feeds the shared code routine the addresses in the
3427  *   array of ixgbe_set_multi() one by one.
3428  ************************************************************************/
/* On each call, interprets *update_ptr as the current mta[] element and
 * advances it to the next entry; return value lines elided in this view. */
3430 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3432 struct ixgbe_mc_addr *mta;
3434 mta = (struct ixgbe_mc_addr *)*update_ptr;
3437 *update_ptr = (u8*)(mta + 1);
3440 } /* ixgbe_mc_array_itr */
3442 /************************************************************************
3443  * ixgbe_local_timer - Timer routine
3445  *   Checks for link status, updates statistics,
3446  *   and runs the watchdog check.
3447  ************************************************************************/
/* iflib per-queue timer callback; only acts on one qid (guard elided in
 * this view), probes pluggable optics, refreshes cached link state, and
 * defers further work to the admin task. */
3449 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3451 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3456 /* Check for pluggable optics */
3458 if (!ixgbe_sfp_probe(ctx))
3459 return; /* Nothing to do */
3461 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3463 /* Fire off the adminq task */
3464 iflib_admin_intr_deferred(ctx);
3466 } /* ixgbe_if_timer */
3468 /************************************************************************
3471  *   Determine if a port had optics inserted.
3472  ************************************************************************/
/* ixgbe_sfp_probe - returns true when supported optics are present.
 * Only probes ixgbe_phy_nl PHYs whose SFP type reads as not-present;
 * on detection it resets the PHY and clears sc->sfp_probe. */
3474 ixgbe_sfp_probe(if_ctx_t ctx)
3476 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3477 struct ixgbe_hw *hw = &sc->hw;
3478 device_t dev = iflib_get_dev(ctx);
3479 bool result = false;
3481 if ((hw->phy.type == ixgbe_phy_nl) &&
3482 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3483 s32 ret = hw->phy.ops.identify_sfp(hw);
3486 ret = hw->phy.ops.reset(hw);
3487 sc->sfp_probe = false;
3488 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3489 device_printf(dev, "Unsupported SFP+ module detected!");
3491 "Reload driver with supported module.\n");
3494 device_printf(dev, "SFP+ module detected!\n");
3495 /* We now have supported optics */
3501 } /* ixgbe_sfp_probe */
3503 /************************************************************************
3504  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3505  ************************************************************************/
/* Identify a newly inserted SFP module and run MAC-specific setup.
 * With the crosstalk fix active, first checks the ESDP cage-present
 * bit (MAC-dependent) so spurious interrupts are ignored.  On success
 * requests the MSF task; on any failure it is cleared instead. */
3507 ixgbe_handle_mod(void *context)
3509 if_ctx_t ctx = context;
3510 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3511 struct ixgbe_hw *hw = &sc->hw;
3512 device_t dev = iflib_get_dev(ctx);
3513 u32 err, cage_full = 0;
3515 if (sc->hw.need_crosstalk_fix) {
3516 switch (hw->mac.type) {
3517 case ixgbe_mac_82599EB:
3518 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3521 case ixgbe_mac_X550EM_x:
3522 case ixgbe_mac_X550EM_a:
3523 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3531 goto handle_mod_out;
3534 err = hw->phy.ops.identify_sfp(hw);
3535 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3537 "Unsupported SFP+ module type was detected.\n");
3538 goto handle_mod_out;
/* 82598 uses a PHY reset; newer MACs run the SFP setup routine. */
3541 if (hw->mac.type == ixgbe_mac_82598EB)
3542 err = hw->phy.ops.reset(hw);
3544 err = hw->mac.ops.setup_sfp(hw);
3546 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3548 "Setup failure - unsupported SFP+ module type.\n");
3549 goto handle_mod_out;
3551 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3555 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3556 } /* ixgbe_handle_mod */
3559 /************************************************************************
3560  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3561  ************************************************************************/
/* Re-identify the supported PHY layer, (re)negotiate and set up the
 * link, then rebuild the ifmedia list so ifconfig reflects the media
 * types of the newly inserted module. */
3563 ixgbe_handle_msf(void *context)
3565 if_ctx_t ctx = context;
3566 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3567 struct ixgbe_hw *hw = &sc->hw;
3571 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3572 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3574 autoneg = hw->phy.autoneg_advertised;
/* Fall back to hardware link capabilities when nothing is advertised. */
3575 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3576 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3577 if (hw->mac.ops.setup_link)
3578 hw->mac.ops.setup_link(hw, autoneg, true);
3580 /* Adjust media types shown in ifconfig */
3581 ifmedia_removeall(sc->media);
3582 ixgbe_add_media_types(sc->ctx);
3583 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3584 } /* ixgbe_handle_msf */
3586 /************************************************************************
3587  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3588  ************************************************************************/
/* Service a Link Alarm Status Interrupt (LASI) from the external PHY;
 * an over-temperature condition is reported as critical, any other
 * error is logged with its code. */
3590 ixgbe_handle_phy(void *context)
3592 if_ctx_t ctx = context;
3593 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3594 struct ixgbe_hw *hw = &sc->hw;
3597 error = hw->phy.ops.handle_lasi(hw);
3598 if (error == IXGBE_ERR_OVERTEMP)
3599 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3601 device_printf(sc->dev,
3602 "Error handling LASI interrupt: %d\n", error);
3603 } /* ixgbe_handle_phy */
3605 /************************************************************************
3606  * ixgbe_if_stop - Stop the hardware
3608  *   Disables all traffic on the adapter by issuing a
3609  *   global reset on the MAC and deallocates TX/RX buffers.
3610  ************************************************************************/
3612 ixgbe_if_stop(if_ctx_t ctx)
3614 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3615 struct ixgbe_hw *hw = &sc->hw;
3617 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs. */
3620 hw->adapter_stopped = false;
3621 ixgbe_stop_adapter(hw);
3622 if (hw->mac.type == ixgbe_mac_82599EB)
3623 ixgbe_stop_mac_link_on_d3_82599(hw);
3624 /* Turn off the laser - noop with no optics */
3625 ixgbe_disable_tx_laser(hw);
3627 /* Update the stack */
3628 sc->link_up = false;
3629 ixgbe_if_update_admin_status(ctx);
3631 /* reprogram the RAR[0] in case user changed it. */
3632 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3635 } /* ixgbe_if_stop */
3637 /************************************************************************
3638  * ixgbe_update_link_status - Update OS on link state
3640  *   Note: Only updates the OS on the cached link state.
3641  *        The real check of the hardware only happens with
3643  ************************************************************************/
/* Admin task: propagate cached link state to the stack, refresh flow
 * control and DMA coalescing on link-up, ping VFs under SR-IOV, then
 * drain and dispatch the task_requests bits set by the MSI-X link
 * interrupt handler, and finally update statistics counters. */
3645 ixgbe_if_update_admin_status(if_ctx_t ctx)
3647 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3648 device_t dev = iflib_get_dev(ctx);
3651 if (sc->link_active == false) {
/* link_speed == 128 corresponds to the 10 Gbps speed encoding. */
3653 device_printf(dev, "Link is up %d Gbps %s \n",
3654 ((sc->link_speed == 128) ? 10 : 1),
3656 sc->link_active = true;
3657 /* Update any Flow Control changes */
3658 ixgbe_fc_enable(&sc->hw);
3659 /* Update DMA coalescing config */
3660 ixgbe_config_dmac(sc);
3661 /* should actually be negotiated value */
3662 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3664 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3665 ixgbe_ping_all_vfs(sc);
3667 } else { /* Link down */
3668 if (sc->link_active == true) {
3670 device_printf(dev, "Link is Down\n");
3671 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3672 sc->link_active = false;
3673 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3674 ixgbe_ping_all_vfs(sc);
3678 /* Handle task requests from msix_link() */
3679 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3680 ixgbe_handle_mod(ctx);
3681 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3682 ixgbe_handle_msf(ctx);
3683 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3684 ixgbe_handle_mbx(ctx);
3685 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3686 ixgbe_reinit_fdir(ctx);
3687 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3688 ixgbe_handle_phy(ctx);
3689 sc->task_requests = 0;
3691 ixgbe_update_stats_counters(sc);
3692 } /* ixgbe_if_update_admin_status */
3694 /************************************************************************
3695  * ixgbe_config_dmac - Configure DMA Coalescing
3696  ************************************************************************/
/* Only supported on X550 and later MACs with a dmac_config op.
 * Reprograms the hardware only when the watchdog timer or link speed
 * differ from the cached configuration. */
3698 ixgbe_config_dmac(struct ixgbe_softc *sc)
3700 struct ixgbe_hw *hw = &sc->hw;
3701 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3703 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR detects any change in either field. */
3706 if (dcfg->watchdog_timer ^ sc->dmac ||
3707 dcfg->link_speed ^ sc->link_speed) {
3708 dcfg->watchdog_timer = sc->dmac;
3709 dcfg->fcoe_en = false;
3710 dcfg->link_speed = sc->link_speed;
3713 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3714 dcfg->watchdog_timer, dcfg->link_speed);
3716 hw->mac.ops.dmac_config(hw);
3718 } /* ixgbe_config_dmac */
3720 /************************************************************************
3721  * ixgbe_if_enable_intr
3722  ************************************************************************/
/* Build the EIMS cause mask per MAC type and enabled features (ECC,
 * thermal sensor, SFP SDP pins, fan failure, SR-IOV mailbox, flow
 * director), program auto-clear for MSI-X, then enable every RX queue
 * vector individually and flush. */
3724 ixgbe_if_enable_intr(if_ctx_t ctx)
3726 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3727 struct ixgbe_hw *hw = &sc->hw;
3728 struct ix_rx_queue *que = sc->rx_queues;
3731 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3733 switch (sc->hw.mac.type) {
3734 case ixgbe_mac_82599EB:
3735 mask |= IXGBE_EIMS_ECC;
3736 /* Temperature sensor on some scs */
3737 mask |= IXGBE_EIMS_GPI_SDP0;
3738 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3739 mask |= IXGBE_EIMS_GPI_SDP1;
3740 mask |= IXGBE_EIMS_GPI_SDP2;
3742 case ixgbe_mac_X540:
3743 /* Detect if Thermal Sensor is enabled */
3744 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3745 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3746 mask |= IXGBE_EIMS_TS;
3747 mask |= IXGBE_EIMS_ECC;
3749 case ixgbe_mac_X550:
3750 /* MAC thermal sensor is automatically enabled */
3751 mask |= IXGBE_EIMS_TS;
3752 mask |= IXGBE_EIMS_ECC;
3754 case ixgbe_mac_X550EM_x:
3755 case ixgbe_mac_X550EM_a:
3756 /* Some devices use SDP0 for important information */
3757 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3758 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3759 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3760 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3761 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3762 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3763 mask |= IXGBE_EICR_GPI_SDP0_X540;
3764 mask |= IXGBE_EIMS_ECC;
3770 /* Enable Fan Failure detection */
3771 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3772 mask |= IXGBE_EIMS_GPI_SDP1;
3774 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3775 mask |= IXGBE_EIMS_MAILBOX;
3776 /* Enable Flow Director */
3777 if (sc->feat_en & IXGBE_FEATURE_FDIR)
3778 mask |= IXGBE_EIMS_FLOW_DIR;
3780 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3782 /* With MSI-X we use auto clear */
3783 if (sc->intr_type == IFLIB_INTR_MSIX) {
3784 mask = IXGBE_EIMS_ENABLE_MASK;
3785 /* Don't autoclear Link */
3786 mask &= ~IXGBE_EIMS_OTHER;
3787 mask &= ~IXGBE_EIMS_LSC;
3788 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
3789 mask &= ~IXGBE_EIMS_MAILBOX;
3790 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3794 * Now enable all queues, this is done separately to
3795 * allow for handling the extended (beyond 32) MSI-X
3796 * vectors that can be used by 82599
3798 for (int i = 0; i < sc->num_rx_queues; i++, que++)
3799 ixgbe_enable_queue(sc, que->msix);
3801 IXGBE_WRITE_FLUSH(hw);
3803 } /* ixgbe_if_enable_intr */
3805 /************************************************************************
3806  * ixgbe_disable_intr
3807  ************************************************************************/
/* Mask all interrupt causes: clear EIAC auto-clear under MSI-X, then
 * write EIMC (and the extended EIMC_EX registers on non-82598 MACs)
 * and flush the write. */
3809 ixgbe_if_disable_intr(if_ctx_t ctx)
3811 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3813 if (sc->intr_type == IFLIB_INTR_MSIX)
3814 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3815 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3816 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3818 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3819 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3820 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3822 IXGBE_WRITE_FLUSH(&sc->hw);
3824 } /* ixgbe_if_disable_intr */
3826 /************************************************************************
3827  * ixgbe_link_intr_enable
3828  ************************************************************************/
/* Re-arm only the link/other-cause interrupts (EIMS_OTHER | EIMS_LSC)
 * after the admin task has serviced a link interrupt. */
3830 ixgbe_link_intr_enable(if_ctx_t ctx)
3832 struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3834 /* Re-enable other interrupts */
3835 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3836 } /* ixgbe_link_intr_enable */
3838 /************************************************************************
3839  * ixgbe_if_rx_queue_intr_enable
3840  ************************************************************************/
/* iflib per-RX-queue interrupt re-enable: unmask the MSI-X vector
 * assigned to queue 'rxqid'. */
3842 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3844 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3845 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3847 ixgbe_enable_queue(sc, que->msix);
3850 } /* ixgbe_if_rx_queue_intr_enable */
3852 /************************************************************************
3853  * ixgbe_enable_queue
3854  ************************************************************************/
/* Unmask the interrupt for a single MSI-X vector.  82598 uses the
 * 32-bit EIMS register; newer MACs split the (up to 64) queue vectors
 * across EIMS_EX(0)/EIMS_EX(1). */
3856 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3858 struct ixgbe_hw *hw = &sc->hw;
3859 u64 queue = 1ULL << vector;
3862 if (hw->mac.type == ixgbe_mac_82598EB) {
3863 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3864 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3866 mask = (queue & 0xFFFFFFFF);
3868 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3869 mask = (queue >> 32);
3871 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3873 } /* ixgbe_enable_queue */
3875 /************************************************************************
3876  * ixgbe_disable_queue
3877  ************************************************************************/
/* Mask the interrupt for a single MSI-X vector — mirror image of
 * ixgbe_enable_queue(), writing EIMC/EIMC_EX instead of EIMS. */
3879 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
3881 struct ixgbe_hw *hw = &sc->hw;
3882 u64 queue = 1ULL << vector;
3885 if (hw->mac.type == ixgbe_mac_82598EB) {
3886 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3887 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3889 mask = (queue & 0xFFFFFFFF);
3891 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3892 mask = (queue >> 32);
3894 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3896 } /* ixgbe_disable_queue */
3898 /************************************************************************
3899  * ixgbe_intr - Legacy Interrupt Service Routine
3900  ************************************************************************/
/* Shared legacy/MSI filter: reads EICR once, handles fan failure, link
 * state change, SFP module/MSF and external-PHY causes by setting
 * task_requests bits, and returns FILTER_SCHEDULE_THREAD so iflib runs
 * the deferred work.  A zero EICR means the interrupt was not ours. */
3902 ixgbe_intr(void *arg)
3904 struct ixgbe_softc *sc = arg;
3905 struct ix_rx_queue *que = sc->rx_queues;
3906 struct ixgbe_hw *hw = &sc->hw;
3907 if_ctx_t ctx = sc->ctx;
3908 u32 eicr, eicr_mask;
/* Reading EICR acknowledges/clears the pending causes. */
3910 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3914 ixgbe_if_enable_intr(ctx);
3915 return (FILTER_HANDLED);
3918 /* Check for fan failure */
3919 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3920 (eicr & IXGBE_EICR_GPI_SDP1)) {
3921 device_printf(sc->dev,
3922 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3923 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3926 /* Link status change */
3927 if (eicr & IXGBE_EICR_LSC) {
3928 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3929 iflib_admin_intr_deferred(ctx);
3932 if (ixgbe_is_sfp(hw)) {
3933 /* Pluggable optics-related interrupt */
3934 if (hw->mac.type >= ixgbe_mac_X540)
3935 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3937 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3939 if (eicr & eicr_mask) {
3940 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3941 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
3944 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3945 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3946 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3947 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3948 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3952 /* External PHY interrupt */
3953 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3954 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3955 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
3957 return (FILTER_SCHEDULE_THREAD);
3960 /************************************************************************
3961  * ixgbe_free_pci_resources
3962  ************************************************************************/
/* Release the admin MSI-X IRQ, each RX queue IRQ, and the memory-mapped
 * BAR resource.  Safe to call with a partially attached device: the BAR
 * release is guarded by a NULL check. */
3964 ixgbe_free_pci_resources(if_ctx_t ctx)
3966 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3967 struct ix_rx_queue *que = sc->rx_queues;
3968 device_t dev = iflib_get_dev(ctx);
3970 /* Release all MSI-X queue resources */
3971 if (sc->intr_type == IFLIB_INTR_MSIX)
3972 iflib_irq_free(ctx, &sc->irq);
3975 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
3976 iflib_irq_free(ctx, &que->que_irq);
3980 if (sc->pci_mem != NULL)
3981 bus_release_resource(dev, SYS_RES_MEMORY,
3982 rman_get_rid(sc->pci_mem), sc->pci_mem);
3983 } /* ixgbe_free_pci_resources */
3985 /************************************************************************
3986  * ixgbe_sysctl_flowcntl
3988  *   SYSCTL wrapper around setting Flow Control
3989  ************************************************************************/
/* Reads the current mode, lets the user modify it via sysctl_handle_int,
 * and applies it through ixgbe_set_flowcntl() only when it changed. */
3991 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3993 struct ixgbe_softc *sc;
3996 sc = (struct ixgbe_softc *)arg1;
3997 fc = sc->hw.fc.current_mode;
3999 error = sysctl_handle_int(oidp, &fc, 0, req);
/* newptr == NULL means this was a read, not a write. */
4000 if ((error) || (req->newptr == NULL))
4003 /* Don't bother if it's not changed */
4004 if (fc == sc->hw.fc.current_mode)
4007 return ixgbe_set_flowcntl(sc, fc);
4008 } /* ixgbe_sysctl_flowcntl */
4010 /************************************************************************
4011  * ixgbe_set_flowcntl - Set flow control
4013  *   Flow control values:
4018  ************************************************************************/
/* Validates and applies the requested flow-control mode.  RX drop-on-full
 * is disabled while pause frames are in use (and re-enabled otherwise),
 * but only with multiple RX queues; autoneg is disabled when forcing. */
4020 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4023 case ixgbe_fc_rx_pause:
4024 case ixgbe_fc_tx_pause:
4026 sc->hw.fc.requested_mode = fc;
4027 if (sc->num_rx_queues > 1)
4028 ixgbe_disable_rx_drop(sc);
4031 sc->hw.fc.requested_mode = ixgbe_fc_none;
4032 if (sc->num_rx_queues > 1)
4033 ixgbe_enable_rx_drop(sc);
4039 /* Don't autoneg if forcing a value */
4040 sc->hw.fc.disable_fc_autoneg = true;
4041 ixgbe_fc_enable(&sc->hw);
4044 } /* ixgbe_set_flowcntl */
4046 /************************************************************************
4047  * ixgbe_enable_rx_drop
4049  *   Enable the hardware to drop packets when the buffer is
4050  *   full. This is useful with multiqueue, so that no single
4051  *   queue being full stalls the entire RX engine. We only
4052  *   enable this when Multiqueue is enabled AND Flow Control
4054  ************************************************************************/
4056 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4058 struct ixgbe_hw *hw = &sc->hw;
4059 struct rx_ring *rxr;
/* Set SRRCTL.DROP_EN on every PF RX queue... */
4062 for (int i = 0; i < sc->num_rx_queues; i++) {
4063 rxr = &sc->rx_queues[i].rxr;
4064 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4065 srrctl |= IXGBE_SRRCTL_DROP_EN;
4066 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4069 /* enable drop for each vf */
4070 for (int i = 0; i < sc->num_vfs; i++) {
4071 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4072 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4075 } /* ixgbe_enable_rx_drop */
4077 /************************************************************************
4078  * ixgbe_disable_rx_drop
4079  ************************************************************************/
/* Inverse of ixgbe_enable_rx_drop(): clear SRRCTL.DROP_EN on every PF
 * RX queue and clear the QDE drop-enable bit for each VF. */
4081 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4083 struct ixgbe_hw *hw = &sc->hw;
4084 struct rx_ring *rxr;
4087 for (int i = 0; i < sc->num_rx_queues; i++) {
4088 rxr = &sc->rx_queues[i].rxr;
4089 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4090 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4091 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4094 /* disable drop for each vf */
4095 for (int i = 0; i < sc->num_vfs; i++) {
4096 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4097 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4099 } /* ixgbe_disable_rx_drop */
4101 /************************************************************************
4102 * ixgbe_sysctl_advertise
4104 * SYSCTL wrapper around setting advertised speed
4105 ************************************************************************/
4107 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4109 struct ixgbe_softc *sc;
4110 int error, advertise;
4112 sc = (struct ixgbe_softc *)arg1;
4113 advertise = sc->advertise;
4115 error = sysctl_handle_int(oidp, &advertise, 0, req);
4116 if ((error) || (req->newptr == NULL))
4119 return ixgbe_set_advertise(sc, advertise);
4120 } /* ixgbe_sysctl_advertise */
4122 /************************************************************************
4123 * ixgbe_set_advertise - Control advertised link speed
4126 * 0x1 - advertise 100 Mb
4127 * 0x2 - advertise 1G
4128 * 0x4 - advertise 10G
4129 * 0x8 - advertise 10 Mb (yes, Mb)
4130 * 0x10 - advertise 2.5G (disabled by default)
4131 * 0x20 - advertise 5G (disabled by default)
4133 ************************************************************************/
4135 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4137 device_t dev = iflib_get_dev(sc->ctx);
4138 struct ixgbe_hw *hw;
4139 ixgbe_link_speed speed = 0;
4140 ixgbe_link_speed link_caps = 0;
4141 s32 err = IXGBE_NOT_IMPLEMENTED;
4142 bool negotiate = false;
4144 /* Checks to validate new value */
4145 if (sc->advertise == advertise) /* no change */
4150 /* No speed changes for backplane media */
4151 if (hw->phy.media_type == ixgbe_media_type_backplane)
4154 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4155 (hw->phy.multispeed_fiber))) {
4156 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4160 if (advertise < 0x1 || advertise > 0x3F) {
4161 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
4165 if (hw->mac.ops.get_link_capabilities) {
4166 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4168 if (err != IXGBE_SUCCESS) {
4169 device_printf(dev, "Unable to determine supported advertise speeds\n");
4174 /* Set new value and report new advertised mode */
4175 if (advertise & 0x1) {
4176 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4177 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4180 speed |= IXGBE_LINK_SPEED_100_FULL;
4182 if (advertise & 0x2) {
4183 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4184 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4187 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4189 if (advertise & 0x4) {
4190 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4191 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4194 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4196 if (advertise & 0x8) {
4197 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4198 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4201 speed |= IXGBE_LINK_SPEED_10_FULL;
4203 if (advertise & 0x10) {
4204 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4205 device_printf(dev, "Interface does not support 2.5G advertised speed\n");
4208 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4210 if (advertise & 0x20) {
4211 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4212 device_printf(dev, "Interface does not support 5G advertised speed\n");
4215 speed |= IXGBE_LINK_SPEED_5GB_FULL;
4218 hw->mac.autotry_restart = true;
4219 hw->mac.ops.setup_link(hw, speed, true);
4220 sc->advertise = advertise;
4223 } /* ixgbe_set_advertise */
4225 /************************************************************************
4226 * ixgbe_get_default_advertise - Get default advertised speed settings
4228 * Formatted for sysctl usage.
4230 * 0x1 - advertise 100 Mb
4231 * 0x2 - advertise 1G
4232 * 0x4 - advertise 10G
4233 * 0x8 - advertise 10 Mb (yes, Mb)
4234 * 0x10 - advertise 2.5G (disabled by default)
4235 * 0x20 - advertise 5G (disabled by default)
4236 ************************************************************************/
4238 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4240 struct ixgbe_hw *hw = &sc->hw;
4242 ixgbe_link_speed link_caps = 0;
4244 bool negotiate = false;
4247 * Advertised speed means nothing unless it's copper or
4250 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4251 !(hw->phy.multispeed_fiber))
4254 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4255 if (err != IXGBE_SUCCESS)
4258 if (hw->mac.type == ixgbe_mac_X550) {
4260 * 2.5G and 5G autonegotiation speeds on X550
4261 * are disabled by default due to reported
4262 * interoperability issues with some switches.
4264 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4265 IXGBE_LINK_SPEED_5GB_FULL);
4269 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4270 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4271 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4272 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4273 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4274 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4277 } /* ixgbe_get_default_advertise */
4279 /************************************************************************
4280 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4283 * 0/1 - off / on (use default value of 1000)
4285 * Legal timer values are:
4286 * 50,100,250,500,1000,2000,5000,10000
4288 * Turning off interrupt moderation will also turn this off.
4289 ************************************************************************/
4291 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4293 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4294 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
4299 error = sysctl_handle_16(oidp, &newval, 0, req);
4300 if ((error) || (req->newptr == NULL))
4309 /* Enable and use default */
4320 /* Legal values - allow */
4324 /* Do nothing, illegal value */
4328 /* Re-initialize hardware if it's already running */
4329 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4333 } /* ixgbe_sysctl_dmac */
4336 /************************************************************************
4337 * ixgbe_sysctl_power_state
4339 * Sysctl to test power states
4341 * 0 - set device to D0
4342 * 3 - set device to D3
4343 * (none) - get current device power state
4344 ************************************************************************/
4346 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4348 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4349 device_t dev = sc->dev;
4350 int curr_ps, new_ps, error = 0;
4352 curr_ps = new_ps = pci_get_powerstate(dev);
4354 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4355 if ((error) || (req->newptr == NULL))
4358 if (new_ps == curr_ps)
4361 if (new_ps == 3 && curr_ps == 0)
4362 error = DEVICE_SUSPEND(dev);
4363 else if (new_ps == 0 && curr_ps == 3)
4364 error = DEVICE_RESUME(dev);
4368 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4371 } /* ixgbe_sysctl_power_state */
4374 /************************************************************************
4375 * ixgbe_sysctl_wol_enable
4377 * Sysctl to enable/disable the WoL capability,
4378 * if supported by the adapter.
4383 ************************************************************************/
4385 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4387 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4388 struct ixgbe_hw *hw = &sc->hw;
4389 int new_wol_enabled;
4392 new_wol_enabled = hw->wol_enabled;
4393 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4394 if ((error) || (req->newptr == NULL))
4396 new_wol_enabled = !!(new_wol_enabled);
4397 if (new_wol_enabled == hw->wol_enabled)
4400 if (new_wol_enabled > 0 && !sc->wol_support)
4403 hw->wol_enabled = new_wol_enabled;
4406 } /* ixgbe_sysctl_wol_enable */
4408 /************************************************************************
4409 * ixgbe_sysctl_wufc - Wake Up Filter Control
4411 * Sysctl to enable/disable the types of packets that the
4412 * adapter will wake up on upon receipt.
4414 * 0x1 - Link Status Change
4415 * 0x2 - Magic Packet
4416 * 0x4 - Direct Exact
4417 * 0x8 - Directed Multicast
4419 * 0x20 - ARP/IPv4 Request Packet
4420 * 0x40 - Direct IPv4 Packet
4421 * 0x80 - Direct IPv6 Packet
4423 * Settings not listed above will cause the sysctl to return an error.
4424 ************************************************************************/
4426 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4428 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4432 new_wufc = sc->wufc;
4434 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4435 if ((error) || (req->newptr == NULL))
4437 if (new_wufc == sc->wufc)
4440 if (new_wufc & 0xffffff00)
4444 new_wufc |= (0xffffff & sc->wufc);
4445 sc->wufc = new_wufc;
4448 } /* ixgbe_sysctl_wufc */
4451 /************************************************************************
4452 * ixgbe_sysctl_print_rss_config
4453 ************************************************************************/
4455 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4457 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4458 struct ixgbe_hw *hw = &sc->hw;
4459 device_t dev = sc->dev;
4461 int error = 0, reta_size;
4464 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4466 device_printf(dev, "Could not allocate sbuf for output.\n");
4470 // TODO: use sbufs to make a string to print out
4471 /* Set multiplier for RETA setup and table size based on MAC */
4472 switch (sc->hw.mac.type) {
4473 case ixgbe_mac_X550:
4474 case ixgbe_mac_X550EM_x:
4475 case ixgbe_mac_X550EM_a:
4483 /* Print out the redirection table */
4484 sbuf_cat(buf, "\n");
4485 for (int i = 0; i < reta_size; i++) {
4487 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4488 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4490 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4491 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4495 // TODO: print more config
4497 error = sbuf_finish(buf);
4499 device_printf(dev, "Error finishing sbuf: %d\n", error);
4504 } /* ixgbe_sysctl_print_rss_config */
4505 #endif /* IXGBE_DEBUG */
4507 /************************************************************************
4508 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4510 * For X552/X557-AT devices using an external PHY
4511 ************************************************************************/
4513 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4515 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4516 struct ixgbe_hw *hw = &sc->hw;
4519 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4520 device_printf(iflib_get_dev(sc->ctx),
4521 "Device has no supported external thermal sensor.\n");
4525 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4526 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4527 device_printf(iflib_get_dev(sc->ctx),
4528 "Error reading from PHY's current temperature register\n");
4532 /* Shift temp for output */
4535 return (sysctl_handle_16(oidp, NULL, reg, req));
4536 } /* ixgbe_sysctl_phy_temp */
4538 /************************************************************************
4539 * ixgbe_sysctl_phy_overtemp_occurred
4541 * Reports (directly from the PHY) whether the current PHY
4542 * temperature is over the overtemp threshold.
4543 ************************************************************************/
4545 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4547 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4548 struct ixgbe_hw *hw = &sc->hw;
4551 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4552 device_printf(iflib_get_dev(sc->ctx),
4553 "Device has no supported external thermal sensor.\n");
4557 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4558 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4559 device_printf(iflib_get_dev(sc->ctx),
4560 "Error reading from PHY's temperature status register\n");
4564 /* Get occurrence bit */
4565 reg = !!(reg & 0x4000);
4567 return (sysctl_handle_16(oidp, 0, reg, req));
4568 } /* ixgbe_sysctl_phy_overtemp_occurred */
4570 /************************************************************************
4571 * ixgbe_sysctl_eee_state
4573 * Sysctl to set EEE power saving feature
4577 * (none) - get current device EEE state
4578 ************************************************************************/
4580 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4582 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4583 device_t dev = sc->dev;
4584 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
4585 int curr_eee, new_eee, error = 0;
4588 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4590 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4591 if ((error) || (req->newptr == NULL))
4595 if (new_eee == curr_eee)
4599 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4602 /* Bounds checking */
4603 if ((new_eee < 0) || (new_eee > 1))
4606 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4608 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4612 /* Restart auto-neg */
4615 device_printf(dev, "New EEE state: %d\n", new_eee);
4617 /* Cache new value */
4619 sc->feat_en |= IXGBE_FEATURE_EEE;
4621 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4624 } /* ixgbe_sysctl_eee_state */
4626 /************************************************************************
4627 * ixgbe_init_device_features
4628 ************************************************************************/
4630 ixgbe_init_device_features(struct ixgbe_softc *sc)
4632 sc->feat_cap = IXGBE_FEATURE_NETMAP
4635 | IXGBE_FEATURE_MSIX
4636 | IXGBE_FEATURE_LEGACY_IRQ;
4638 /* Set capabilities first... */
4639 switch (sc->hw.mac.type) {
4640 case ixgbe_mac_82598EB:
4641 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4642 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4644 case ixgbe_mac_X540:
4645 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4646 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4647 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4648 (sc->hw.bus.func == 0))
4649 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4651 case ixgbe_mac_X550:
4652 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4653 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4654 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4656 case ixgbe_mac_X550EM_x:
4657 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4658 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4660 case ixgbe_mac_X550EM_a:
4661 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4662 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4663 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4664 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4665 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4666 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4667 sc->feat_cap |= IXGBE_FEATURE_EEE;
4670 case ixgbe_mac_82599EB:
4671 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4672 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4673 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4674 (sc->hw.bus.func == 0))
4675 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4676 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4677 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4683 /* Enabled by default... */
4684 /* Fan failure detection */
4685 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4686 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4688 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4689 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4691 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4692 sc->feat_en |= IXGBE_FEATURE_EEE;
4693 /* Thermal Sensor */
4694 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4695 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4697 /* Enabled via global sysctl... */
4699 if (ixgbe_enable_fdir) {
4700 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4701 sc->feat_en |= IXGBE_FEATURE_FDIR;
4703 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
4706 * Message Signal Interrupts - Extended (MSI-X)
4707 * Normal MSI is only enabled if MSI-X calls fail.
4709 if (!ixgbe_enable_msix)
4710 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4711 /* Receive-Side Scaling (RSS) */
4712 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4713 sc->feat_en |= IXGBE_FEATURE_RSS;
4715 /* Disable features with unmet dependencies... */
4717 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4718 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4719 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4720 sc->feat_en &= ~IXGBE_FEATURE_RSS;
4721 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4723 } /* ixgbe_init_device_features */
4725 /************************************************************************
4726 * ixgbe_check_fan_failure
4727 ************************************************************************/
4729 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
4733 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
4737 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4738 } /* ixgbe_check_fan_failure */
4740 /************************************************************************
4741 * ixgbe_sbuf_fw_version
4742 ************************************************************************/
4744 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4746 struct ixgbe_nvm_version nvm_ver = {0};
4749 const char *space = "";
4751 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4752 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4753 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4754 status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4756 if (nvm_ver.oem_valid) {
4757 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4758 nvm_ver.oem_minor, nvm_ver.oem_release);
4762 if (nvm_ver.or_valid) {
4763 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4764 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
4768 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4770 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
4774 if (phyfw != 0 && status == IXGBE_SUCCESS)
4775 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4776 } /* ixgbe_sbuf_fw_version */
4778 /************************************************************************
4779 * ixgbe_print_fw_version
4780 ************************************************************************/
4782 ixgbe_print_fw_version(if_ctx_t ctx)
4784 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4785 struct ixgbe_hw *hw = &sc->hw;
4786 device_t dev = sc->dev;
4790 buf = sbuf_new_auto();
4792 device_printf(dev, "Could not allocate sbuf for output.\n");
4796 ixgbe_sbuf_fw_version(hw, buf);
4798 error = sbuf_finish(buf);
4800 device_printf(dev, "Error finishing sbuf: %d\n", error);
4801 else if (sbuf_len(buf))
4802 device_printf(dev, "%s\n", sbuf_data(buf));
4805 } /* ixgbe_print_fw_version */
4807 /************************************************************************
4808 * ixgbe_sysctl_print_fw_version
4809 ************************************************************************/
4811 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4813 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4814 struct ixgbe_hw *hw = &sc->hw;
4815 device_t dev = sc->dev;
4819 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4821 device_printf(dev, "Could not allocate sbuf for output.\n");
4825 ixgbe_sbuf_fw_version(hw, buf);
4827 error = sbuf_finish(buf);
4829 device_printf(dev, "Error finishing sbuf: %d\n", error);
4834 } /* ixgbe_sysctl_print_fw_version */