1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
40 #include "ixgbe_sriov.h"
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
46 /************************************************************************
48 ************************************************************************/
49 char ixgbe_driver_version[] = "4.0.1-k";
51 /************************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 ************************************************************************/
60 static pci_vendor_info_t ixgbe_vendor_info_array[] =
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
106 /* required last entry */
110 static void *ixgbe_register(device_t);
111 static int ixgbe_if_attach_pre(if_ctx_t);
112 static int ixgbe_if_attach_post(if_ctx_t);
113 static int ixgbe_if_detach(if_ctx_t);
114 static int ixgbe_if_shutdown(if_ctx_t);
115 static int ixgbe_if_suspend(if_ctx_t);
116 static int ixgbe_if_resume(if_ctx_t);
118 static void ixgbe_if_stop(if_ctx_t);
119 void ixgbe_if_enable_intr(if_ctx_t);
120 static void ixgbe_if_disable_intr(if_ctx_t);
121 static void ixgbe_link_intr_enable(if_ctx_t);
122 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
124 static int ixgbe_if_media_change(if_ctx_t);
125 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
128 static void ixgbe_if_multi_set(if_ctx_t);
129 static int ixgbe_if_promisc_set(if_ctx_t, int);
130 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
131 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
132 static void ixgbe_if_queues_free(if_ctx_t);
133 static void ixgbe_if_timer(if_ctx_t, uint16_t);
134 static void ixgbe_if_update_admin_status(if_ctx_t);
135 static void ixgbe_if_vlan_register(if_ctx_t, u16);
136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
137 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
139 int ixgbe_intr(void *);
141 /************************************************************************
142 * Function prototypes
143 ************************************************************************/
144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
148 static void ixgbe_add_device_sysctls(if_ctx_t);
149 static int ixgbe_allocate_pci_resources(if_ctx_t);
150 static int ixgbe_setup_low_power_mode(if_ctx_t);
152 static void ixgbe_config_dmac(struct ixgbe_softc *);
153 static void ixgbe_configure_ivars(struct ixgbe_softc *);
154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
155 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
156 static bool ixgbe_sfp_probe(if_ctx_t);
158 static void ixgbe_free_pci_resources(if_ctx_t);
160 static int ixgbe_msix_link(void *);
161 static int ixgbe_msix_que(void *);
162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
163 static void ixgbe_initialize_receive_units(if_ctx_t);
164 static void ixgbe_initialize_transmit_units(if_ctx_t);
166 static int ixgbe_setup_interface(if_ctx_t);
167 static void ixgbe_init_device_features(struct ixgbe_softc *);
168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
170 static void ixgbe_print_fw_version(if_ctx_t);
171 static void ixgbe_add_media_types(if_ctx_t);
172 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
173 static void ixgbe_config_link(if_ctx_t);
174 static void ixgbe_get_slot_info(struct ixgbe_softc *);
175 static void ixgbe_check_wol_support(struct ixgbe_softc *);
176 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
177 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
179 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
180 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
181 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
182 static int ixgbe_get_default_advertise(struct ixgbe_softc *);
183 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
184 static void ixgbe_config_gpie(struct ixgbe_softc *);
185 static void ixgbe_config_delay_values(struct ixgbe_softc *);
187 /* Sysctl handlers */
188 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
200 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
201 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
207 /* Deferred interrupt tasklets */
208 static void ixgbe_handle_msf(void *);
209 static void ixgbe_handle_mod(void *);
210 static void ixgbe_handle_phy(void *);
212 /************************************************************************
213 * FreeBSD Device Interface Entry Points
214 ************************************************************************/
215 static device_method_t ix_methods[] = {
216 /* Device interface */
217 DEVMETHOD(device_register, ixgbe_register),
218 DEVMETHOD(device_probe, iflib_device_probe),
219 DEVMETHOD(device_attach, iflib_device_attach),
220 DEVMETHOD(device_detach, iflib_device_detach),
221 DEVMETHOD(device_shutdown, iflib_device_shutdown),
222 DEVMETHOD(device_suspend, iflib_device_suspend),
223 DEVMETHOD(device_resume, iflib_device_resume),
225 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
226 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
227 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
232 static driver_t ix_driver = {
233 "ix", ix_methods, sizeof(struct ixgbe_softc),
236 devclass_t ix_devclass;
237 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
238 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
239 MODULE_DEPEND(ix, pci, 1, 1, 1);
240 MODULE_DEPEND(ix, ether, 1, 1, 1);
241 MODULE_DEPEND(ix, iflib, 1, 1, 1);
243 static device_method_t ixgbe_if_methods[] = {
244 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
245 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
246 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
247 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
248 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
249 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
250 DEVMETHOD(ifdi_init, ixgbe_if_init),
251 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
252 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
253 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
254 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
255 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
256 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
259 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
260 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
261 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
262 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
263 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
264 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
265 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
266 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
267 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
268 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
269 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
270 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
271 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
272 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
273 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
275 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
283 * TUNEABLE PARAMETERS:
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
287 "IXGBE driver parameters");
288 static driver_t ixgbe_if_driver = {
289 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
292 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
293 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
294 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
296 /* Flow control setting, default to full */
297 static int ixgbe_flow_control = ixgbe_fc_full;
298 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
299 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
301 /* Advertise Speed, default to 0 (auto) */
302 static int ixgbe_advertise_speed = 0;
303 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
304 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
307 * Smart speed setting, default to on
308 * this only works as a compile option
309 * right now as its during attach, set
310 * this to 'ixgbe_smart_speed_off' to
313 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
316 * MSI-X should be the default for best performance,
317 * but this allows it to be forced off for testing.
319 static int ixgbe_enable_msix = 1;
320 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
321 "Enable MSI-X interrupts");
324 * Defining this on will allow the use
325 * of unsupported SFP+ modules, note that
326 * doing so you are on your own :)
328 static int allow_unsupported_sfp = false;
329 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
330 &allow_unsupported_sfp, 0,
331 "Allow unsupported SFP modules...use at your own risk");
334 * Not sure if Flow Director is fully baked,
335 * so we'll default to turning it off.
337 static int ixgbe_enable_fdir = 0;
338 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
339 "Enable Flow Director");
341 /* Receive-Side Scaling */
342 static int ixgbe_enable_rss = 1;
343 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
344 "Enable Receive-Side Scaling (RSS)");
347 * AIM: Adaptive Interrupt Moderation
348 * which means that the interrupt rate
349 * is varied over time based on the
350 * traffic for that interrupt vector
352 static int ixgbe_enable_aim = false;
353 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
354 "Enable adaptive interrupt moderation");
357 /* Keep running tab on them for sanity check */
358 static int ixgbe_total_ports;
361 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
364 * For Flow Director: this is the number of TX packets we sample
365 * for the filter pool, this means every 20th packet will be probed.
367 * This feature can be disabled by setting this to 0.
369 static int atr_sample_rate = 20;
371 extern struct if_txrx ixgbe_txrx;
373 static struct if_shared_ctx ixgbe_sctx_init = {
374 .isc_magic = IFLIB_MAGIC,
375 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
376 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
377 .isc_tx_maxsegsize = PAGE_SIZE,
378 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
379 .isc_tso_maxsegsize = PAGE_SIZE,
380 .isc_rx_maxsize = PAGE_SIZE*4,
381 .isc_rx_nsegments = 1,
382 .isc_rx_maxsegsize = PAGE_SIZE*4,
387 .isc_admin_intrcnt = 1,
388 .isc_vendor_info = ixgbe_vendor_info_array,
389 .isc_driver_version = ixgbe_driver_version,
390 .isc_driver = &ixgbe_if_driver,
391 .isc_flags = IFLIB_TSO_INIT_IP,
393 .isc_nrxd_min = {MIN_RXD},
394 .isc_ntxd_min = {MIN_TXD},
395 .isc_nrxd_max = {MAX_RXD},
396 .isc_ntxd_max = {MAX_TXD},
397 .isc_nrxd_default = {DEFAULT_RXD},
398 .isc_ntxd_default = {DEFAULT_TXD},
401 /************************************************************************
402 * ixgbe_if_tx_queues_alloc
403 ************************************************************************/
/*
 * iflib callback: allocate per-queue TX software state for ntxqsets queue
 * sets and attach each ring to the descriptor DMA memory that iflib has
 * already allocated (vaddrs/paddrs).  Returns 0 on success, ENOMEM on
 * allocation failure (after freeing any partial state).
 */
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;

	/* iflib must have negotiated a positive, matching queue count. */
	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);

	/* Allocate queue structure memory */
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
		/* Mark every report-status slot unused until first use. */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",

	/* Failure path: release any queue state allocated so far. */
	ixgbe_if_queues_free(ctx);
} /* ixgbe_if_tx_queues_alloc */
469 /************************************************************************
470 * ixgbe_if_rx_queues_alloc
471 ************************************************************************/
473 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
474 int nrxqs, int nrxqsets)
476 struct ixgbe_softc *sc = iflib_get_softc(ctx);
477 struct ix_rx_queue *que;
480 MPASS(sc->num_rx_queues > 0);
481 MPASS(sc->num_rx_queues == nrxqsets);
484 /* Allocate queue structure memory */
486 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
487 M_IXGBE, M_NOWAIT | M_ZERO);
488 if (!sc->rx_queues) {
489 device_printf(iflib_get_dev(ctx),
490 "Unable to allocate TX ring memory\n");
494 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
495 struct rx_ring *rxr = &que->rxr;
497 /* In case SR-IOV is enabled, align the index properly */
498 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
501 rxr->sc = que->sc = sc;
503 /* get the virtual and physical address of the hw queues */
504 rxr->tail = IXGBE_RDT(rxr->me);
505 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
506 rxr->rx_paddr = paddrs[i];
511 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
515 } /* ixgbe_if_rx_queues_alloc */
517 /************************************************************************
518 * ixgbe_if_queues_free
519 ************************************************************************/
/*
 * iflib callback: release all software queue state allocated by
 * ixgbe_if_tx_queues_alloc()/ixgbe_if_rx_queues_alloc().  Safe to call
 * on a partially-initialized softc: NULL pointers are skipped, and
 * freed pointers are reset to NULL so a repeat call is harmless.
 */
ixgbe_if_queues_free(if_ctx_t ctx)
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;

	if (tx_que != NULL) {
		/* Free each ring's report-status array before the queue array. */
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)

			free(txr->tx_rsq, M_IXGBE);

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;

	/* RX queues carry no per-ring allocations; free the array itself. */
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
} /* ixgbe_if_queues_free */
547 /************************************************************************
548 * ixgbe_initialize_rss_mapping
549 ************************************************************************/
/*
 * Program the hardware RSS redirection table (RETA/ERETA), hash key
 * (RSSRK) and hash-field selection (MRQC).
 *
 * When IXGBE_FEATURE_RSS is enabled the key and bucket mapping come from
 * the kernel RSS subsystem (rss_getkey()/rss_get_indirection_to_bucket());
 * otherwise a random key is generated and queues are assigned round-robin.
 */
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);

	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* Wrap the queue index once all RX queues have been used. */
		if (j == sc->num_rx_queues)

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
			queue_id = (j * index_mult);

		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		reta = reta | (((uint32_t)queue_id) << 24);
		/* Every 4th entry completes a 32-bit RETA register word. */
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			/* Entries past 128 go into the extended table. */
			IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;

	/* Translate the hash-type flags into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
659 /************************************************************************
660 * ixgbe_initialize_receive_units - Setup receive registers and features.
661 ************************************************************************/
662 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Program the receive side of the hardware: broadcast/filter control
 * (FCTRL), jumbo-frame enable (HLREG0), per-ring descriptor base/length
 * and SRRCTL, head/tail pointers, packet-split type (PSRTYPE), RSS
 * mapping, and RX checksum offload (RXCSUM).
 */
ixgbe_initialize_receive_units(if_ctx_t ctx)
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	u32 bufsz, fctrl, srrctl, rxcsum;

	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598 additionally discards pause frames and passes MAC control. */
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Round the mbuf buffer size up to the SRRCTL 1KB granularity. */
	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);

	/* 82598 has no PSRTYPE register; all later MACs do. */
	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */
774 /************************************************************************
775 * ixgbe_initialize_transmit_units - Enable transmit units.
776 ************************************************************************/
/*
 * Program the transmit side of the hardware: per-ring descriptor
 * base/length (TDBAL/TDBAH/TDLEN), head/tail pointers, head-writeback
 * disable (DCA_TXCTRL), and on non-82598 parts the DMA TX enable
 * (DMATXCTL) and multi-queue transmit config (MTQC).
 */
ixgbe_initialize_transmit_units(if_ctx_t ctx)
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Reset the report-status tracking for this ring. */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
} /* ixgbe_initialize_transmit_units */
856 /************************************************************************
858 ************************************************************************/
/*
 * device_register method: hand iflib the driver's shared-context
 * template (queue sizing, DMA limits, device table, ifdi methods).
 */
ixgbe_register(device_t dev)
	return (&ixgbe_sctx_init);
} /* ixgbe_register */
865 /************************************************************************
866 * ixgbe_if_attach_pre - Device initialization routine, part 1
868 * Called when the driver is being loaded.
869 * Identifies the type of hardware, initializes the hardware,
870 * and initializes iflib structures.
872 * return 0 on success, positive on failure
873 ************************************************************************/
875 ixgbe_if_attach_pre(if_ctx_t ctx)
/*
 * NOTE(review): several lines (local declarations, error returns/gotos,
 * switch/break statements, closing braces) are elided in this excerpt;
 * comments below describe only what the visible code shows.
 */
877 	struct ixgbe_softc *sc;
879 	if_softc_ctx_t scctx;
884 	INIT_DEBUGOUT("ixgbe_attach: begin");
886 	/* Allocate, clear, and link in our adapter structure */
887 	dev = iflib_get_dev(ctx);
888 	sc = iflib_get_softc(ctx);
892 	scctx = sc->shared = iflib_get_softc_ctx(ctx);
893 	sc->media = iflib_get_media(ctx);
896 	/* Determine hardware revision */
897 	hw->vendor_id = pci_get_vendor(dev);
898 	hw->device_id = pci_get_device(dev);
899 	hw->revision_id = pci_get_revid(dev);
900 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
901 	hw->subsystem_device_id = pci_get_subdevice(dev);
903 	/* Do base PCI setup - map BAR0 */
904 	if (ixgbe_allocate_pci_resources(ctx)) {
905 		device_printf(dev, "Allocation of PCI resources failed\n");
909 	/* let hardware know driver is loaded */
910 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
911 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
912 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
915 	 * Initialize the shared code
917 	if (ixgbe_init_shared_code(hw) != 0) {
918 		device_printf(dev, "Unable to initialize the shared code\n");
	/* Mailbox ops are only populated for SR-IOV capable MACs; guard before use */
923 	if (hw->mbx.ops.init_params)
924 		hw->mbx.ops.init_params(hw);
926 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
928 	if (hw->mac.type != ixgbe_mac_82598EB)
929 		hw->phy.smart_speed = ixgbe_smart_speed;
931 	ixgbe_init_device_features(sc);
933 	/* Enable WoL (if supported) */
934 	ixgbe_check_wol_support(sc);
936 	/* Verify adapter fan is still functional (if applicable) */
937 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
938 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
939 		ixgbe_check_fan_failure(sc, esdp, false);
942 	/* Ensure SW/FW semaphore is free */
943 	ixgbe_init_swfw_semaphore(hw);
945 	/* Set an initial default flow control value */
946 	hw->fc.requested_mode = ixgbe_flow_control;
	/* Allow reset to proceed even if PHY reports over-temperature */
948 	hw->phy.reset_if_overtemp = true;
949 	error = ixgbe_reset_hw(hw);
950 	hw->phy.reset_if_overtemp = false;
951 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
953 		 * No optics in this port, set up
954 		 * so the timer routine will probe
955 		 * for later insertion.
957 		sc->sfp_probe = true;
959 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
960 		device_printf(dev, "Unsupported SFP+ module detected!\n");
964 		device_printf(dev, "Hardware initialization failed\n");
969 	/* Make sure we have a good EEPROM before we read from it */
970 	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
971 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
	/* NOTE(review): switch header for this error dispatch is elided */
976 	error = ixgbe_start_hw(hw);
978 	case IXGBE_ERR_EEPROM_VERSION:
979 		device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
981 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
982 		device_printf(dev, "Unsupported SFP+ Module\n");
985 	case IXGBE_ERR_SFP_NOT_PRESENT:
986 		device_printf(dev, "No SFP+ Module found\n");
992 	/* Most of the iflib initialization... */
994 	iflib_set_mac(ctx, hw->mac.addr);
	/* X550-family MACs get a larger RSS table and more queue pairs */
995 	switch (sc->hw.mac.type) {
997 	case ixgbe_mac_X550EM_x:
998 	case ixgbe_mac_X550EM_a:
999 		scctx->isc_rss_table_size = 512;
1000 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1003 		scctx->isc_rss_table_size = 128;
1004 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1007 	/* Allow legacy interrupts */
1008 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
	/* Descriptor ring byte sizes, rounded to the DMA alignment requirement */
1010 	scctx->isc_txqsizes[0] =
1011 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1012 	    sizeof(u32), DBA_ALIGN),
1013 	scctx->isc_rxqsizes[0] =
1014 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1018 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1019 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1020 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1021 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	/* Non-82598 MACs additionally offload SCTP checksums */
1023 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1024 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1027 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1029 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1030 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1031 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1033 	scctx->isc_txrx = &ixgbe_txrx;
1035 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
	/* Error path: drop the DRV_LOAD bit and release PCI resources */
1040 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1041 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1042 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1043 	ixgbe_free_pci_resources(ctx);
1046 } /* ixgbe_if_attach_pre */
1048 /*********************************************************************
1049 * ixgbe_if_attach_post - Device initialization routine, part 2
1051 * Called during driver load, but after interrupts and
1052 * resources have been allocated and configured.
1053 * Sets up some data structures not relevant to iflib.
1055 * return 0 on success, positive on failure
1056 *********************************************************************/
1058 ixgbe_if_attach_post(if_ctx_t ctx)
/*
 * NOTE(review): local declarations and error-return lines are elided in
 * this excerpt; comments describe only the visible code.
 */
1061 	struct ixgbe_softc *sc;
1062 	struct ixgbe_hw *hw;
1065 	dev = iflib_get_dev(ctx);
1066 	sc = iflib_get_softc(ctx);
	/* Fail attach if iflib fell back to legacy IRQ on hardware without it */
1070 	if (sc->intr_type == IFLIB_INTR_LEGACY &&
1071 	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1072 		device_printf(dev, "Device does not support legacy interrupts");
1077 	/* Allocate multicast array memory. */
1078 	sc->mta = malloc(sizeof(*sc->mta) *
1079 	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1080 	if (sc->mta == NULL) {
1081 		device_printf(dev, "Can not allocate multicast setup array\n");
1086 	/* hw.ix defaults init */
1087 	ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1089 	/* Enable the optics for 82599 SFP+ fiber */
1090 	ixgbe_enable_tx_laser(hw);
1092 	/* Enable power to the phy. */
1093 	ixgbe_set_phy_power(hw, true);
1095 	ixgbe_initialize_iov(sc);
1097 	error = ixgbe_setup_interface(ctx);
1099 		device_printf(dev, "Interface setup failed: %d\n", error);
1103 	ixgbe_if_update_admin_status(ctx);
1105 	/* Initialize statistics */
1106 	ixgbe_update_stats_counters(sc);
1107 	ixgbe_add_hw_stats(sc);
1109 	/* Check PCIE slot type/speed/width */
1110 	ixgbe_get_slot_info(sc);
1113 	 * Do time init and sysctl init here, but
1114 	 * only on the first port of a bypass sc.
1116 	ixgbe_bypass_init(sc);
1118 	/* Display NVM and Option ROM versions */
1119 	ixgbe_print_fw_version(ctx);
1121 	/* Set an initial dmac value */
1123 	/* Set initial advertised speeds (if applicable) */
1124 	sc->advertise = ixgbe_get_default_advertise(sc);
1126 	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1127 		ixgbe_define_iov_schemas(dev, &error);
1130 	ixgbe_add_device_sysctls(ctx);
1135 } /* ixgbe_if_attach_post */
1137 /************************************************************************
1138 * ixgbe_check_wol_support
1140 * Checks whether the adapter's ports are capable of
1141 * Wake On LAN by reading the adapter's NVM.
1143 * Sets each port's hw->wol_enabled value depending
1144 * on the value read here.
1145 ************************************************************************/
1147 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1149 	struct ixgbe_hw *hw = &sc->hw;
1152 	/* Find out WoL support for port */
1153 	sc->wol_support = hw->wol_enabled = 0;
1154 	ixgbe_get_device_caps(hw, &dev_caps);
	/*
	 * WoL is enabled when either both ports support it, or port 0
	 * supports it and this is the matching port.
	 * NOTE(review): the second half of this condition (port-number
	 * check) is elided in this excerpt.
	 */
1155 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1156 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1158 		sc->wol_support = hw->wol_enabled = 1;
1160 	/* Save initial wake up filter configuration */
1161 	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1164 } /* ixgbe_check_wol_support */
1166 /************************************************************************
1167 * ixgbe_setup_interface
1169 * Setup networking device structure and register an interface.
1170 ************************************************************************/
1172 ixgbe_setup_interface(if_ctx_t ctx)
1174 	struct ifnet *ifp = iflib_get_ifp(ctx);
1175 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1177 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1179 	if_setbaudrate(ifp, IF_Gbps(10));
	/* Max frame = MTU plus Ethernet header and CRC */
1181 	sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1183 	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1185 	ixgbe_add_media_types(ctx);
1187 	/* Autoselect media by default */
1188 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1191 } /* ixgbe_setup_interface */
1193 /************************************************************************
1194 * ixgbe_if_get_counter
1195 ************************************************************************/
1197 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
/*
 * Map iflib counter requests onto the softc's cached statistics;
 * anything not handled here falls through to the ifnet defaults.
 * NOTE(review): the switch header and some return lines (collisions,
 * oqdrops) are elided in this excerpt.
 */
1199 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1200 	if_t ifp = iflib_get_ifp(ctx);
1203 	case IFCOUNTER_IPACKETS:
1204 		return (sc->ipackets);
1205 	case IFCOUNTER_OPACKETS:
1206 		return (sc->opackets);
1207 	case IFCOUNTER_IBYTES:
1208 		return (sc->ibytes);
1209 	case IFCOUNTER_OBYTES:
1210 		return (sc->obytes);
1211 	case IFCOUNTER_IMCASTS:
1212 		return (sc->imcasts);
1213 	case IFCOUNTER_OMCASTS:
1214 		return (sc->omcasts);
1215 	case IFCOUNTER_COLLISIONS:
1217 	case IFCOUNTER_IQDROPS:
1218 		return (sc->iqdrops);
1219 	case IFCOUNTER_OQDROPS:
1221 	case IFCOUNTER_IERRORS:
1222 		return (sc->ierrors);
1224 		return (if_get_counter_default(ifp, cnt));
1226 } /* ixgbe_if_get_counter */
1228 /************************************************************************
1230 ************************************************************************/
1232 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
/*
 * Service an I2C read request (e.g. SFP module EEPROM) by reading
 * req->len bytes one at a time through the PHY ops.
 */
1234 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1235 	struct ixgbe_hw *hw = &sc->hw;
	/* Bail if this PHY provides no byte-read op (return value elided) */
1239 	if (hw->phy.ops.read_i2c_byte == NULL)
1241 	for (i = 0; i < req->len; i++)
1242 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1243 		    req->dev_addr, &req->data[i]);
1245 } /* ixgbe_if_i2c_req */
1247 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1248 * @ctx: iflib context
1249 * @event: event code to check
1251 * Defaults to returning true for unknown events.
1253 * @returns true if iflib needs to reinit the interface
1256 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
/*
 * NOTE(review): switch header, per-case returns, and closing brace are
 * elided in this excerpt; per the comment above, unknown events default
 * to requesting a restart.
 */
1259 	case IFLIB_RESTART_VLAN_CONFIG:
1266 /************************************************************************
1267 * ixgbe_add_media_types
1268 ************************************************************************/
1270 ixgbe_add_media_types(if_ctx_t ctx)
/*
 * Populate the ifmedia list from the PHY-reported physical layers.
 * NOTE(review): some closing braces and an #else for IFM_ETH_XTYPE are
 * elided in this excerpt.
 */
1272 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1273 	struct ixgbe_hw *hw = &sc->hw;
1274 	device_t dev = iflib_get_dev(ctx);
1277 	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1279 	/* Media types with matching FreeBSD media defines */
1280 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1281 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1282 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1283 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1284 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1285 		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1286 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1287 		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
	/* X550 additionally supports 2.5G/5G BASE-T rates */
1289 	if (hw->mac.type == ixgbe_mac_X550) {
1290 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1291 		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1294 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1295 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1296 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
	/* Multispeed fiber modules can also link at 1G */
1299 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1300 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1301 		if (hw->phy.multispeed_fiber)
1302 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1305 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1306 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1307 		if (hw->phy.multispeed_fiber)
1308 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1310 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1311 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1312 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1313 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1315 #ifdef IFM_ETH_XTYPE
1316 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1317 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1318 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1319 		ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1320 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1321 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1322 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1323 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
	/* Without IFM_ETH_XTYPE, map backplane types onto older media defines */
1325 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1326 		device_printf(dev, "Media supported: 10GbaseKR\n");
1327 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1328 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1330 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1331 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1332 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1333 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1335 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1336 		device_printf(dev, "Media supported: 1000baseKX\n");
1337 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1338 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1340 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1341 		device_printf(dev, "Media supported: 2500baseKX\n");
1342 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1343 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1346 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1347 		device_printf(dev, "Media supported: 1000baseBX\n");
1349 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1350 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1352 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1355 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1356 } /* ixgbe_add_media_types */
1358 /************************************************************************
1360 ************************************************************************/
1362 ixgbe_is_sfp(struct ixgbe_hw *hw)
/*
 * Determine whether this port uses a pluggable SFP/QSFP module, based
 * on MAC type and reported media type.
 * NOTE(review): return statements and some case labels are elided in
 * this excerpt.
 */
1364 	switch (hw->mac.type) {
1365 	case ixgbe_mac_82598EB:
	/* On 82598 only the NL PHY indicates a pluggable module */
1366 		if (hw->phy.type == ixgbe_phy_nl)
1369 	case ixgbe_mac_82599EB:
1370 		switch (hw->mac.ops.get_media_type(hw)) {
1371 		case ixgbe_media_type_fiber:
1372 		case ixgbe_media_type_fiber_qsfp:
1377 	case ixgbe_mac_X550EM_x:
1378 	case ixgbe_mac_X550EM_a:
1379 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1385 } /* ixgbe_is_sfp */
1387 /************************************************************************
1389 ************************************************************************/
1391 ixgbe_config_link(if_ctx_t ctx)
/*
 * Configure link: for SFP ports defer module setup to the admin task;
 * otherwise query capabilities and program the MAC's link setup.
 * NOTE(review): some braces/else paths are elided in this excerpt.
 */
1393 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1394 	struct ixgbe_hw *hw = &sc->hw;
1395 	u32 autoneg, err = 0;
1396 	bool sfp, negotiate;
1398 	sfp = ixgbe_is_sfp(hw);
	/* SFP path: schedule module identification in the admin task */
1401 		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1402 		iflib_admin_intr_deferred(ctx);
1404 		if (hw->mac.ops.check_link)
1405 			err = ixgbe_check_link(hw, &sc->link_speed,
1406 			    &sc->link_up, false);
1409 		autoneg = hw->phy.autoneg_advertised;
1410 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1411 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1416 		if (hw->mac.type == ixgbe_mac_X550 &&
1417 		    hw->phy.autoneg_advertised == 0) {
1419 			 * 2.5G and 5G autonegotiation speeds on X550
1420 			 * are disabled by default due to reported
1421 			 * interoperability issues with some switches.
1423 			 * The second condition checks if any operations
1424 			 * involving setting autonegotiation speeds have
1425 			 * been performed prior to this ixgbe_config_link()
1428 			 * If hw->phy.autoneg_advertised does not
1429 			 * equal 0, this means that the user might have
1430 			 * set autonegotiation speeds via the sysctl
1431 			 * before bringing the interface up. In this
1432 			 * case, we should not disable 2.5G and 5G
1433 			 * since that speeds might be selected by the
1436 			 * Otherwise (i.e. if hw->phy.autoneg_advertised
1437 			 * is set to 0), it is the first time we set
1438 			 * autonegotiation preferences and the default
1439 			 * set of speeds should exclude 2.5G and 5G.
1441 			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1442 			    IXGBE_LINK_SPEED_5GB_FULL);
1445 		if (hw->mac.ops.setup_link)
1446 			err = hw->mac.ops.setup_link(hw, autoneg,
1449 } /* ixgbe_config_link */
1451 /************************************************************************
1452 * ixgbe_update_stats_counters - Update board statistics counters.
1453 ************************************************************************/
1455 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
/*
 * Read the hardware statistics registers, accumulate them into the
 * per-PF stats structure, and publish aggregates to the OS counters.
 * NOTE(review): some lines (loops over mpc[], else headers, closing
 * braces) are elided in this excerpt.
 */
1457 	struct ixgbe_hw *hw = &sc->hw;
1458 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1459 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1461 	u64 total_missed_rx = 0;
1463 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1464 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1465 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1466 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1467 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
	/* Per-queue packet/byte/drop counters for the first 16 queues */
1469 	for (int i = 0; i < 16; i++) {
1470 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1471 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1472 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1474 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1475 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1476 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1478 	/* Hardware workaround, gprc counts missed packets */
1479 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1480 	stats->gprc -= missed_rx;
	/* Non-82598 MACs expose 36-bit octet counters split low/high */
1482 	if (hw->mac.type != ixgbe_mac_82598EB) {
1483 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1484 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1485 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1486 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1487 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1488 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1489 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1490 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1491 		stats->lxoffrxc += lxoffrxc;
1493 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1494 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1495 		stats->lxoffrxc += lxoffrxc;
1496 		/* 82598 only has a counter in the high register */
1497 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1498 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1499 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1503 	 * For watchdog management we need to know if we have been paused
1504 	 * during the last interval, so capture that here.
1507 		sc->shared->isc_pause_frames = 1;
1510 	 * Workaround: mprc hardware is incorrectly counting
1511 	 * broadcasts, so for now we subtract those.
1513 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1514 	stats->bprc += bprc;
1515 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1516 	if (hw->mac.type == ixgbe_mac_82598EB)
1517 		stats->mprc -= bprc;
1519 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1520 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1521 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1522 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1523 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1524 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1526 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1527 	stats->lxontxc += lxon;
1528 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1529 	stats->lxofftxc += lxoff;
1530 	total = lxon + lxoff;
	/* Pause frames are counted as transmitted; back them out of TX stats */
1532 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1533 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1534 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1535 	stats->gptc -= total;
1536 	stats->mptc -= total;
1537 	stats->ptc64 -= total;
1538 	stats->gotc -= total * ETHER_MIN_LEN;
1540 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1541 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1542 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1543 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1544 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1545 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1546 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1547 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1548 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1549 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1550 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1551 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1552 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1553 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1554 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1555 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1556 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1557 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1558 	/* Only read FCOE on 82599 */
1559 	if (hw->mac.type != ixgbe_mac_82598EB) {
1560 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1561 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1562 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1563 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1564 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1567 	/* Fill out the OS statistics structure */
1568 	IXGBE_SET_IPACKETS(sc, stats->gprc);
1569 	IXGBE_SET_OPACKETS(sc, stats->gptc);
1570 	IXGBE_SET_IBYTES(sc, stats->gorc);
1571 	IXGBE_SET_OBYTES(sc, stats->gotc);
1572 	IXGBE_SET_IMCASTS(sc, stats->mprc);
1573 	IXGBE_SET_OMCASTS(sc, stats->mptc);
1574 	IXGBE_SET_COLLISIONS(sc, 0);
1575 	IXGBE_SET_IQDROPS(sc, total_missed_rx);
1578 	 * Aggregate following types of errors as RX errors:
1579 	 * - CRC error count,
1580 	 * - illegal byte error count,
1581 	 * - checksum error count,
1582 	 * - missed packets count,
1583 	 * - length error count,
1584 	 * - undersized packets count,
1585 	 * - fragmented packets count,
1586 	 * - oversized packets count,
1589 	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec +
1590 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1592 } /* ixgbe_update_stats_counters */
1594 /************************************************************************
1595 * ixgbe_add_hw_stats
1597 * Add sysctl variables, one per statistic, to the system.
1598 ************************************************************************/
1600 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
/*
 * Register one read-only sysctl node per statistic: driver counters at
 * the device root, per-queue TX/RX nodes, and MAC stats in "mac_stats".
 */
1602 	device_t dev = iflib_get_dev(sc->ctx);
1603 	struct ix_rx_queue *rx_que;
1604 	struct ix_tx_queue *tx_que;
1605 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1606 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1607 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1608 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1609 	struct sysctl_oid *stat_node, *queue_node;
1610 	struct sysctl_oid_list *stat_list, *queue_list;
1613 #define QUEUE_NAME_LEN 32
1614 	char namebuf[QUEUE_NAME_LEN];
1616 	/* Driver Statistics */
1617 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1618 	    CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1619 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1620 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1621 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1622 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
	/* Per-TX-queue nodes: descriptor pointers and packet counters */
1624 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1625 		struct tx_ring *txr = &tx_que->txr;
1626 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1627 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1628 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1629 		queue_list = SYSCTL_CHILDREN(queue_node);
1631 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1632 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1633 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1634 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1635 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1636 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1637 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1638 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1639 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1640 		    CTLFLAG_RD, &txr->total_packets,
1641 		    "Queue Packets Transmitted");
	/* Per-RX-queue nodes: interrupt rate, descriptor pointers, counters */
1644 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1645 		struct rx_ring *rxr = &rx_que->rxr;
1646 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1647 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1648 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1649 		queue_list = SYSCTL_CHILDREN(queue_node);
1651 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1652 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1653 		    &sc->rx_queues[i], 0,
1654 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1656 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1657 		    CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1658 		    "irqs on this queue");
1659 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1660 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1661 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1662 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1663 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1664 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1665 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1666 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1667 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1668 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1669 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1670 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1671 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1672 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1675 	/* MAC stats get their own sub node */
1677 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1678 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1679 	stat_list = SYSCTL_CHILDREN(stat_node);
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1682 	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1684 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1686 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1688 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1690 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1692 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1694 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1696 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1698 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1700 	/* Flow Control stats */
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1702 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1704 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1706 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1708 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1710 	/* Packet Reception Stats */
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1712 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1714 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1716 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1718 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1720 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1721 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1722 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1724 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1726 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1728 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1730 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1732 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1733 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1734 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1735 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1736 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1737 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1738 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1739 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1740 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1741 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1742 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1743 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1744 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1745 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1746 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1747 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1748 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1750 	/* Packet Transmission Stats */
1751 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1752 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1753 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1754 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1755 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1756 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1757 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1758 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1759 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1760 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1761 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1762 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1763 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1764 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1765 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1766 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1767 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1768 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1769 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1770 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1771 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1772 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1773 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1774 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1775 } /* ixgbe_add_hw_stats */
1777 /************************************************************************
1778 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1780 * Retrieves the TDH value from the hardware
1781 ************************************************************************/
1783 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1785 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read the live TDH register for this ring and report it via sysctl */
1792 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1793 	error = sysctl_handle_int(oidp, &val, 0, req);
1794 	if (error || !req->newptr)
1798 } /* ixgbe_sysctl_tdh_handler */
1800 /************************************************************************
1801 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1803 * Retrieves the TDT value from the hardware
1804 ************************************************************************/
1806 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1808 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read the live TDT register for this ring and report it via sysctl */
1815 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1816 	error = sysctl_handle_int(oidp, &val, 0, req);
1817 	if (error || !req->newptr)
1821 } /* ixgbe_sysctl_tdt_handler */
1823 /************************************************************************
1824 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1826 * Retrieves the RDH value from the hardware
1827 ************************************************************************/
/*
 * sysctl(9) handler: reads the RDH (Receive Descriptor Head) register of the
 * rx_ring passed through oidp->oid_arg1 and returns it to userland.
 * NOTE(review): source text appears elided (locals/braces/returns missing
 * from view) -- verify against the full file.
 */
1829 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1831 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the current head index for this queue (rxr->me) from hardware. */
1838 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1839 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: stop on error or when no new value was supplied. */
1840 if (error || !req->newptr)
1844 } /* ixgbe_sysctl_rdh_handler */
1846 /************************************************************************
1847 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1849 * Retrieves the RDT value from the hardware
1850 ************************************************************************/
/*
 * sysctl(9) handler: reads the RDT (Receive Descriptor Tail) register of the
 * rx_ring passed through oidp->oid_arg1 and returns it to userland.
 * NOTE(review): source text appears elided (locals/braces/returns missing
 * from view) -- verify against the full file.
 */
1852 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1854 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the current tail index for this queue (rxr->me) from hardware. */
1861 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1862 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: stop on error or when no new value was supplied. */
1863 if (error || !req->newptr)
1867 } /* ixgbe_sysctl_rdt_handler */
1869 /************************************************************************
1870 * ixgbe_if_vlan_register
1872 * Run via vlan config EVENT, it enables us to use the
1873 * HW Filter table since we can get the vlan id. This
1874 * just creates the entry in the soft version of the
1875 * VFTA, init will repopulate the real table.
1876 ************************************************************************/
/*
 * iflib vlan-register callback: record VLAN id 'vtag' in the software shadow
 * of the VLAN Filter Table Array, then push VLAN state to hardware.
 * NOTE(review): declarations of 'index'/'bit' and the computation of 'bit'
 * (presumably vtag & 0x1F) are elided from this view -- confirm upstream.
 */
1878 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1880 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* VFTA is an array of 32-bit words: word index = upper bits of the tag. */
1883 index = (vtag >> 5) & 0x7F;
1885 sc->shadow_vfta[index] |= (1 << bit);
/* Re-program hardware so the new filter entry takes effect. */
1887 ixgbe_setup_vlan_hw_support(ctx);
1888 } /* ixgbe_if_vlan_register */
1890 /************************************************************************
1891 * ixgbe_if_vlan_unregister
1893 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1894 ************************************************************************/
/*
 * iflib vlan-unregister callback: clear VLAN id 'vtag' from the software
 * shadow VFTA and re-program hardware VLAN support.
 * NOTE(review): 'index'/'bit' declarations and the 'bit' computation are
 * elided from this view -- confirm upstream.
 */
1896 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1898 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* Same word/bit addressing as ixgbe_if_vlan_register. */
1901 index = (vtag >> 5) & 0x7F;
1903 sc->shadow_vfta[index] &= ~(1 << bit);
1905 /* Re-init to load the changes */
1906 ixgbe_setup_vlan_hw_support(ctx);
1907 } /* ixgbe_if_vlan_unregister */
1909 /************************************************************************
1910 * ixgbe_setup_vlan_hw_support
1911 ************************************************************************/
/*
 * Program hardware VLAN tag stripping (per-queue RXDCTL.VME on 82599+,
 * global VLNCTRL.VME on 82598) and repopulate the hardware VFTA from the
 * software shadow copy after a reset.  Driven by the interface capability
 * flags IFCAP_VLAN_HWTAGGING / IFCAP_VLAN_HWFILTER.
 * NOTE(review): locals ('i', 'ctrl'), early returns and some closing braces
 * are elided from this view -- confirm against the full file.
 */
1913 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1915 struct ifnet *ifp = iflib_get_ifp(ctx);
1916 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1917 struct ixgbe_hw *hw = &sc->hw;
1918 struct rx_ring *rxr;
1924 * We get here thru init_locked, meaning
1925 * a soft reset, this has already cleared
1926 * the VFTA and other state, so if there
1927 * have been no vlan's registered do nothing.
/* Nothing to program when no VLANs have ever been registered. */
1929 if (sc->num_vlans == 0)
1932 /* Setup the queues for vlans */
1933 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1934 for (i = 0; i < sc->num_rx_queues; i++) {
1935 rxr = &sc->rx_queues[i].rxr;
1936 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1937 if (hw->mac.type != ixgbe_mac_82598EB) {
1938 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1939 ctrl |= IXGBE_RXDCTL_VME;
1940 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember stripping state so the RX path treats frames correctly. */
1942 rxr->vtag_strip = true;
/* Without HW filtering there is no VFTA to restore. */
1946 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1949 * A soft reset zero's out the VFTA, so
1950 * we need to repopulate it now.
/* Only write non-zero words; zero words are already correct post-reset. */
1952 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1953 if (sc->shadow_vfta[i] != 0)
1954 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1955 sc->shadow_vfta[i]);
1957 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1958 /* Enable the Filter Table if enabled */
1959 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1960 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1961 ctrl |= IXGBE_VLNCTRL_VFE;
/* 82598 has a single global VLAN-strip enable instead of per-queue. */
1963 if (hw->mac.type == ixgbe_mac_82598EB)
1964 ctrl |= IXGBE_VLNCTRL_VME;
1965 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1966 } /* ixgbe_setup_vlan_hw_support */
1968 /************************************************************************
1969 * ixgbe_get_slot_info
1971 * Get the width and transaction speed of
1972 * the slot this adapter is plugged into.
1973 ************************************************************************/
/*
 * Determine and print the PCIe link speed/width available to the adapter.
 * Devices behind an internal bridge (82599 QSFP/SFP "SF_QP" variants) must
 * walk up the PCI tree to the expansion slot and read the Link Status
 * register there; everything else uses ixgbe_get_bus_info().  Warns when the
 * slot bandwidth is insufficient for the adapter.
 * NOTE(review): several lines (labels such as 'get_parent_info', 'link' and
 * 'offset' declarations, default switch arms, closing braces) are elided
 * from this view -- verify against the full file.
 */
1975 ixgbe_get_slot_info(struct ixgbe_softc *sc)
1977 device_t dev = iflib_get_dev(sc->ctx);
1978 struct ixgbe_hw *hw = &sc->hw;
1979 int bus_info_valid = true;
1983 /* Some devices are behind an internal bridge */
1984 switch (hw->device_id) {
1985 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1986 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1987 goto get_parent_info;
/* Simple case: shared code can read speed/width from config space. */
1992 ixgbe_get_bus_info(hw);
1995 * Some devices don't use PCI-E, but there is no need
1996 * to display "Unknown" for bus speed and width.
1998 switch (hw->mac.type) {
1999 case ixgbe_mac_X550EM_x:
2000 case ixgbe_mac_X550EM_a:
2008 * For the Quad port adapter we need to parse back
2009 * up the PCI tree to find the speed of the expansion
2010 * slot into which this adapter is plugged. A bit more work.
/* First hop: the internal bridge above the NIC function. */
2012 dev = device_get_parent(device_get_parent(dev));
2014 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2015 pci_get_slot(dev), pci_get_function(dev));
/* Second hop: the actual expansion-slot bridge. */
2017 dev = device_get_parent(device_get_parent(dev));
2019 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2020 pci_get_slot(dev), pci_get_function(dev));
2022 /* Now get the PCI Express Capabilities offset */
2023 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2025 * Hmm...can't get PCI-Express capabilities.
2026 * Falling back to default method.
2028 bus_info_valid = false;
2029 ixgbe_get_bus_info(hw);
2032 /* ...and read the Link Status Register */
2033 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Let shared code decode the Link Status bits into hw->bus.{speed,width}. */
2034 ixgbe_set_pci_config_data_generic(hw, link);
2037 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2038 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2039 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2040 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2042 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2043 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2044 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn if the negotiated link cannot feed the adapter at line rate. */
2047 if (bus_info_valid) {
2048 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2049 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2050 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2051 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2052 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2054 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2055 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2056 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2057 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2058 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2061 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2064 } /* ixgbe_get_slot_info */
2066 /************************************************************************
2067 * ixgbe_if_msix_intr_assign
2069 * Setup MSI-X Interrupt resources and handlers
2070 ************************************************************************/
/*
 * iflib callback: allocate one MSI-X vector per RX queue (starting at
 * vector 1; vector 0 is the admin/link vector), attach TX softirqs to the
 * matching RX queue interrupts, then register the admin handler.  On any
 * failure all previously allocated queue interrupts are released.
 * NOTE(review): this view is elided (rid assignment, RSS cpu binding call,
 * 'fail:' label, returns) -- confirm against the full file.
 */
2072 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2074 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2075 struct ix_rx_queue *rx_que = sc->rx_queues;
2076 struct ix_tx_queue *tx_que;
2077 int error, rid, vector = 0;
2081 /* Admin Que is vector 0*/
2083 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2086 snprintf(buf, sizeof(buf), "rxq%d", i);
2087 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2088 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2091 device_printf(iflib_get_dev(ctx),
2092 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count so cleanup only frees what was allocated. */
2093 sc->num_rx_queues = i + 1;
2097 rx_que->msix = vector;
2098 if (sc->feat_en & IXGBE_FEATURE_RSS) {
2100 * The queue ID is used as the RSS layer bucket ID.
2101 * We look up the queue ID -> RSS CPU ID and select
2104 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2107 * Bind the MSI-X vector, and thus the
2108 * rings to the corresponding cpu.
2110 * This just happens to match the default RSS
2111 * round-robin bucket -> queue -> CPU allocation.
2113 if (sc->num_rx_queues > 1)
/* TX queues share the RX vectors: txq i maps onto rxq (i % nrx). */
2118 for (int i = 0; i < sc->num_tx_queues; i++) {
2119 snprintf(buf, sizeof(buf), "txq%d", i);
2120 tx_que = &sc->tx_queues[i];
2121 tx_que->msix = i % sc->num_rx_queues;
2122 iflib_softirq_alloc_generic(ctx,
2123 &sc->rx_queues[tx_que->msix].que_irq,
2124 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Finally the admin (link/other-cause) interrupt. */
2127 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2128 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2130 device_printf(iflib_get_dev(ctx),
2131 "Failed to register admin handler");
2135 sc->vector = vector;
/* Error path: release every queue interrupt allocated above. */
2139 iflib_irq_free(ctx, &sc->irq);
2140 rx_que = sc->rx_queues;
2141 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2142 iflib_irq_free(ctx, &rx_que->que_irq);
2145 } /* ixgbe_if_msix_intr_assign */
/*
 * Adaptive Interrupt Moderation (AIM): write out the previously computed
 * EITR value for this queue, then derive a new interrupt-throttle setting
 * from the average RX frame size observed since the last interrupt.  The
 * new value is cached in que->eitr_setting for the next pass.
 * NOTE(review): elided lines (second EITR write argument, byte/packet
 * counter resets, closing braces) are missing from this view -- confirm
 * against the full file.
 */
2148 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2150 uint32_t newitr = 0;
2151 struct rx_ring *rxr = &que->rxr;
2154 * Do Adaptive Interrupt Moderation:
2155 * - Write out last calculated setting
2156 * - Calculate based on average size over
2157 * the last interval.
2159 if (que->eitr_setting) {
2160 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2164 que->eitr_setting = 0;
2165 /* Idle, do nothing */
2166 if (rxr->bytes == 0) {
/* Average frame size over the last interval drives the new setting. */
2170 if ((rxr->bytes) && (rxr->packets)) {
2171 newitr = (rxr->bytes / rxr->packets);
2174 newitr += 24; /* account for hardware frame, crc */
2175 /* set an upper boundary */
2176 newitr = min(newitr, 3000);
2178 /* Be nice to the mid range */
2179 if ((newitr > 300) && (newitr < 1200)) {
2180 newitr = (newitr / 3);
2182 newitr = (newitr / 2);
/* 82598 encodes the interval twice (low/high halves of EITR). */
2185 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2186 newitr |= newitr << 16;
2188 newitr |= IXGBE_EITR_CNT_WDIS;
2191 /* save for next interrupt */
2192 que->eitr_setting = newitr;
2201 /*********************************************************************
2202 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2203 **********************************************************************/
/*
 * Per-queue MSI-X filter routine: ignore spurious interrupts while the
 * interface is down, mask this queue's interrupt, optionally run AIM,
 * and hand off to the iflib thread for actual RX/TX processing.
 */
2205 ixgbe_msix_que(void *arg)
2207 struct ix_rx_queue *que = arg;
2208 struct ixgbe_softc *sc = que->sc;
2209 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx);
2211 /* Protect against spurious interrupts */
2212 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2213 return (FILTER_HANDLED);
/* Mask until the deferred handler has drained the queue. */
2215 ixgbe_disable_queue(sc, que->msix);
2219 if (sc->enable_aim) {
2220 ixgbe_perform_aim(sc, que);
2223 return (FILTER_SCHEDULE_THREAD);
2224 } /* ixgbe_msix_que */
2226 /************************************************************************
2227 * ixgbe_media_status - Media Ioctl callback
2229 * Called whenever the user queries the status of
2230 * the interface using ifconfig.
2231 ************************************************************************/
/*
 * Translate the adapter's physical-layer capability mask (sc->phy_layer)
 * plus the current link speed into ifmedia subtype/flags for ifconfig.
 * Also reports the flow-control pause flags currently negotiated.
 * NOTE(review): 'layer' declaration, 'break' statements and some returns
 * are elided from this view -- confirm against the full file.
 */
2233 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2235 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2236 struct ixgbe_hw *hw = &sc->hw;
2239 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2241 ifmr->ifm_status = IFM_AVALID;
2242 ifmr->ifm_active = IFM_ETHER;
/* No link: leave IFM_ACTIVE clear and report nothing further. */
2244 if (!sc->link_active)
2247 ifmr->ifm_status |= IFM_ACTIVE;
2248 layer = sc->phy_layer;
/* Copper (BASE-T / 100TX / 10T) media. */
2250 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2251 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2252 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2253 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2254 switch (sc->link_speed) {
2255 case IXGBE_LINK_SPEED_10GB_FULL:
2256 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2258 case IXGBE_LINK_SPEED_1GB_FULL:
2259 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2261 case IXGBE_LINK_SPEED_100_FULL:
2262 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2264 case IXGBE_LINK_SPEED_10_FULL:
2265 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* X550 additionally supports 2.5G/5G BASE-T rates. */
2268 if (hw->mac.type == ixgbe_mac_X550)
2269 switch (sc->link_speed) {
2270 case IXGBE_LINK_SPEED_5GB_FULL:
2271 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2273 case IXGBE_LINK_SPEED_2_5GB_FULL:
2274 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
/* SFP+ direct-attach copper. */
2277 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2278 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2279 switch (sc->link_speed) {
2280 case IXGBE_LINK_SPEED_10GB_FULL:
2281 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2284 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2285 switch (sc->link_speed) {
2286 case IXGBE_LINK_SPEED_10GB_FULL:
2287 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2289 case IXGBE_LINK_SPEED_1GB_FULL:
2290 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2294 switch (sc->link_speed) {
2295 case IXGBE_LINK_SPEED_10GB_FULL:
2296 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2298 case IXGBE_LINK_SPEED_1GB_FULL:
2299 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2302 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2303 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2304 switch (sc->link_speed) {
2305 case IXGBE_LINK_SPEED_10GB_FULL:
2306 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2308 case IXGBE_LINK_SPEED_1GB_FULL:
2309 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2312 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2313 switch (sc->link_speed) {
2314 case IXGBE_LINK_SPEED_10GB_FULL:
2315 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2319 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX) media: older ifmedia headers lack the KR/KX
 * subtypes, so approximate with SR/CX4/SX/CX when IFM_ETH_XTYPE is absent. */
2322 #ifndef IFM_ETH_XTYPE
2323 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2324 switch (sc->link_speed) {
2325 case IXGBE_LINK_SPEED_10GB_FULL:
2326 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2328 case IXGBE_LINK_SPEED_2_5GB_FULL:
2329 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2331 case IXGBE_LINK_SPEED_1GB_FULL:
2332 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2335 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2336 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2337 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2338 switch (sc->link_speed) {
2339 case IXGBE_LINK_SPEED_10GB_FULL:
2340 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2342 case IXGBE_LINK_SPEED_2_5GB_FULL:
2343 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2345 case IXGBE_LINK_SPEED_1GB_FULL:
2346 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2350 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2351 switch (sc->link_speed) {
2352 case IXGBE_LINK_SPEED_10GB_FULL:
2353 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2355 case IXGBE_LINK_SPEED_2_5GB_FULL:
2356 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2358 case IXGBE_LINK_SPEED_1GB_FULL:
2359 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2362 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2363 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2364 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2365 switch (sc->link_speed) {
2366 case IXGBE_LINK_SPEED_10GB_FULL:
2367 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2369 case IXGBE_LINK_SPEED_2_5GB_FULL:
2370 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2372 case IXGBE_LINK_SPEED_1GB_FULL:
2373 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2378 /* If nothing is recognized... */
2379 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2380 ifmr->ifm_active |= IFM_UNKNOWN;
2382 /* Display current flow control setting used on link */
2383 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2384 hw->fc.current_mode == ixgbe_fc_full)
2385 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2386 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2387 hw->fc.current_mode == ixgbe_fc_full)
2388 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2389 } /* ixgbe_media_status */
2391 /************************************************************************
2392 * ixgbe_media_change - Media Ioctl callback
2394 * Called when the user changes speed/duplex using
2395 * media/mediopt option with ifconfig.
2396 ************************************************************************/
/*
 * iflib media-change callback: map the requested ifmedia subtype to an
 * ixgbe_link_speed mask and program the PHY via setup_link().  Backplane
 * media changes are rejected.  Also records the advertised-speed bitmap
 * in the softc (the bit values mirror ixgbe_sysctl_advertise encoding).
 * NOTE(review): case labels for several media subtypes, returns, and the
 * 'sc->advertise =' assignment line are elided from this view -- confirm
 * against the full file.
 */
2398 ixgbe_if_media_change(if_ctx_t ctx)
2400 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2401 struct ifmedia *ifm = iflib_get_media(ctx);
2402 struct ixgbe_hw *hw = &sc->hw;
2403 ixgbe_link_speed speed = 0;
2405 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2407 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane link speed is fixed by the platform; refuse changes. */
2410 if (hw->phy.media_type == ixgbe_media_type_backplane)
2414 * We don't actually need to check against the supported
2415 * media types of the adapter; ifmedia will take care of
2418 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2421 speed |= IXGBE_LINK_SPEED_100_FULL;
2422 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2423 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2427 #ifndef IFM_ETH_XTYPE
2428 case IFM_10G_SR: /* KR, too */
2429 case IFM_10G_CX4: /* KX4 */
2434 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2435 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2437 #ifndef IFM_ETH_XTYPE
2438 case IFM_1000_CX: /* KX */
2444 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2447 speed |= IXGBE_LINK_SPEED_100_FULL;
2448 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2450 case IFM_10G_TWINAX:
2451 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2454 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2457 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2460 speed |= IXGBE_LINK_SPEED_100_FULL;
2463 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Apply the new speed mask; autotry_restart re-arms autonegotiation. */
2469 hw->mac.autotry_restart = true;
2470 hw->mac.ops.setup_link(hw, speed, true);
/* Encode the advertised speeds as the sysctl 'advertise_speed' bitmap. */
2472 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2473 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2474 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2475 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2476 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2477 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
2482 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2485 } /* ixgbe_if_media_change */
2487 /************************************************************************
2489 ************************************************************************/
/*
 * iflib promiscuous-mode callback: program FCTRL unicast/multicast
 * promiscuous bits (UPE/MPE) based on IFF_PROMISC / IFF_ALLMULTI and the
 * current multicast address count relative to the hardware filter limit.
 * NOTE(review): 'rctl'/'mcnt' declarations and returns are elided from
 * this view -- confirm against the full file.
 */
2491 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2493 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2494 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from current FCTRL with unicast-promisc cleared. */
2498 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2499 rctl &= (~IXGBE_FCTRL_UPE);
2500 if (ifp->if_flags & IFF_ALLMULTI)
2501 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2503 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
/* The MTA can hold all addresses -> no need for multicast promisc. */
2505 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2506 rctl &= (~IXGBE_FCTRL_MPE);
2507 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2509 if (ifp->if_flags & IFF_PROMISC) {
2510 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2511 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2512 } else if (ifp->if_flags & IFF_ALLMULTI) {
2513 rctl |= IXGBE_FCTRL_MPE;
2514 rctl &= ~IXGBE_FCTRL_UPE;
2515 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2518 } /* ixgbe_if_promisc_set */
2520 /************************************************************************
2521 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2522 ************************************************************************/
/*
 * Admin-vector filter routine: decode EICR "other" causes (link change,
 * flow director, ECC, over-temp, VF mailbox, SFP module/MSF events, fan
 * failure, external PHY) into sc->task_requests bits, acknowledge them in
 * hardware, and schedule the deferred admin task when any work is queued.
 * NOTE(review): 'retval' declaration, several 'break'/'default' arms and
 * re-enable of EIMS are elided from this view -- confirm upstream.
 */
2524 ixgbe_msix_link(void *arg)
2526 struct ixgbe_softc *sc = arg;
2527 struct ixgbe_hw *hw = &sc->hw;
2528 u32 eicr, eicr_mask;
2533 /* Pause other interrupts */
2534 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2536 /* First get the cause */
2537 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2538 /* Be sure the queue bits are not cleared */
2539 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2540 /* Clear interrupt with write */
2541 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2543 /* Link status change */
2544 if (eicr & IXGBE_EICR_LSC) {
2545 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2546 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* Causes below do not exist on 82598. */
2549 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2550 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2551 (eicr & IXGBE_EICR_FLOW_DIR)) {
2552 /* This is probably overkill :) */
2553 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2554 return (FILTER_HANDLED);
2555 /* Disable the interrupt */
2556 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2557 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2559 if (eicr & IXGBE_EICR_ECC) {
2560 device_printf(iflib_get_dev(sc->ctx),
2561 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2562 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2565 /* Check for over temp condition */
2566 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2567 switch (sc->hw.mac.type) {
/* X550EM_a signals over-temp via GPI SDP0 rather than the TS bit. */
2568 case ixgbe_mac_X550EM_a:
2569 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2571 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2572 IXGBE_EICR_GPI_SDP0_X550EM_a);
2573 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2574 IXGBE_EICR_GPI_SDP0_X550EM_a);
2575 retval = hw->phy.ops.check_overtemp(hw);
2576 if (retval != IXGBE_ERR_OVERTEMP)
2578 device_printf(iflib_get_dev(sc->ctx),
2579 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2580 device_printf(iflib_get_dev(sc->ctx),
2581 "System shutdown required!\n");
2584 if (!(eicr & IXGBE_EICR_TS))
2586 retval = hw->phy.ops.check_overtemp(hw);
2587 if (retval != IXGBE_ERR_OVERTEMP)
2589 device_printf(iflib_get_dev(sc->ctx),
2590 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2591 device_printf(iflib_get_dev(sc->ctx),
2592 "System shutdown required!\n");
2593 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2598 /* Check for VF message */
2599 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2600 (eicr & IXGBE_EICR_MAILBOX))
2601 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2604 if (ixgbe_is_sfp(hw)) {
2605 /* Pluggable optics-related interrupt */
2606 if (hw->mac.type >= ixgbe_mac_X540)
2607 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2609 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
/* Module insertion/removal -> schedule the MOD task. */
2611 if (eicr & eicr_mask) {
2612 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2613 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
/* 82599 SDP1 indicates a multi-speed-fiber event. */
2616 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2617 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2618 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2619 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2620 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2624 /* Check for fan failure */
2625 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2626 ixgbe_check_fan_failure(sc, eicr, true);
2627 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2630 /* External PHY interrupt */
2631 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2632 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2633 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2634 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Only wake the admin task when some request bit was actually set. */
2637 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2638 } /* ixgbe_msix_link */
2640 /************************************************************************
2641 * ixgbe_sysctl_interrupt_rate_handler
2642 ************************************************************************/
/*
 * sysctl(9) handler: expose the per-queue interrupt rate derived from the
 * EITR register and allow the user to set a new rate (0 or 1..499999),
 * which is converted back into an EITR interval and written to hardware.
 * NOTE(review): 'error' declaration, zero-guard on 'usec' and returns are
 * elided from this view -- confirm against the full file.
 */
2644 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2646 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2648 unsigned int reg, usec, rate;
2650 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
/* EITR interval field: bits 3..11, units of 2us -> usec value. */
2651 usec = ((reg & 0x0FF8) >> 3);
2653 rate = 500000 / usec;
2656 error = sysctl_handle_int(oidp, &rate, 0, req);
2657 if (error || !req->newptr)
2659 reg &= ~0xfff; /* default, no limitation */
2660 ixgbe_max_interrupt_rate = 0;
2661 if (rate > 0 && rate < 500000) {
2664 ixgbe_max_interrupt_rate = rate;
/* Convert the requested rate back into the EITR interval encoding. */
2665 reg |= ((4000000/rate) & 0xff8);
2667 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2670 } /* ixgbe_sysctl_interrupt_rate_handler */
2672 /************************************************************************
2673 * ixgbe_add_device_sysctls
2674 ************************************************************************/
/*
 * Register the device-level sysctl tree: flow control, advertised speed,
 * AIM enable, firmware version, power state, RSS config, plus conditional
 * nodes for X550-series DMA coalescing, WoL-capable parts, the external
 * PHY on X552/X557-AT, and EEE where supported.
 */
2676 ixgbe_add_device_sysctls(if_ctx_t ctx)
2678 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2679 device_t dev = iflib_get_dev(ctx);
2680 struct ixgbe_hw *hw = &sc->hw;
2681 struct sysctl_oid_list *child;
2682 struct sysctl_ctx_list *ctx_list;
2684 ctx_list = device_get_sysctl_ctx(dev);
2685 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2687 /* Sysctls for all devices */
2688 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2689 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2690 sc, 0, ixgbe_sysctl_flowcntl, "I",
2691 IXGBE_SYSCTL_DESC_SET_FC);
2693 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2694 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2695 sc, 0, ixgbe_sysctl_advertise, "I",
2696 IXGBE_SYSCTL_DESC_ADV_SPEED);
/* Seed the runtime AIM flag from the loader tunable. */
2698 sc->enable_aim = ixgbe_enable_aim;
2699 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2700 &sc->enable_aim, 0, "Interrupt Moderation");
2702 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2703 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2704 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2707 /* testing sysctls (for all devices) */
2708 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2709 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2710 sc, 0, ixgbe_sysctl_power_state,
2711 "I", "PCI Power State");
2713 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2714 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2715 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2717 /* for X550 series devices */
2718 if (hw->mac.type >= ixgbe_mac_X550)
2719 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2720 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2721 sc, 0, ixgbe_sysctl_dmac,
2722 "I", "DMA Coalesce");
2724 /* for WoL-capable devices */
2725 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2726 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2727 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2728 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2730 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2731 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2732 sc, 0, ixgbe_sysctl_wufc,
2733 "I", "Enable/Disable Wake Up Filters");
2736 /* for X552/X557-AT devices */
2737 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2738 struct sysctl_oid *phy_node;
2739 struct sysctl_oid_list *phy_list;
/* Group external-PHY sysctls under a dedicated "phy" node. */
2741 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2742 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2743 phy_list = SYSCTL_CHILDREN(phy_node);
2745 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2746 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2747 sc, 0, ixgbe_sysctl_phy_temp,
2748 "I", "Current External PHY Temperature (Celsius)");
2750 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2751 "overtemp_occurred",
2752 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
2753 ixgbe_sysctl_phy_overtemp_occurred, "I",
2754 "External PHY High Temperature Event Occurred");
2757 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2758 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2759 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2760 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2762 } /* ixgbe_add_device_sysctls */
2764 /************************************************************************
2765 * ixgbe_allocate_pci_resources
2766 ************************************************************************/
/*
 * Map the device's memory BAR and record the bus-space tag/handle so the
 * IXGBE_READ_REG/IXGBE_WRITE_REG macros and the shared code (hw.hw_addr)
 * can access registers.  Returns non-zero on allocation failure.
 * NOTE(review): the 'rid' initialization and return statements are elided
 * from this view -- confirm against the full file.
 */
2768 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2770 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2771 device_t dev = iflib_get_dev(ctx);
2775 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2778 if (!(sc->pci_mem)) {
2779 device_printf(dev, "Unable to allocate bus resource: memory\n");
2783 /* Save bus_space values for READ/WRITE_REG macros */
2784 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2785 sc->osdep.mem_bus_space_handle =
2786 rman_get_bushandle(sc->pci_mem);
2787 /* Set hw values for shared code */
2788 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2791 } /* ixgbe_allocate_pci_resources */
2793 /************************************************************************
2794 * ixgbe_detach - Device removal routine
2796 * Called when the driver is being removed.
2797 * Stops the adapter and deallocates all the resources
2798 * that were allocated for driver operation.
2800 * return 0 on success, positive on failure
2801 ************************************************************************/
/*
 * iflib detach callback: refuse while SR-IOV VFs are attached, enter low
 * power mode, clear the DRV_LOAD handshake bit so firmware knows the
 * driver is gone, then release PCI resources and the multicast table.
 */
2803 ixgbe_if_detach(if_ctx_t ctx)
2805 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2806 device_t dev = iflib_get_dev(ctx);
2809 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Cannot tear down the PF while VFs still depend on it. */
2811 if (ixgbe_pci_iov_detach(dev) != 0) {
2812 device_printf(dev, "SR-IOV in use; detach first.\n");
2816 ixgbe_setup_low_power_mode(ctx);
2818 /* let hardware know driver is unloading */
2819 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2820 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2821 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2823 ixgbe_free_pci_resources(ctx);
2824 free(sc->mta, M_IXGBE);
2827 } /* ixgbe_if_detach */
2829 /************************************************************************
2830 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2832 * Prepare the adapter/port for LPLU and/or WoL
2833 ************************************************************************/
/*
 * Put the port into its low-power state for detach/suspend/shutdown.
 * On X550EM baseT with LPLU support: disable APM wake, clear stale wake
 * events, arm the user-configured wake filters (sc->wufc), enable PME,
 * and enter Low Power Link Up via the PHY ops.  Other adapters are simply
 * stopped.  When WoL is not enabled the PHY is powered down outright.
 * NOTE(review): 'error' declaration, locking, the stop path and returns
 * are elided from this view -- confirm against the full file.
 */
2835 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2837 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2838 struct ixgbe_hw *hw = &sc->hw;
2839 device_t dev = iflib_get_dev(ctx);
/* No wake source configured: save power by turning the PHY off. */
2842 if (!hw->wol_enabled)
2843 ixgbe_set_phy_power(hw, false);
2845 /* Limit power management flow to X550EM baseT */
2846 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2847 hw->phy.ops.enter_lplu) {
2848 /* Turn off support for APM wakeup. (Using ACPI instead) */
2849 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2850 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2853 * Clear Wake Up Status register to prevent any previous wakeup
2854 * events from waking us up immediately after we suspend.
2856 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2859 * Program the Wakeup Filter Control register with user filter
2862 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2864 /* Enable wakeups and power management in Wakeup Control */
2865 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2866 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2868 /* X550EM baseT adapters need a special LPLU flow */
/* Keep the PHY from being reset while in LPLU. */
2869 hw->phy.reset_disable = true;
2871 error = hw->phy.ops.enter_lplu(hw);
2873 device_printf(dev, "Error entering LPLU: %d\n", error);
2874 hw->phy.reset_disable = false;
2876 /* Just stop for other adapters */
2881 } /* ixgbe_setup_low_power_mode */
2883 /************************************************************************
2884 * ixgbe_shutdown - Shutdown entry point
2885 ************************************************************************/
/* iflib shutdown callback: just transition the port into low-power mode. */
2887 ixgbe_if_shutdown(if_ctx_t ctx)
2891 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2893 error = ixgbe_setup_low_power_mode(ctx);
2896 } /* ixgbe_if_shutdown */
2898 /************************************************************************
2902 ************************************************************************/
/* iflib suspend callback: same low-power-mode flow as shutdown. */
2904 ixgbe_if_suspend(if_ctx_t ctx)
2908 INIT_DEBUGOUT("ixgbe_suspend: begin");
2910 error = ixgbe_setup_low_power_mode(ctx);
2913 } /* ixgbe_if_suspend */
2915 /************************************************************************
2919 ************************************************************************/
/*
 * iflib resume callback: report and clear the wake-up cause (WUS),
 * disarm the wake filters, and re-init the interface if it was up so
 * the previously advertised speeds are restored after D3->D0.
 * NOTE(review): 'wus' declaration, the wus!=0 guard brace and the re-init
 * call are elided from this view -- confirm against the full file.
 */
2921 ixgbe_if_resume(if_ctx_t ctx)
2923 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2924 device_t dev = iflib_get_dev(ctx);
2925 struct ifnet *ifp = iflib_get_ifp(ctx);
2926 struct ixgbe_hw *hw = &sc->hw;
2929 INIT_DEBUGOUT("ixgbe_resume: begin");
2931 /* Read & clear WUS register */
2932 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2934 device_printf(dev, "Woken up by (WUS): %#010x\n",
2935 IXGBE_READ_REG(hw, IXGBE_WUS));
/* Write-1-to-clear: wipe all wake-status bits. */
2936 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2937 /* And clear WUFC until next low-power transition */
2938 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2941 * Required after D3->D0 transition;
2942 * will re-advertise all previous advertised speeds
2944 if (ifp->if_flags & IFF_UP)
2948 } /* ixgbe_if_resume */
2950 /************************************************************************
2951 * ixgbe_if_mtu_set - Ioctl mtu entry point
2953 * Return 0 on success, EINVAL on failure
2954 ************************************************************************/
/*
 * iflib MTU-change callback: reject MTUs above the hardware maximum,
 * otherwise record the new max frame size (MTU + L2 header overhead).
 * NOTE(review): the EINVAL/0 return statements are elided from this
 * view -- confirm against the full file.
 */
2956 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2958 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2961 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2963 if (mtu > IXGBE_MAX_MTU) {
2966 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
2970 } /* ixgbe_if_mtu_set */
2972 /************************************************************************
2973 * ixgbe_if_crcstrip_set
2974 ************************************************************************/
/* NOTE(review): fragmentary extraction (signature type, decls of hl/rxc,
 * netmap #ifdef guards around the D() traces are missing). Toggles RX CRC
 * stripping consistently in both HLREG0 and RDRXCTL; "onoff && !crcstrip"
 * keeps the CRC (netmap fast-rx mode), otherwise restores stripping. */
2976 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2978 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2979 struct ixgbe_hw *hw = &sc->hw;
2980 /* crc stripping is set in two places:
2981 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2982 * IXGBE_RDRXCTL (set by the original driver in
2983 * ixgbe_setup_hw_rsc() called in init_locked.
2984 * We disable the setting when netmap is compiled in).
2985 * We update the values here, but also in ixgbe.c because
2986 * init_locked sometimes is called outside our control.
2990 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2991 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2994 D("%s read HLREG 0x%x rxc 0x%x",
2995 onoff ? "enter" : "exit", hl, rxc);
2997 /* hw requirements ... */
2998 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2999 rxc |= IXGBE_RDRXCTL_RSCACKC;
3000 if (onoff && !crcstrip) {
3001 /* keep the crc. Fast rx */
3002 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3003 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3005 /* reset default mode */
3006 hl |= IXGBE_HLREG0_RXCRCSTRP;
3007 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3011 D("%s write HLREG 0x%x rxc 0x%x",
3012 onoff ? "enter" : "exit", hl, rxc);
3014 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3015 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3016 } /* ixgbe_if_crcstrip_set */
3018 /*********************************************************************
3019 * ixgbe_if_init - Init entry point
3021 * Used in two ways: It is used by the stack as an init
3022 * entry point in network interface structure. It is also
3023 * used by the driver as a hw/sw initialization routine to
3024 * get to a consistent state.
3026 * Return 0 on success, positive on failure
3027 **********************************************************************/
/* NOTE(review): fragmentary extraction — many interleaved lines (locals
 * such as i/j/err/txdctl/rxdctl/mhadd/rxctrl/ctrl_ext, braces, the HW
 * reset call) are missing; visible code kept verbatim. Sequence visible:
 * align queue indices for IOV, program RAR[0] with the current LLADDR,
 * set up IOV/TX/multicast/RX, configure GPIE, program jumbo max frame
 * size (MHADD) when MTU > ETHERMTU, enable all TX rings (TXDCTL with
 * WTHRESH=8, PTHRESH=32, HTHRESH=1) and RX rings (polling RXDCTL_ENABLE),
 * turn on the RX DMA engine, route interrupts via IVARs (MSI-X) or a
 * single vector (legacy/MSI), kick-start SFP, set link ITR, power the
 * PHY, configure link/flow-control/VLAN/DMAC, enable interrupts, and
 * finally signal PF-reset-done (PFRSTD) so VFs may use the mailbox. */
3029 ixgbe_if_init(if_ctx_t ctx)
3031 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3032 struct ifnet *ifp = iflib_get_ifp(ctx);
3033 device_t dev = iflib_get_dev(ctx);
3034 struct ixgbe_hw *hw = &sc->hw;
3035 struct ix_rx_queue *rx_que;
3036 struct ix_tx_queue *tx_que;
3043 INIT_DEBUGOUT("ixgbe_if_init: begin");
3045 /* Queue indices may change with IOV mode */
3046 ixgbe_align_all_queue_indices(sc);
3048 /* reprogram the RAR[0] in case user changed it. */
3049 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3051 /* Get the latest mac address, User can use a LAA */
3052 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3053 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3054 hw->addr_ctrl.rar_used_count = 1;
3058 ixgbe_initialize_iov(sc);
3060 ixgbe_initialize_transmit_units(ctx);
3062 /* Setup Multicast table */
3063 ixgbe_if_multi_set(ctx);
3065 /* Determine the correct mbuf pool, based on frame size */
3066 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3068 /* Configure RX settings */
3069 ixgbe_initialize_receive_units(ctx);
3072 * Initialize variable holding task enqueue requests
3073 * from MSI-X interrupts
3075 sc->task_requests = 0;
3077 /* Enable SDP & MSI-X interrupts based on adapter */
3078 ixgbe_config_gpie(sc);
3081 if (ifp->if_mtu > ETHERMTU) {
3082 /* aka IXGBE_MAXFRS on 82599 and newer */
3083 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3084 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3085 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3086 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3089 /* Now enable all the queues */
3090 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
3091 struct tx_ring *txr = &tx_que->txr;
3093 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3094 txdctl |= IXGBE_TXDCTL_ENABLE;
3095 /* Set WTHRESH to 8, burst writeback */
3096 txdctl |= (8 << 16);
3098 * When the internal queue falls below PTHRESH (32),
3099 * start prefetching as long as there are at least
3100 * HTHRESH (1) buffers ready. The values are taken
3101 * from the Intel linux driver 3.8.21.
3102 * Prefetching enables tx line rate even with 1 queue.
3104 txdctl |= (32 << 0) | (1 << 8);
3105 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3108 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
3109 struct rx_ring *rxr = &rx_que->rxr;
3111 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3112 if (hw->mac.type == ixgbe_mac_82598EB) {
/* 82598: clear the descriptor threshold fields before enabling. */
3118 rxdctl &= ~0x3FFFFF;
3121 rxdctl |= IXGBE_RXDCTL_ENABLE;
3122 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
/* Poll (bounded, 10 tries) until the ring reports enabled. */
3123 for (j = 0; j < 10; j++) {
3124 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3125 IXGBE_RXDCTL_ENABLE)
3133 /* Enable Receive engine */
3134 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3135 if (hw->mac.type == ixgbe_mac_82598EB)
3136 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3137 rxctrl |= IXGBE_RXCTRL_RXEN;
3138 ixgbe_enable_rx_dma(hw, rxctrl);
3140 /* Set up MSI/MSI-X routing */
3141 if (ixgbe_enable_msix) {
3142 ixgbe_configure_ivars(sc);
3143 /* Set up auto-mask */
3144 if (hw->mac.type == ixgbe_mac_82598EB)
3145 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3147 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3148 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3150 } else { /* Simple settings for Legacy/MSI */
3151 ixgbe_set_ivar(sc, 0, 0, 0);
3152 ixgbe_set_ivar(sc, 0, 0, 1);
3153 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3156 ixgbe_init_fdir(sc);
3159 * Check on any SFP devices that
3160 * need to be kick-started
3162 if (hw->phy.type == ixgbe_phy_none) {
3163 err = hw->phy.ops.identify(hw);
3164 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3166 "Unsupported SFP+ module type was detected.\n");
3171 /* Set moderation on the Link interrupt */
3172 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3174 /* Enable power to the phy. */
3175 ixgbe_set_phy_power(hw, true);
3177 /* Config/Enable Link */
3178 ixgbe_config_link(ctx);
3180 /* Hardware Packet Buffer & Flow Control setup */
3181 ixgbe_config_delay_values(sc);
3183 /* Initialize the FC settings */
3186 /* Set up VLAN support and filter */
3187 ixgbe_setup_vlan_hw_support(ctx);
3189 /* Setup DMA Coalescing */
3190 ixgbe_config_dmac(sc);
3192 /* And now turn on interrupts */
3193 ixgbe_if_enable_intr(ctx);
3195 /* Enable the use of the MBX by the VF's */
3196 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3197 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3198 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3199 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3202 } /* ixgbe_init_locked */
3204 /************************************************************************
3207 * Setup the correct IVAR register for a particular MSI-X interrupt
3208 * (yes this is all very magic and confusing :)
3209 * - entry is the register array entry
3210 * - vector is the MSI-X vector for this queue
3211 * - type is RX/TX/MISC
3212 ************************************************************************/
/* NOTE(review): fragmentary extraction (return type, locals ivar/index,
 * break statements missing). 82598 packs four 8-bit vector entries per
 * IVAR word (TX entries offset by 64, type==-1 routes "other causes");
 * 82599/X540/X550 use IVAR_MISC for type==-1 and pack RX/TX entries for
 * a queue pair into one IVAR word (16*(entry&1) + 8*type byte lane). */
3214 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3216 struct ixgbe_hw *hw = &sc->hw;
/* Hardware requires the "allocation valid" bit set on the vector. */
3219 vector |= IXGBE_IVAR_ALLOC_VAL;
3221 switch (hw->mac.type) {
3222 case ixgbe_mac_82598EB:
3224 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3226 entry += (type * 64);
3227 index = (entry >> 2) & 0x1F;
3228 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3229 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3230 ivar |= (vector << (8 * (entry & 0x3)));
3231 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3233 case ixgbe_mac_82599EB:
3234 case ixgbe_mac_X540:
3235 case ixgbe_mac_X550:
3236 case ixgbe_mac_X550EM_x:
3237 case ixgbe_mac_X550EM_a:
3238 if (type == -1) { /* MISC IVAR */
3239 index = (entry & 1) * 8;
3240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3241 ivar &= ~(0xFF << index);
3242 ivar |= (vector << index);
3243 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3244 } else { /* RX/TX IVARS */
3245 index = (16 * (entry & 1)) + (8 * type);
3246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3247 ivar &= ~(0xFF << index);
3248 ivar |= (vector << index);
3249 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3254 } /* ixgbe_set_ivar */
3256 /************************************************************************
3257 * ixgbe_configure_ivars
3258 ************************************************************************/
/* NOTE(review): fragmentary extraction (newitr declaration/else missing).
 * Maps every RX and TX ring to its MSI-X vector via ixgbe_set_ivar(),
 * seeds each RX vector's EITR with an initial moderation value derived
 * from ixgbe_max_interrupt_rate, and routes the link interrupt (type -1)
 * to sc->vector. */
3260 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3262 struct ix_rx_queue *rx_que = sc->rx_queues;
3263 struct ix_tx_queue *tx_que = sc->tx_queues;
3266 if (ixgbe_max_interrupt_rate > 0)
/* EITR interval field: 4usec granularity, low 3 bits reserved. */
3267 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3270 * Disable DMA coalescing if interrupt moderation is
3277 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3278 struct rx_ring *rxr = &rx_que->rxr;
3280 /* First the RX queue entry */
3281 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3283 /* Set an Initial EITR value */
3284 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3286 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3287 struct tx_ring *txr = &tx_que->txr;
3289 /* ... and the TX */
3290 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3292 /* For the Link interrupt */
3293 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3294 } /* ixgbe_configure_ivars */
3296 /************************************************************************
3298 ************************************************************************/
/* NOTE(review): fragmentary extraction (gpie declaration, additional
 * GPIE mode bits, break statements missing). Programs the General
 * Purpose Interrupt Enable register: enhanced MSI-X mode when using
 * MSI-X, plus SDP pins for fan-failure, thermal-sensor, and per-MAC
 * link/module detection interrupts. */
3300 ixgbe_config_gpie(struct ixgbe_softc *sc)
3302 struct ixgbe_hw *hw = &sc->hw;
3305 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3307 if (sc->intr_type == IFLIB_INTR_MSIX) {
3308 /* Enable Enhanced MSI-X mode */
3309 gpie |= IXGBE_GPIE_MSIX_MODE
3311 | IXGBE_GPIE_PBA_SUPPORT
3315 /* Fan Failure Interrupt */
3316 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3317 gpie |= IXGBE_SDP1_GPIEN;
3319 /* Thermal Sensor Interrupt */
3320 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3321 gpie |= IXGBE_SDP0_GPIEN_X540;
3323 /* Link detection */
3324 switch (hw->mac.type) {
3325 case ixgbe_mac_82599EB:
3326 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3328 case ixgbe_mac_X550EM_x:
3329 case ixgbe_mac_X550EM_a:
3330 gpie |= IXGBE_SDP0_GPIEN_X540;
3336 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3338 } /* ixgbe_config_gpie */
3340 /************************************************************************
3341 * ixgbe_config_delay_values
3343 * Requires sc->max_frame_size to be set.
3344 ************************************************************************/
/* NOTE(review): fragmentary extraction (break statements, default labels
 * missing). Computes flow-control high/low watermarks from the max frame
 * size using the per-generation delay-value macros (IXGBE_DV vs
 * IXGBE_DV_X540), converts bit-times to KB, and sets pause time / XON. */
3346 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3348 struct ixgbe_hw *hw = &sc->hw;
3349 u32 rxpb, frame, size, tmp;
3351 frame = sc->max_frame_size;
3353 /* Calculate High Water */
3354 switch (hw->mac.type) {
3355 case ixgbe_mac_X540:
3356 case ixgbe_mac_X550:
3357 case ixgbe_mac_X550EM_x:
3358 case ixgbe_mac_X550EM_a:
3359 tmp = IXGBE_DV_X540(frame, frame);
3362 tmp = IXGBE_DV(frame, frame);
3365 size = IXGBE_BT2KB(tmp);
/* RXPBSIZE is in bytes; >>10 converts to KB to match `size`. */
3366 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3367 hw->fc.high_water[0] = rxpb - size;
3369 /* Now calculate Low Water */
3370 switch (hw->mac.type) {
3371 case ixgbe_mac_X540:
3372 case ixgbe_mac_X550:
3373 case ixgbe_mac_X550EM_x:
3374 case ixgbe_mac_X550EM_a:
3375 tmp = IXGBE_LOW_DV_X540(frame);
3378 tmp = IXGBE_LOW_DV(frame);
3381 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3383 hw->fc.pause_time = IXGBE_FC_PAUSE;
3384 hw->fc.send_xon = true;
3385 } /* ixgbe_config_delay_values */
3387 /************************************************************************
3388 * ixgbe_set_multi - Multicast Update
3390 * Called whenever multicast address list is updated.
3391 ************************************************************************/
/* NOTE(review): fragmentary extraction (return type/statements missing).
 * if_foreach_llmaddr() callback: copies each link-level multicast
 * address into sc->mta[idx] with the PF pool, stopping at the
 * MAX_NUM_MULTICAST_ADDRESSES table limit. */
3393 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3395 struct ixgbe_softc *sc = arg;
3396 struct ixgbe_mc_addr *mta = sc->mta;
3398 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3400 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3401 mta[idx].vmdq = sc->pool;
3404 } /* ixgbe_mc_filter_apply */
/* NOTE(review): fragmentary extraction (return type, decls of mcnt/fctrl/
 * update_ptr, mta assignment missing). Rebuilds the multicast table from
 * the interface's lladdr list, then sets FCTRL promiscuous/allmulti bits
 * according to ifp flags (falling back to MPE when the table overflows),
 * and pushes the list to hardware via ixgbe_update_mc_addr_list(). */
3407 ixgbe_if_multi_set(if_ctx_t ctx)
3409 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3410 struct ixgbe_mc_addr *mta;
3411 struct ifnet *ifp = iflib_get_ifp(ctx);
3416 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3419 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3421 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
3423 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3425 if (ifp->if_flags & IFF_PROMISC)
3426 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3427 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3428 ifp->if_flags & IFF_ALLMULTI) {
3429 fctrl |= IXGBE_FCTRL_MPE;
3430 fctrl &= ~IXGBE_FCTRL_UPE;
3432 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3434 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3436 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3437 update_ptr = (u8 *)mta;
3438 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3439 ixgbe_mc_array_itr, true);
3442 } /* ixgbe_if_multi_set */
3444 /************************************************************************
3445 * ixgbe_mc_array_itr
3447 * An iterator function needed by the multicast shared code.
3448 * It feeds the shared code routine the addresses in the
3449 * array of ixgbe_set_multi() one by one.
3450 ************************************************************************/
/* NOTE(review): fragmentary extraction (return type, *vmdq assignment,
 * return statement missing). Advances *update_ptr one ixgbe_mc_addr
 * entry per call for ixgbe_update_mc_addr_list(). */
3452 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3454 struct ixgbe_mc_addr *mta;
3456 mta = (struct ixgbe_mc_addr *)*update_ptr;
3459 *update_ptr = (u8*)(mta + 1);
3462 } /* ixgbe_mc_array_itr */
3464 /************************************************************************
3465 * ixgbe_local_timer - Timer routine
3467 * Checks for link status, updates statistics,
3468 * and runs the watchdog check.
3469 ************************************************************************/
/* NOTE(review): fragmentary extraction (qid==0 guard likely dropped).
 * Periodic iflib timer: probes for newly-inserted SFP optics, refreshes
 * cached link state, and defers the admin task for further processing. */
3471 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3473 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3478 /* Check for pluggable optics */
3480 if (!ixgbe_sfp_probe(ctx))
3481 return; /* Nothing to do */
3483 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3485 /* Fire off the adminq task */
3486 iflib_admin_intr_deferred(ctx);
3488 } /* ixgbe_if_timer */
3490 /************************************************************************
3493 * Determine if a port had optics inserted.
3494 ************************************************************************/
/* NOTE(review): fragmentary extraction (goto/out label, result=true
 * assignment missing). For phy_nl ports previously seen without an SFP:
 * re-identify the module, reset the PHY, and report whether supported
 * optics are now present; returns false when nothing changed. */
3496 ixgbe_sfp_probe(if_ctx_t ctx)
3498 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3499 struct ixgbe_hw *hw = &sc->hw;
3500 device_t dev = iflib_get_dev(ctx);
3501 bool result = false;
3503 if ((hw->phy.type == ixgbe_phy_nl) &&
3504 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3505 s32 ret = hw->phy.ops.identify_sfp(hw);
3508 ret = hw->phy.ops.reset(hw);
3509 sc->sfp_probe = false;
3510 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3511 device_printf(dev, "Unsupported SFP+ module detected!");
3513 "Reload driver with supported module.\n");
3516 device_printf(dev, "SFP+ module detected!\n");
3517 /* We now have supported optics */
3523 } /* ixgbe_sfp_probe */
3525 /************************************************************************
3526 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3527 ************************************************************************/
/* NOTE(review): fragmentary extraction (ESDP bit masks, cage-empty check,
 * error printfs, handle_mod_out label body missing). On an SFP module
 * interrupt: with the crosstalk-fix quirk, first verify via ESDP that
 * the cage actually holds a module; then identify the SFP, run the PHY
 * reset (82598) or setup_sfp (others), and on success request the MSF
 * task. On any unsupported-module failure the MSF request is cleared. */
3529 ixgbe_handle_mod(void *context)
3531 if_ctx_t ctx = context;
3532 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3533 struct ixgbe_hw *hw = &sc->hw;
3534 device_t dev = iflib_get_dev(ctx);
3535 u32 err, cage_full = 0;
3537 if (sc->hw.need_crosstalk_fix) {
3538 switch (hw->mac.type) {
3539 case ixgbe_mac_82599EB:
3540 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3543 case ixgbe_mac_X550EM_x:
3544 case ixgbe_mac_X550EM_a:
3545 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3553 goto handle_mod_out;
3556 err = hw->phy.ops.identify_sfp(hw);
3557 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3559 "Unsupported SFP+ module type was detected.\n");
3560 goto handle_mod_out;
3563 if (hw->mac.type == ixgbe_mac_82598EB)
3564 err = hw->phy.ops.reset(hw);
3566 err = hw->mac.ops.setup_sfp(hw);
3568 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3570 "Setup failure - unsupported SFP+ module type.\n");
3571 goto handle_mod_out;
3573 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
/* Error path: make sure a stale MSF request does not linger. */
3577 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3578 } /* ixgbe_handle_mod */
3581 /************************************************************************
3582 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3583 ************************************************************************/
/* NOTE(review): fragmentary extraction (autoneg/negotiate declarations
 * missing). Re-identifies the supported PHY layer, re-runs link setup
 * with the advertised (or capability-derived) speeds, and rebuilds the
 * ifmedia list so ifconfig reflects the new module's media types. */
3585 ixgbe_handle_msf(void *context)
3587 if_ctx_t ctx = context;
3588 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3589 struct ixgbe_hw *hw = &sc->hw;
3593 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3594 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3596 autoneg = hw->phy.autoneg_advertised;
3597 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3598 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3599 if (hw->mac.ops.setup_link)
3600 hw->mac.ops.setup_link(hw, autoneg, true);
3602 /* Adjust media types shown in ifconfig */
3603 ifmedia_removeall(sc->media);
3604 ixgbe_add_media_types(sc->ctx);
3605 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3606 } /* ixgbe_handle_msf */
3608 /************************************************************************
3609 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3610 ************************************************************************/
/* NOTE(review): fragmentary extraction (error declaration, else-if
 * condition line missing). Services a LASI (link alarm status) interrupt
 * from the external PHY; over-temperature gets a critical console
 * warning, other failures are logged with the error code. */
3612 ixgbe_handle_phy(void *context)
3614 if_ctx_t ctx = context;
3615 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3616 struct ixgbe_hw *hw = &sc->hw;
3619 error = hw->phy.ops.handle_lasi(hw);
3620 if (error == IXGBE_ERR_OVERTEMP)
3621 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3623 device_printf(sc->dev,
3624 "Error handling LASI interrupt: %d\n", error);
3625 } /* ixgbe_handle_phy */
3627 /************************************************************************
3628 * ixgbe_if_stop - Stop the hardware
3630 * Disables all traffic on the adapter by issuing a
3631 * global reset on the MAC and deallocates TX/RX buffers.
3632 ************************************************************************/
/* NOTE(review): fragmentary extraction. Quiesces the adapter: stops the
 * MAC (clearing adapter_stopped first so the stop actually runs), parks
 * the 82599 link for D3, disables the TX laser, marks link down to the
 * stack, and restores RAR[0]. */
3634 ixgbe_if_stop(if_ctx_t ctx)
3636 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3637 struct ixgbe_hw *hw = &sc->hw;
3639 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3642 hw->adapter_stopped = false;
3643 ixgbe_stop_adapter(hw);
3644 if (hw->mac.type == ixgbe_mac_82599EB)
3645 ixgbe_stop_mac_link_on_d3_82599(hw);
3646 /* Turn off the laser - noop with no optics */
3647 ixgbe_disable_tx_laser(hw);
3649 /* Update the stack */
3650 sc->link_up = false;
3651 ixgbe_if_update_admin_status(ctx);
3653 /* reprogram the RAR[0] in case user changed it. */
3654 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3657 } /* ixgbe_if_stop */
3659 /************************************************************************
3660 * ixgbe_update_link_status - Update OS on link state
3662 * Note: Only updates the OS on the cached link state.
3663 * The real check of the hardware only happens with
3665 ************************************************************************/
/* NOTE(review): fragmentary extraction (outer `if (sc->link_up)` and
 * bootverbose guards appear dropped). Propagates cached link state to
 * the OS: on up-transition enables flow control, reconfigures DMAC,
 * reports link up (rate shown is hard-coded, see "should actually be
 * negotiated value" note) and pings VFs; on down-transition reports
 * link down. Then drains task_requests queued by msix_link() by running
 * the matching tasklets, and refreshes statistics counters. */
3667 ixgbe_if_update_admin_status(if_ctx_t ctx)
3669 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3670 device_t dev = iflib_get_dev(ctx);
3673 if (sc->link_active == false) {
/* link_speed==128 is the IXGBE_LINK_SPEED_10GB_FULL flag value. */
3675 device_printf(dev, "Link is up %d Gbps %s \n",
3676 ((sc->link_speed == 128) ? 10 : 1),
3678 sc->link_active = true;
3679 /* Update any Flow Control changes */
3680 ixgbe_fc_enable(&sc->hw);
3681 /* Update DMA coalescing config */
3682 ixgbe_config_dmac(sc);
3683 /* should actually be negotiated value */
3684 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3686 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3687 ixgbe_ping_all_vfs(sc);
3689 } else { /* Link down */
3690 if (sc->link_active == true) {
3692 device_printf(dev, "Link is Down\n");
3693 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3694 sc->link_active = false;
3695 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3696 ixgbe_ping_all_vfs(sc);
3700 /* Handle task requests from msix_link() */
3701 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3702 ixgbe_handle_mod(ctx);
3703 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3704 ixgbe_handle_msf(ctx);
3705 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3706 ixgbe_handle_mbx(ctx);
3707 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3708 ixgbe_reinit_fdir(ctx);
3709 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3710 ixgbe_handle_phy(ctx);
3711 sc->task_requests = 0;
3713 ixgbe_update_stats_counters(sc);
3714 } /* ixgbe_if_update_admin_status */
3716 /************************************************************************
3717 * ixgbe_config_dmac - Configure DMA Coalescing
3718 ************************************************************************/
/* NOTE(review): fragmentary extraction (num_tcs assignment missing).
 * X550-and-newer only (guarded by mac.type and the dmac_config op).
 * Re-programs DMA coalescing whenever the sysctl watchdog value or the
 * current link speed differs from what the shared code last saw. */
3720 ixgbe_config_dmac(struct ixgbe_softc *sc)
3722 struct ixgbe_hw *hw = &sc->hw;
3723 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3725 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR used as "differs" test on the watchdog/link-speed pair. */
3728 if (dcfg->watchdog_timer ^ sc->dmac ||
3729 dcfg->link_speed ^ sc->link_speed) {
3730 dcfg->watchdog_timer = sc->dmac;
3731 dcfg->fcoe_en = false;
3732 dcfg->link_speed = sc->link_speed;
3735 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3736 dcfg->watchdog_timer, dcfg->link_speed);
3738 hw->mac.ops.dmac_config(hw);
3740 } /* ixgbe_config_dmac */
3742 /************************************************************************
3743 * ixgbe_if_enable_intr
3744 ************************************************************************/
/* NOTE(review): fragmentary extraction (mask/fwsm declarations, break
 * and default labels missing). Builds the EIMS "other causes" mask per
 * MAC generation (ECC, thermal sensor, SDP pins for SFP/PHY events),
 * adds fan-fail / mailbox / flow-director bits per enabled features,
 * writes EIMS, programs EIAC auto-clear for MSI-X (excluding link and
 * mailbox causes), then unmasks every RX queue vector individually. */
3746 ixgbe_if_enable_intr(if_ctx_t ctx)
3748 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3749 struct ixgbe_hw *hw = &sc->hw;
3750 struct ix_rx_queue *que = sc->rx_queues;
3753 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3755 switch (sc->hw.mac.type) {
3756 case ixgbe_mac_82599EB:
3757 mask |= IXGBE_EIMS_ECC;
3758 /* Temperature sensor on some scs */
3759 mask |= IXGBE_EIMS_GPI_SDP0;
3760 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3761 mask |= IXGBE_EIMS_GPI_SDP1;
3762 mask |= IXGBE_EIMS_GPI_SDP2;
3764 case ixgbe_mac_X540:
3765 /* Detect if Thermal Sensor is enabled */
3766 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3767 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3768 mask |= IXGBE_EIMS_TS;
3769 mask |= IXGBE_EIMS_ECC;
3771 case ixgbe_mac_X550:
3772 /* MAC thermal sensor is automatically enabled */
3773 mask |= IXGBE_EIMS_TS;
3774 mask |= IXGBE_EIMS_ECC;
3776 case ixgbe_mac_X550EM_x:
3777 case ixgbe_mac_X550EM_a:
3778 /* Some devices use SDP0 for important information */
3779 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3780 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3781 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3782 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3783 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3784 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3785 mask |= IXGBE_EICR_GPI_SDP0_X540;
3786 mask |= IXGBE_EIMS_ECC;
3792 /* Enable Fan Failure detection */
3793 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3794 mask |= IXGBE_EIMS_GPI_SDP1;
3796 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3797 mask |= IXGBE_EIMS_MAILBOX;
3798 /* Enable Flow Director */
3799 if (sc->feat_en & IXGBE_FEATURE_FDIR)
3800 mask |= IXGBE_EIMS_FLOW_DIR;
3802 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3804 /* With MSI-X we use auto clear */
3805 if (sc->intr_type == IFLIB_INTR_MSIX) {
3806 mask = IXGBE_EIMS_ENABLE_MASK;
3807 /* Don't autoclear Link */
3808 mask &= ~IXGBE_EIMS_OTHER;
3809 mask &= ~IXGBE_EIMS_LSC;
3810 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
3811 mask &= ~IXGBE_EIMS_MAILBOX;
3812 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3816 * Now enable all queues, this is done separately to
3817 * allow for handling the extended (beyond 32) MSI-X
3818 * vectors that can be used by 82599
3820 for (int i = 0; i < sc->num_rx_queues; i++, que++)
3821 ixgbe_enable_queue(sc, que->msix);
3823 IXGBE_WRITE_FLUSH(hw);
3825 } /* ixgbe_if_enable_intr */
3827 /************************************************************************
3828 * ixgbe_disable_intr
3829 ************************************************************************/
/* NOTE(review): fragmentary extraction (else keyword line missing).
 * Masks all interrupts: clears EIAC auto-clear under MSI-X, then writes
 * EIMC — one register on 82598, EIMC plus both EIMC_EX halves on newer
 * MACs — and flushes the writes. */
3831 ixgbe_if_disable_intr(if_ctx_t ctx)
3833 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3835 if (sc->intr_type == IFLIB_INTR_MSIX)
3836 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3837 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3838 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3840 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3841 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3842 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3844 IXGBE_WRITE_FLUSH(&sc->hw);
3846 } /* ixgbe_if_disable_intr */
3848 /************************************************************************
3849 * ixgbe_link_intr_enable
3850 ************************************************************************/
/* NOTE(review): fragmentary extraction. Re-arms the link/other-cause
 * interrupts (which are not auto-cleared) after the admin task runs. */
3852 ixgbe_link_intr_enable(if_ctx_t ctx)
3854 struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3856 /* Re-enable other interrupts */
3857 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3858 } /* ixgbe_link_intr_enable */
3860 /************************************************************************
3861 * ixgbe_if_rx_queue_intr_enable
3862 ************************************************************************/
/* NOTE(review): fragmentary extraction (return statement missing).
 * iflib per-queue re-enable hook: unmasks the MSI-X vector backing
 * RX queue `rxqid`. */
3864 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3866 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3867 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3869 ixgbe_enable_queue(sc, que->msix);
3872 } /* ixgbe_if_rx_queue_intr_enable */
3874 /************************************************************************
3875 * ixgbe_enable_queue
3876 ************************************************************************/
/* NOTE(review): fragmentary extraction (mask declaration, else branch,
 * non-zero-mask guards missing). Unmasks one queue vector: 82598 uses
 * the single EIMS register; newer MACs split the 64-bit queue bitmap
 * across EIMS_EX(0)/EIMS_EX(1). */
3878 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3880 struct ixgbe_hw *hw = &sc->hw;
3881 u64 queue = 1ULL << vector;
3884 if (hw->mac.type == ixgbe_mac_82598EB) {
3885 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3886 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3888 mask = (queue & 0xFFFFFFFF);
3890 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3891 mask = (queue >> 32);
3893 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3895 } /* ixgbe_enable_queue */
3897 /************************************************************************
3898 * ixgbe_disable_queue
3899 ************************************************************************/
/* NOTE(review): fragmentary extraction — mirror of ixgbe_enable_queue:
 * masks one queue vector via EIMC (82598) or EIMC_EX(0)/(1) (newer). */
3901 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
3903 struct ixgbe_hw *hw = &sc->hw;
3904 u64 queue = 1ULL << vector;
3907 if (hw->mac.type == ixgbe_mac_82598EB) {
3908 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3909 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3911 mask = (queue & 0xFFFFFFFF);
3913 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3914 mask = (queue >> 32);
3916 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3918 } /* ixgbe_disable_queue */
3920 /************************************************************************
3921 * ixgbe_intr - Legacy Interrupt Service Routine
3922 ************************************************************************/
/* NOTE(review): fragmentary extraction (the eicr==0 spurious-interrupt
 * check and some 82599/X540 branch lines missing). Legacy/MSI filter:
 * reads+clears EICR, flags fan failure (82598AT/SDP1), defers the admin
 * task on LSC, queues MOD/MSF tasks for SFP events and a PHY task for
 * external-PHY (x550em_ext_t) interrupts, then schedules the thread. */
3924 ixgbe_intr(void *arg)
3926 struct ixgbe_softc *sc = arg;
3927 struct ix_rx_queue *que = sc->rx_queues;
3928 struct ixgbe_hw *hw = &sc->hw;
3929 if_ctx_t ctx = sc->ctx;
3930 u32 eicr, eicr_mask;
3932 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious-interrupt path: re-enable and claim handled. */
3936 ixgbe_if_enable_intr(ctx);
3937 return (FILTER_HANDLED);
3940 /* Check for fan failure */
3941 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3942 (eicr & IXGBE_EICR_GPI_SDP1)) {
3943 device_printf(sc->dev,
3944 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3945 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3948 /* Link status change */
3949 if (eicr & IXGBE_EICR_LSC) {
3950 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3951 iflib_admin_intr_deferred(ctx);
3954 if (ixgbe_is_sfp(hw)) {
3955 /* Pluggable optics-related interrupt */
3956 if (hw->mac.type >= ixgbe_mac_X540)
3957 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3959 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3961 if (eicr & eicr_mask) {
3962 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3963 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
3966 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3967 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3968 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3969 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3970 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3974 /* External PHY interrupt */
3975 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3976 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3977 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
3979 return (FILTER_SCHEDULE_THREAD);
3982 /************************************************************************
3983 * ixgbe_free_pci_resources
3984 ************************************************************************/
/* NOTE(review): fragmentary extraction. Teardown: releases the admin
 * MSI-X vector, each per-queue IRQ, and the BAR memory resource (rid
 * recovered via rman_get_rid so no rid bookkeeping is needed). */
3986 ixgbe_free_pci_resources(if_ctx_t ctx)
3988 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3989 struct ix_rx_queue *que = sc->rx_queues;
3990 device_t dev = iflib_get_dev(ctx);
3992 /* Release all MSI-X queue resources */
3993 if (sc->intr_type == IFLIB_INTR_MSIX)
3994 iflib_irq_free(ctx, &sc->irq);
3997 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
3998 iflib_irq_free(ctx, &que->que_irq);
4002 if (sc->pci_mem != NULL)
4003 bus_release_resource(dev, SYS_RES_MEMORY,
4004 rman_get_rid(sc->pci_mem), sc->pci_mem);
4005 } /* ixgbe_free_pci_resources */
4007 /************************************************************************
4008 * ixgbe_sysctl_flowcntl
4010 * SYSCTL wrapper around setting Flow Control
4011 ************************************************************************/
/* NOTE(review): fragmentary extraction (return type, int decls, early
 * returns missing). Standard sysctl pattern: publish current mode, and
 * on write apply the new value via ixgbe_set_flowcntl() only when it
 * actually changed. */
4013 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4015 struct ixgbe_softc *sc;
4018 sc = (struct ixgbe_softc *)arg1;
4019 fc = sc->hw.fc.current_mode;
4021 error = sysctl_handle_int(oidp, &fc, 0, req);
4022 if ((error) || (req->newptr == NULL))
4025 /* Don't bother if it's not changed */
4026 if (fc == sc->hw.fc.current_mode)
4029 return ixgbe_set_flowcntl(sc, fc);
4030 } /* ixgbe_sysctl_flowcntl */
4032 /************************************************************************
4033 * ixgbe_set_flowcntl - Set flow control
4035 * Flow control values:
4040 ************************************************************************/
/* NOTE(review): fragmentary extraction (switch head, ixgbe_fc_full case,
 * EINVAL default, sc->hw.fc.fc update and return missing). Applies the
 * requested flow-control mode; with multiple RX queues, per-queue packet
 * drop is disabled when pause is active and re-enabled when it is not,
 * then autoneg is suppressed and the mode is forced via ixgbe_fc_enable. */
4042 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4045 case ixgbe_fc_rx_pause:
4046 case ixgbe_fc_tx_pause:
4048 sc->hw.fc.requested_mode = fc;
4049 if (sc->num_rx_queues > 1)
4050 ixgbe_disable_rx_drop(sc);
4053 sc->hw.fc.requested_mode = ixgbe_fc_none;
4054 if (sc->num_rx_queues > 1)
4055 ixgbe_enable_rx_drop(sc);
4061 /* Don't autoneg if forcing a value */
4062 sc->hw.fc.disable_fc_autoneg = true;
4063 ixgbe_fc_enable(&sc->hw);
4066 } /* ixgbe_set_flowcntl */
4068 /************************************************************************
4069 * ixgbe_enable_rx_drop
4071 * Enable the hardware to drop packets when the buffer is
4072 * full. This is useful with multiqueue, so that no single
4073 * queue being full stalls the entire RX engine. We only
4074 * enable this when Multiqueue is enabled AND Flow Control
4076 ************************************************************************/
/* NOTE(review): fragmentary extraction (srrctl declaration, QDE enable
 * bit missing). Sets SRRCTL.DROP_EN on every PF RX ring and writes QDE
 * drop-enable for each VF queue pair. */
4078 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4080 struct ixgbe_hw *hw = &sc->hw;
4081 struct rx_ring *rxr;
4084 for (int i = 0; i < sc->num_rx_queues; i++) {
4085 rxr = &sc->rx_queues[i].rxr;
4086 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4087 srrctl |= IXGBE_SRRCTL_DROP_EN;
4088 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4091 /* enable drop for each vf */
4092 for (int i = 0; i < sc->num_vfs; i++) {
4093 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4094 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4097 } /* ixgbe_enable_rx_drop */
4099 /************************************************************************
4100 * ixgbe_disable_rx_drop
4101 ************************************************************************/
/* NOTE(review): fragmentary extraction — mirror of ixgbe_enable_rx_drop:
 * clears SRRCTL.DROP_EN on every PF RX ring and writes QDE without the
 * drop-enable bit for each VF. Used when flow control needs queues to
 * backpressure instead of drop. */
4103 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4105 struct ixgbe_hw *hw = &sc->hw;
4106 struct rx_ring *rxr;
4109 for (int i = 0; i < sc->num_rx_queues; i++) {
4110 rxr = &sc->rx_queues[i].rxr;
4111 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4112 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4113 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4116 /* disable drop for each vf */
4117 for (int i = 0; i < sc->num_vfs; i++) {
4118 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4119 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4121 } /* ixgbe_disable_rx_drop */
4123 /************************************************************************
4124 * ixgbe_sysctl_advertise
4126 * SYSCTL wrapper around setting advertised speed
4127 ************************************************************************/
4129 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4131 struct ixgbe_softc *sc;
4132 int error, advertise;
4134 sc = (struct ixgbe_softc *)arg1;
/* Seed the sysctl with the currently cached advertise mask. */
4135 advertise = sc->advertise;
4137 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* Read-only access (no new value) or handler error: stop here. */
4138 if ((error) || (req->newptr == NULL))
/* Delegate validation and hardware programming to ixgbe_set_advertise(). */
4141 return ixgbe_set_advertise(sc, advertise);
4142 } /* ixgbe_sysctl_advertise */
4144 /************************************************************************
4145 * ixgbe_set_advertise - Control advertised link speed
4148 * 0x1 - advertise 100 Mb
4149 * 0x2 - advertise 1G
4150 * 0x4 - advertise 10G
4151 * 0x8 - advertise 10 Mb (yes, Mb)
4152 * 0x10 - advertise 2.5G (disabled by default)
4153 * 0x20 - advertise 5G (disabled by default)
4155 ************************************************************************/
4157 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4159 device_t dev = iflib_get_dev(sc->ctx);
4160 struct ixgbe_hw *hw;
4161 ixgbe_link_speed speed = 0;
4162 ixgbe_link_speed link_caps = 0;
4163 s32 err = IXGBE_NOT_IMPLEMENTED;
4164 bool negotiate = false;
4166 /* Checks to validate new value */
4167 if (sc->advertise == advertise) /* no change */
4172 /* No speed changes for backplane media */
4173 if (hw->phy.media_type == ixgbe_media_type_backplane)
/* Advertised speed only applies to copper PHYs and multispeed fiber. */
4176 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4177 (hw->phy.multispeed_fiber))) {
4178 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n")
/* 0x3F is the OR of all six valid speed bits documented above. */
4182 if (advertise < 0x1 || advertise > 0x3F) {
4183 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
/* Ask the MAC what it can actually link at before honoring the request. */
4187 if (hw->mac.ops.get_link_capabilities) {
4188 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4190 if (err != IXGBE_SUCCESS) {
4191 device_printf(dev, "Unable to determine supported advertise speeds\n");
4196 /* Set new value and report new advertised mode */
/*
 * Translate each user bit into the corresponding IXGBE_LINK_SPEED_*
 * flag, rejecting any bit the hardware's link_caps does not support.
 * NOTE(review): the error-return statements inside these checks are
 * missing from this excerpt.
 */
4197 if (advertise & 0x1) {
4198 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4199 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4202 speed |= IXGBE_LINK_SPEED_100_FULL;
4204 if (advertise & 0x2) {
4205 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4206 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4209 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4211 if (advertise & 0x4) {
4212 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4213 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4216 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4218 if (advertise & 0x8) {
4219 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4220 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4223 speed |= IXGBE_LINK_SPEED_10_FULL;
4225 if (advertise & 0x10) {
4226 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4227 device_printf(dev, "Interface does not support 2.5G advertised speed\n");
4230 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4232 if (advertise & 0x20) {
4233 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4234 device_printf(dev, "Interface does not support 5G advertised speed\n");
4237 speed |= IXGBE_LINK_SPEED_5GB_FULL;
/* Program the link and cache the accepted value for future sysctl reads. */
4240 hw->mac.autotry_restart = true;
4241 hw->mac.ops.setup_link(hw, speed, true);
4242 sc->advertise = advertise;
4245 } /* ixgbe_set_advertise */
4247 /************************************************************************
4248 * ixgbe_get_default_advertise - Get default advertised speed settings
4250 * Formatted for sysctl usage.
4252 * 0x1 - advertise 100 Mb
4253 * 0x2 - advertise 1G
4254 * 0x4 - advertise 10G
4255 * 0x8 - advertise 10 Mb (yes, Mb)
4256 * 0x10 - advertise 2.5G (disabled by default)
4257 * 0x20 - advertise 5G (disabled by default)
4258 ************************************************************************/
4260 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4262 struct ixgbe_hw *hw = &sc->hw;
4264 ixgbe_link_speed link_caps = 0;
4266 bool negotiate = false;
4269 * Advertised speed means nothing unless it's copper or
4272 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4273 !(hw->phy.multispeed_fiber))
/* Query hardware link capabilities; bail out on failure. */
4276 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4277 if (err != IXGBE_SUCCESS)
4280 if (hw->mac.type == ixgbe_mac_X550) {
4282 * 2.5G and 5G autonegotiation speeds on X550
4283 * are disabled by default due to reported
4284 * interoperability issues with some switches.
4286 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4287 IXGBE_LINK_SPEED_5GB_FULL);
/*
 * Fold the capability flags into the sysctl bitmask documented in the
 * banner above (same encoding as ixgbe_set_advertise's input).
 */
4291 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4292 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4293 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4294 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4295 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4296 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4299 } /* ixgbe_get_default_advertise */
4301 /************************************************************************
4302 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4305 * 0/1 - off / on (use default value of 1000)
4307 * Legal timer values are:
4308 * 50,100,250,500,1000,2000,5000,10000
4310 * Turning off interrupt moderation will also turn this off.
4311 ************************************************************************/
4313 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4315 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4316 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
/* 16-bit handler: DMAC timer values fit in a u16. */
4321 error = sysctl_handle_16(oidp, &newval, 0, req);
4322 if ((error) || (req->newptr == NULL))
/*
 * NOTE(review): the switch over newval is missing from this excerpt;
 * only the case-comment markers below survive.
 */
4331 /* Enable and use default */
4342 /* Legal values - allow */
4346 /* Do nothing, illegal value */
4350 /* Re-initialize hardware if it's already running */
4351 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4355 } /* ixgbe_sysctl_dmac */
4358 /************************************************************************
4359 * ixgbe_sysctl_power_state
4361 * Sysctl to test power states
4363 * 0 - set device to D0
4364 * 3 - set device to D3
4365 * (none) - get current device power state
4366 ************************************************************************/
4368 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4370 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4371 device_t dev = sc->dev;
4372 int curr_ps, new_ps, error = 0;
/* Seed the sysctl with the current PCI power state. */
4374 curr_ps = new_ps = pci_get_powerstate(dev);
4376 error = sysctl_handle_int(oidp, &new_ps, 0, req);
/* Read-only access or handler error: nothing to change. */
4377 if ((error) || (req->newptr == NULL))
4380 if (new_ps == curr_ps)
/*
 * Only the D0<->D3 transitions are supported; they are driven through
 * the bus suspend/resume methods so the full driver path is exercised.
 */
4383 if (new_ps == 3 && curr_ps == 0)
4384 error = DEVICE_SUSPEND(dev);
4385 else if (new_ps == 0 && curr_ps == 3)
4386 error = DEVICE_RESUME(dev);
4390 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4393 } /* ixgbe_sysctl_power_state */
4396 /************************************************************************
4397 * ixgbe_sysctl_wol_enable
4399 * Sysctl to enable/disable the WoL capability,
4400 * if supported by the adapter.
4405 ************************************************************************/
4407 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4409 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4410 struct ixgbe_hw *hw = &sc->hw;
4411 int new_wol_enabled;
4414 new_wol_enabled = hw->wol_enabled;
4415 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4416 if ((error) || (req->newptr == NULL))
/* Normalize any non-zero user input to 1. */
4418 new_wol_enabled = !!(new_wol_enabled);
4419 if (new_wol_enabled == hw->wol_enabled)
/* Reject enabling WoL on adapters that do not support it. */
4422 if (new_wol_enabled > 0 && !sc->wol_support)
4425 hw->wol_enabled = new_wol_enabled;
4428 } /* ixgbe_sysctl_wol_enable */
4430 /************************************************************************
4431 * ixgbe_sysctl_wufc - Wake Up Filter Control
4433 * Sysctl to enable/disable the types of packets that the
4434 * adapter will wake up on upon receipt.
4436 * 0x1 - Link Status Change
4437 * 0x2 - Magic Packet
4438 * 0x4 - Direct Exact
4439 * 0x8 - Directed Multicast
4441 * 0x20 - ARP/IPv4 Request Packet
4442 * 0x40 - Direct IPv4 Packet
4443 * 0x80 - Direct IPv6 Packet
4445 * Settings not listed above will cause the sysctl to return an error.
4446 ************************************************************************/
4448 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4450 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4454 new_wufc = sc->wufc;
4456 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4457 if ((error) || (req->newptr == NULL))
4459 if (new_wufc == sc->wufc)
/* Only the low byte holds valid filter bits; reject anything above it. */
4462 if (new_wufc & 0xffffff00)
/* Preserve the (non-user-visible) upper bits of the cached value. */
4466 new_wufc |= (0xffffff & sc->wufc);
4467 sc->wufc = new_wufc;
4470 } /* ixgbe_sysctl_wufc */
4473 /************************************************************************
4474 * ixgbe_sysctl_print_rss_config
4475 ************************************************************************/
4477 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4479 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4480 struct ixgbe_hw *hw = &sc->hw;
4481 device_t dev = sc->dev;
4483 int error = 0, reta_size;
/* sbuf sized for sysctl output; grows as needed. */
4486 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4488 device_printf(dev, "Could not allocate sbuf for output.\n");
4492 // TODO: use sbufs to make a string to print out
4493 /* Set multiplier for RETA setup and table size based on MAC */
/*
 * X550 family has a larger redirection table (extended ERETA registers);
 * NOTE(review): the reta_size assignments per case are missing from this
 * excerpt.
 */
4494 switch (sc->hw.mac.type) {
4495 case ixgbe_mac_X550:
4496 case ixgbe_mac_X550EM_x:
4497 case ixgbe_mac_X550EM_a:
4505 /* Print out the redirection table */
4506 sbuf_cat(buf, "\n");
4507 for (int i = 0; i < reta_size; i++) {
/* First 32 entries live in RETA; the remainder in ERETA (X550+). */
4509 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4510 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4512 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4513 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4517 // TODO: print more config
4519 error = sbuf_finish(buf);
4521 device_printf(dev, "Error finishing sbuf: %d\n", error);
4526 } /* ixgbe_sysctl_print_rss_config */
4527 #endif /* IXGBE_DEBUG */
4529 /************************************************************************
4530 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4532 * For X552/X557-AT devices using an external PHY
4533 ************************************************************************/
4535 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4537 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4538 struct ixgbe_hw *hw = &sc->hw;
/* Only the X550EM_X 10G-T part exposes this external thermal sensor. */
4541 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4542 device_printf(iflib_get_dev(sc->ctx),
4543 "Device has no supported external thermal sensor.\n");
/*
 * NOTE(review): the "®" on the next call is a mis-encoded "&reg"
 * (address-of the temperature variable) -- repair the file encoding;
 * this will not compile as-is.
 */
4547 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4548 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4549 device_printf(iflib_get_dev(sc->ctx),
4550 "Error reading from PHY's current temperature register\n");
4554 /* Shift temp for output */
/* Report the (shifted) raw register value as a read-only 16-bit sysctl. */
4557 return (sysctl_handle_16(oidp, NULL, reg, req));
4558 } /* ixgbe_sysctl_phy_temp */
4560 /************************************************************************
4561 * ixgbe_sysctl_phy_overtemp_occurred
4563 * Reports (directly from the PHY) whether the current PHY
4564 * temperature is over the overtemp threshold.
4565 ************************************************************************/
4567 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4569 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4570 struct ixgbe_hw *hw = &sc->hw;
/* Only the X550EM_X 10G-T part exposes this external thermal sensor. */
4573 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4574 device_printf(iflib_get_dev(sc->ctx),
4575 "Device has no supported external thermal sensor.\n");
/*
 * NOTE(review): the "®" on the next call is a mis-encoded "&reg"
 * (address-of the status variable) -- repair the file encoding.
 */
4579 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4580 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4581 device_printf(iflib_get_dev(sc->ctx),
4582 "Error reading from PHY's temperature status register\n");
4586 /* Get occurrence bit */
4587 reg = !!(reg & 0x4000);
/*
 * NOTE(review): the second argument is a pointer parameter -- prefer
 * NULL over 0 here (matches ixgbe_sysctl_phy_temp above).
 */
4589 return (sysctl_handle_16(oidp, 0, reg, req));
4590 } /* ixgbe_sysctl_phy_overtemp_occurred */
4592 /************************************************************************
4593 * ixgbe_sysctl_eee_state
4595 * Sysctl to set EEE power saving feature
4599 * (none) - get current device EEE state
4600 ************************************************************************/
4602 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4604 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4605 device_t dev = sc->dev;
4606 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
4607 int curr_eee, new_eee, error = 0;
/* Current state is the EEE bit of the enabled-features mask (0 or 1). */
4610 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4612 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4613 if ((error) || (req->newptr == NULL))
4617 if (new_eee == curr_eee)
/* Reject enabling EEE on hardware that lacks the capability. */
4621 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4624 /* Bounds checking */
4625 if ((new_eee < 0) || (new_eee > 1))
/* Program EEE in hardware; report the raw status code on failure. */
4628 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4630 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4634 /* Restart auto-neg */
4637 device_printf(dev, "New EEE state: %d\n", new_eee);
4639 /* Cache new value */
4641 sc->feat_en |= IXGBE_FEATURE_EEE;
4643 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4646 } /* ixgbe_sysctl_eee_state */
4648 /************************************************************************
4649 * ixgbe_init_device_features
4650 ************************************************************************/
4652 ixgbe_init_device_features(struct ixgbe_softc *sc)
/* Baseline capabilities common to every supported MAC. */
4654 sc->feat_cap = IXGBE_FEATURE_NETMAP
4657 | IXGBE_FEATURE_MSIX
4658 | IXGBE_FEATURE_LEGACY_IRQ;
4660 /* Set capabilities first... */
/*
 * Per-MAC capability flags.  NOTE(review): the break statements between
 * cases are missing from this excerpt; each case is expected to end in
 * a break in the full source.
 */
4661 switch (sc->hw.mac.type) {
4662 case ixgbe_mac_82598EB:
4663 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4664 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4666 case ixgbe_mac_X540:
4667 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4668 sc->feat_cap |= IXGBE_FEATURE_FDIR;
/* Bypass adapters expose the bypass feature only on PCI function 0. */
4669 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4670 (sc->hw.bus.func == 0))
4671 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4673 case ixgbe_mac_X550:
4674 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4675 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4676 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4678 case ixgbe_mac_X550EM_x:
4679 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4680 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4682 case ixgbe_mac_X550EM_a:
4683 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4684 sc->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a does not support legacy (INTx) interrupts. */
4685 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4686 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4687 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4688 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4689 sc->feat_cap |= IXGBE_FEATURE_EEE;
4692 case ixgbe_mac_82599EB:
4693 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4694 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4695 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4696 (sc->hw.bus.func == 0))
4697 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
/* QSFP variant cannot use legacy interrupts. */
4698 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4699 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4705 /* Enabled by default... */
4706 /* Fan failure detection */
4707 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4708 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4710 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4711 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4713 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4714 sc->feat_en |= IXGBE_FEATURE_EEE;
4715 /* Thermal Sensor */
4716 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4717 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4719 /* Enabled via global sysctl... */
4721 if (ixgbe_enable_fdir) {
4722 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4723 sc->feat_en |= IXGBE_FEATURE_FDIR;
4725 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
4728 * Message Signal Interrupts - Extended (MSI-X)
4729 * Normal MSI is only enabled if MSI-X calls fail.
4731 if (!ixgbe_enable_msix)
4732 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4733 /* Receive-Side Scaling (RSS) */
4734 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4735 sc->feat_en |= IXGBE_FEATURE_RSS;
4737 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors. */
4739 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4740 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4741 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4742 sc->feat_en &= ~IXGBE_FEATURE_RSS;
4743 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4745 } /* ixgbe_init_device_features */
4747 /************************************************************************
4748 * ixgbe_check_fan_failure
4749 ************************************************************************/
4751 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
/*
 * The fan-failure indication arrives on GPI SDP1, whose bit position
 * differs between the interrupt cause register and the other path;
 * choose the mask accordingly.  NOTE(review): the non-interrupt mask
 * operand and the reg test are missing from this excerpt.
 */
4755 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
4759 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4760 } /* ixgbe_check_fan_failure */
4762 /************************************************************************
4763 * ixgbe_sbuf_fw_version
4764 ************************************************************************/
4766 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
/*
 * Append every available firmware version string (OEM NVM, Option ROM,
 * eTrack ID, PHY FW) to buf, separated by `space' which becomes a
 * separator after the first field is printed.
 */
4768 struct ixgbe_nvm_version nvm_ver = {0};
4771 const char *space = "";
4773 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4774 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4775 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4776 status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4778 if (nvm_ver.oem_valid) {
4779 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4780 nvm_ver.oem_minor, nvm_ver.oem_release);
4784 if (nvm_ver.or_valid) {
4785 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4786 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
/* Skip the eTrack field when it reads back as the all-invalid pattern. */
4790 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4792 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
/* PHY FW version is only meaningful when the query succeeded. */
4796 if (phyfw != 0 && status == IXGBE_SUCCESS)
4797 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4798 } /* ixgbe_sbuf_fw_version */
4800 /************************************************************************
4801 * ixgbe_print_fw_version
4802 ************************************************************************/
4804 ixgbe_print_fw_version(if_ctx_t ctx)
/* Log the firmware version summary to the console at attach time. */
4806 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4807 struct ixgbe_hw *hw = &sc->hw;
4808 device_t dev = sc->dev;
/* Auto-sized sbuf; freed after printing (free call not in this excerpt). */
4812 buf = sbuf_new_auto();
4814 device_printf(dev, "Could not allocate sbuf for output.\n");
4818 ixgbe_sbuf_fw_version(hw, buf);
4820 error = sbuf_finish(buf);
4822 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Only print when something was actually written to the sbuf. */
4823 else if (sbuf_len(buf))
4824 device_printf(dev, "%s\n", sbuf_data(buf));
4827 } /* ixgbe_print_fw_version */
4829 /************************************************************************
4830 * ixgbe_sysctl_print_fw_version
4831 ************************************************************************/
4833 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
/* Sysctl wrapper: return the firmware version string to userland. */
4835 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4836 struct ixgbe_hw *hw = &sc->hw;
4837 device_t dev = sc->dev;
4841 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4843 device_printf(dev, "Could not allocate sbuf for output.\n");
4847 ixgbe_sbuf_fw_version(hw, buf);
/* sbuf_finish flushes the data to the sysctl request. */
4849 error = sbuf_finish(buf);
4851 device_printf(dev, "Error finishing sbuf: %d\n", error);
4856 } /* ixgbe_sysctl_print_fw_version */