1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
41 #include "ixgbe_sriov.h"
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
47 /************************************************************************
49 ************************************************************************/
/* Driver version string; exported to iflib via isc_driver_version below. */
char ixgbe_driver_version[] = "4.0.1-k";
53 /************************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
	/* 82598 (first-generation) devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
	/* 82599 / X520 devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
	/* X540 devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
	/* X550 devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
	/* X552 (X550EM_x) devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
	/* X553 (X550EM_a) devices */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
	/* Bypass adapters */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
112 static void *ixgbe_register(device_t dev);
113 static int ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int ixgbe_if_attach_post(if_ctx_t ctx);
115 static int ixgbe_if_detach(if_ctx_t ctx);
116 static int ixgbe_if_shutdown(if_ctx_t ctx);
117 static int ixgbe_if_suspend(if_ctx_t ctx);
118 static int ixgbe_if_resume(if_ctx_t ctx);
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int ixgbe_if_media_change(if_ctx_t ctx);
127 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133 uint64_t *paddrs, int nrxqs, int nrxqsets);
134 static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135 uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
143 int ixgbe_intr(void *arg);
145 /************************************************************************
146 * Function prototypes
147 ************************************************************************/
/* Internal helper prototypes. */
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);
static void ixgbe_free_pci_resources(if_ctx_t ctx);
/* MSI-X interrupt handlers: one for link/admin events, one per queue pair */
static int ixgbe_msix_link(void *arg);
static int ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
static int ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t ctx);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(if_ctx_t ctx);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);
static void ixgbe_add_hw_stats(struct adapter *adapter);
static int ixgbe_set_flowcntl(struct adapter *, int);
static int ixgbe_set_advertise(struct adapter *, int);
static int ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
/* Debug sysctls exposing the hardware ring head/tail registers */
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
219 /************************************************************************
220 * FreeBSD Device Interface Entry Points
221 ************************************************************************/
/* Newbus device methods: everything except register is delegated to iflib. */
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	/* SR-IOV: iflib forwards these to the ifdi_iov_* methods below */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* Export the PCI ID table so devmatch(8) can autoload the module */
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
/* iflib ifdi method table mapping the interface callbacks declared above. */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	/*
	 * NOTE(review): TX queue intr enable maps to the RX handler;
	 * TX and RX share a queue-interrupt-enable routine here (no
	 * separate TX variant is declared in this file) — confirm intent.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
	/* SR-IOV (VF management) callbacks */
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
290 * TUNEABLE PARAMETERS:
/* Root of the hw.ix sysctl tree holding the tunables below. */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
/* Ceiling on per-vector interrupt rate (interrupts/second). */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now as it's applied during attach; set
 * this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector.
 * Runtime-writable (RWTUN), unlike the boot-only tunables above.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

/* TX/RX fast-path ops, defined in ix_txrx.c */
extern struct if_txrx ixgbe_txrx;
/* Shared-context template handed to iflib from ixgbe_register(). */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	/* One admin (link) interrupt in addition to the queue vectors */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,
	/* Descriptor-count limits and defaults (single free-list) */
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
408 /************************************************************************
409 * ixgbe_if_tx_queues_alloc
410 ************************************************************************/
/*
 * Allocate per-queue TX software state (struct ix_tx_queue array and each
 * ring's report-status array) and bind the iflib-provided descriptor
 * memory (vaddrs/paddrs) to each tx_ring.  Presumably returns 0 on
 * success and an errno on allocation failure — TODO confirm against the
 * full source (return statements not visible in this chunk).
 */
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;

	MPASS(adapter->num_tx_queues > 0);
	MPASS(adapter->num_tx_queues == ntxqsets);

	/* Allocate queue structure memory */
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		txr->adapter = que->adapter = adapter;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
		/* Mark every report-status slot as unused */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);
	/* Failure path: release anything partially allocated */
	ixgbe_if_queues_free(ctx);
} /* ixgbe_if_tx_queues_alloc */
476 /************************************************************************
477 * ixgbe_if_rx_queues_alloc
478 ************************************************************************/
480 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
481 int nrxqs, int nrxqsets)
483 struct adapter *adapter = iflib_get_softc(ctx);
484 struct ix_rx_queue *que;
487 MPASS(adapter->num_rx_queues > 0);
488 MPASS(adapter->num_rx_queues == nrxqsets);
491 /* Allocate queue structure memory */
493 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
494 M_IXGBE, M_NOWAIT | M_ZERO);
495 if (!adapter->rx_queues) {
496 device_printf(iflib_get_dev(ctx),
497 "Unable to allocate TX ring memory\n");
501 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
502 struct rx_ring *rxr = &que->rxr;
504 /* In case SR-IOV is enabled, align the index properly */
505 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
508 rxr->adapter = que->adapter = adapter;
510 /* get the virtual and physical address of the hw queues */
511 rxr->tail = IXGBE_RDT(rxr->me);
512 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
513 rxr->rx_paddr = paddrs[i];
518 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
519 adapter->num_rx_queues);
522 } /* ixgbe_if_rx_queues_alloc */
524 /************************************************************************
525 * ixgbe_if_queues_free
526 ************************************************************************/
/*
 * Release all software queue state allocated by the *_queues_alloc
 * callbacks above: each TX ring's report-status array, then the TX and RX
 * queue arrays themselves.  Safe to call on a partially-allocated state
 * (used as the failure path of ixgbe_if_tx_queues_alloc).
 */
ixgbe_if_queues_free(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = adapter->tx_queues;
	struct ix_rx_queue *rx_que = adapter->rx_queues;

	if (tx_que != NULL) {
		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
			free(txr->tx_rsq, M_IXGBE);
		free(adapter->tx_queues, M_IXGBE);
		adapter->tx_queues = NULL;
	if (rx_que != NULL) {
		free(adapter->rx_queues, M_IXGBE);
		adapter->rx_queues = NULL;
} /* ixgbe_if_queues_free */
554 /************************************************************************
555 * ixgbe_initialize_rss_mapping
556 ************************************************************************/
/*
 * Program the hardware RSS state: the RETA/ERETA redirection table mapping
 * hash values to RX queues, the RSSRK hash key registers, and the MRQC
 * enabled-hash-type bits.  When the kernel RSS option is active
 * (IXGBE_FEATURE_RSS) the kernel's key and indirection are used; otherwise
 * a random key and a round-robin queue mapping are generated.
 */
ixgbe_initialize_rss_mapping(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);

	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0..num_rx_queues-1 across RETA entries */
		if (j == adapter->num_rx_queues)
		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
			queue_id = (j * index_mult);
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta | (((uint32_t)queue_id) << 24);
			/* RETA holds the first 128 entries, 4 per register */
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;

	/* Translate the hash-type configuration into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
666 /************************************************************************
667 * ixgbe_initialize_receive_units - Setup receive registers and features.
668 ************************************************************************/
/* Round-up mask so rx_mbuf_sz converts to whole SRRCTL BSIZEPKT units. */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

/*
 * Program the RX side of the MAC: broadcast/filter control, jumbo-frame
 * enable, per-ring base/length/SRRCTL/head/tail registers, packet-split
 * type, RSS mapping, and checksum-offload control.
 */
ixgbe_initialize_receive_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	u32 bufsz, fctrl, srrctl, rxcsum;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Convert mbuf buffer size to SRRCTL BSIZEPKT units (rounding up) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		/* Program the ring's physical base address and byte length */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	/* Packet-split receive types: 82598 does not take PSRTYPE */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */
781 /************************************************************************
782 * ixgbe_initialize_transmit_units - Enable transmit units.
783 ************************************************************************/
/*
 * Program the TX side of the MAC: per-ring base/length/head/tail registers,
 * report-status bookkeeping reset, head-writeback disable, and (on 82599
 * and later) DMA TX enable plus MTQC configuration under arbiter disable.
 */
ixgbe_initialize_transmit_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Reset report-status bookkeeping for a fresh ring */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
} /* ixgbe_initialize_transmit_units */
863 /************************************************************************
865 ************************************************************************/
/*
 * device_register method: hand iflib the shared-context template that
 * describes this driver's queue/descriptor requirements and callbacks.
 */
ixgbe_register(device_t dev)
	return (&ixgbe_sctx_init);
} /* ixgbe_register */
872 /************************************************************************
873 * ixgbe_if_attach_pre - Device initialization routine, part 1
875 * Called when the driver is being loaded.
876 * Identifies the type of hardware, initializes the hardware,
877 * and initializes iflib structures.
879 * return 0 on success, positive on failure
880 ************************************************************************/
/*
 * ixgbe_if_attach_pre - First stage of device attach, run before
 * interrupts/queues exist: maps BAR0, identifies the part from PCI IDs,
 * initializes the shared code, resets the MAC, validates the EEPROM,
 * and fills in the iflib softc context (queue sizes, TSO limits,
 * checksum capabilities).  On failure the error path below clears
 * DRV_LOAD and releases PCI resources.
 */
882 ixgbe_if_attach_pre(if_ctx_t ctx)
884 	struct adapter *adapter;
886 	if_softc_ctx_t scctx;
891 	INIT_DEBUGOUT("ixgbe_attach: begin");
893 	/* Allocate, clear, and link in our adapter structure */
894 	dev = iflib_get_dev(ctx);
895 	adapter = iflib_get_softc(ctx);
896 	adapter->hw.back = adapter;
899 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
900 	adapter->media = iflib_get_media(ctx);
903 	/* Determine hardware revision */
904 	hw->vendor_id = pci_get_vendor(dev);
905 	hw->device_id = pci_get_device(dev);
906 	hw->revision_id = pci_get_revid(dev);
907 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
908 	hw->subsystem_device_id = pci_get_subdevice(dev);
910 	/* Do base PCI setup - map BAR0 */
911 	if (ixgbe_allocate_pci_resources(ctx)) {
912 		device_printf(dev, "Allocation of PCI resources failed\n");
916 	/* let hardware know driver is loaded */
917 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
918 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
919 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
922 	 * Initialize the shared code
924 	if (ixgbe_init_shared_code(hw) != 0) {
925 		device_printf(dev, "Unable to initialize the shared code\n");
930 	if (hw->mbx.ops.init_params)
931 		hw->mbx.ops.init_params(hw);
933 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	/* SmartSpeed is not applicable to 82598 parts */
935 	if (hw->mac.type != ixgbe_mac_82598EB)
936 		hw->phy.smart_speed = ixgbe_smart_speed;
938 	ixgbe_init_device_features(adapter);
940 	/* Enable WoL (if supported) */
941 	ixgbe_check_wol_support(adapter);
943 	/* Verify adapter fan is still functional (if applicable) */
944 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
945 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
946 		ixgbe_check_fan_failure(adapter, esdp, false);
949 	/* Ensure SW/FW semaphore is free */
950 	ixgbe_init_swfw_semaphore(hw);
952 	/* Set an initial default flow control value */
953 	hw->fc.requested_mode = ixgbe_flow_control;
	/* Reset even if the PHY reports over-temperature during this reset */
955 	hw->phy.reset_if_overtemp = true;
956 	error = ixgbe_reset_hw(hw);
957 	hw->phy.reset_if_overtemp = false;
958 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
960 		 * No optics in this port, set up
961 		 * so the timer routine will probe
962 		 * for later insertion.
964 		adapter->sfp_probe = true;
966 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
967 		device_printf(dev, "Unsupported SFP+ module detected!\n");
971 		device_printf(dev, "Hardware initialization failed\n");
976 	/* Make sure we have a good EEPROM before we read from it */
977 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
978 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
983 	error = ixgbe_start_hw(hw);
985 	case IXGBE_ERR_EEPROM_VERSION:
986 		device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
988 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
989 		device_printf(dev, "Unsupported SFP+ Module\n");
992 	case IXGBE_ERR_SFP_NOT_PRESENT:
993 		device_printf(dev, "No SFP+ Module found\n");
999 	/* Most of the iflib initialization... */
1001 	iflib_set_mac(ctx, hw->mac.addr);
	/* X550-class parts have a larger RSS table and more queue pairs */
1002 	switch (adapter->hw.mac.type) {
1003 	case ixgbe_mac_X550:
1004 	case ixgbe_mac_X550EM_x:
1005 	case ixgbe_mac_X550EM_a:
1006 		scctx->isc_rss_table_size = 512;
1007 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1010 		scctx->isc_rss_table_size = 128;
1011 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1014 	/* Allow legacy interrupts */
1015 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
	/* TX ring size includes an extra u32 for the head-writeback slot */
1017 	scctx->isc_txqsizes[0] =
1018 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1019 	    sizeof(u32), DBA_ALIGN),
1020 	scctx->isc_rxqsizes[0] =
1021 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1025 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1026 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1027 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1028 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	/* 82599 and later also offload SCTP checksums */
1030 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1031 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1034 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1036 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1037 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1038 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1040 	scctx->isc_txrx = &ixgbe_txrx;
1042 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
	/* Error unwind: tell firmware the driver is no longer loaded,
	 * then release the PCI resources mapped above. */
1047 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1048 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1049 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1050 	ixgbe_free_pci_resources(ctx);
1053 } /* ixgbe_if_attach_pre */
1055 /*********************************************************************
1056 * ixgbe_if_attach_post - Device initialization routine, part 2
1058 * Called during driver load, but after interrupts and
1059 * resources have been allocated and configured.
1060 * Sets up some data structures not relevant to iflib.
1062 * return 0 on success, positive on failure
1063 *********************************************************************/
/*
 * ixgbe_if_attach_post - Second stage of attach, run after interrupts
 * and queue resources exist: allocates the multicast table, applies
 * hw.ix tunables, powers up optics/PHY, sets up IOV and the network
 * interface, and registers statistics and sysctl nodes.
 */
1065 ixgbe_if_attach_post(if_ctx_t ctx)
1068 	struct adapter *adapter;
1069 	struct ixgbe_hw *hw;
1072 	dev = iflib_get_dev(ctx);
1073 	adapter = iflib_get_softc(ctx);
	/* Legacy (INTx) interrupts only work on parts that advertise the
	 * LEGACY_IRQ capability; refuse the combination otherwise. */
1077 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1078 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1079 		device_printf(dev, "Device does not support legacy interrupts");
1084 	/* Allocate multicast array memory. */
1085 	adapter->mta = malloc(sizeof(*adapter->mta) *
1086 	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1087 	if (adapter->mta == NULL) {
1088 		device_printf(dev, "Can not allocate multicast setup array\n");
1093 	/* hw.ix defaults init */
1094 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1096 	/* Enable the optics for 82599 SFP+ fiber */
1097 	ixgbe_enable_tx_laser(hw);
1099 	/* Enable power to the phy. */
1100 	ixgbe_set_phy_power(hw, true);
1102 	ixgbe_initialize_iov(adapter);
1104 	error = ixgbe_setup_interface(ctx);
1106 		device_printf(dev, "Interface setup failed: %d\n", error);
1110 	ixgbe_if_update_admin_status(ctx);
1112 	/* Initialize statistics */
1113 	ixgbe_update_stats_counters(adapter);
1114 	ixgbe_add_hw_stats(adapter);
1116 	/* Check PCIE slot type/speed/width */
1117 	ixgbe_get_slot_info(adapter);
1120 	 * Do time init and sysctl init here, but
1121 	 * only on the first port of a bypass adapter.
1123 	ixgbe_bypass_init(adapter);
1125 	/* Display NVM and Option ROM versions */
1126 	ixgbe_print_fw_version(ctx);
1128 	/* Set an initial dmac value */
1130 	/* Set initial advertised speeds (if applicable) */
1131 	adapter->advertise = ixgbe_get_advertise(adapter);
1133 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1134 		ixgbe_define_iov_schemas(dev, &error);
1137 	ixgbe_add_device_sysctls(ctx);
1142 } /* ixgbe_if_attach_post */
1144 /************************************************************************
1145 * ixgbe_check_wol_support
1147 * Checks whether the adapter's ports are capable of
1148 * Wake On LAN by reading the adapter's NVM.
1150 * Sets each port's hw->wol_enabled value depending
1151 * on the value read here.
1152 ************************************************************************/
/*
 * ixgbe_check_wol_support - Read the device capabilities from the NVM
 * and set adapter->wol_support / hw->wol_enabled accordingly, then save
 * the initial wake-up filter configuration (WUFC) for later restore.
 */
1154 ixgbe_check_wol_support(struct adapter *adapter)
1156 	struct ixgbe_hw *hw = &adapter->hw;
1159 	/* Find out WoL support for port */
1160 	adapter->wol_support = hw->wol_enabled = 0;
1161 	ixgbe_get_device_caps(hw, &dev_caps);
	/* WOL_PORT0_1 covers both ports; WOL_PORT0 applies to port 0 only
	 * (the port-number check is on a line missing from this extract) */
1162 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1163 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1165 		adapter->wol_support = hw->wol_enabled = 1;
1167 	/* Save initial wake up filter configuration */
1168 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1171 } /* ixgbe_check_wol_support */
1173 /************************************************************************
1174 * ixgbe_setup_interface
1176 * Setup networking device structure and register an interface.
1177 ************************************************************************/
/*
 * ixgbe_setup_interface - Configure the ifnet: baudrate, max frame
 * size, supported PHY layer, ifmedia entries, and default (auto) media.
 */
1179 ixgbe_setup_interface(if_ctx_t ctx)
1181 	struct ifnet *ifp = iflib_get_ifp(ctx);
1182 	struct adapter *adapter = iflib_get_softc(ctx);
1184 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1186 	if_setbaudrate(ifp, IF_Gbps(10));
	/* Frame on the wire = MTU + Ethernet header + CRC */
1188 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1190 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1192 	ixgbe_add_media_types(ctx);
1194 	/* Autoselect media by default */
1195 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1198 } /* ixgbe_setup_interface */
1200 /************************************************************************
1201 * ixgbe_if_get_counter
1202 ************************************************************************/
/*
 * ixgbe_if_get_counter - iflib callback returning interface counters
 * from the adapter's software copies of the hardware statistics;
 * anything not handled here falls through to if_get_counter_default().
 * NOTE(review): the values returned for COLLISIONS and OQDROPS are on
 * lines missing from this extract.
 */
1204 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1206 	struct adapter *adapter = iflib_get_softc(ctx);
1207 	if_t ifp = iflib_get_ifp(ctx);
1210 	case IFCOUNTER_IPACKETS:
1211 		return (adapter->ipackets);
1212 	case IFCOUNTER_OPACKETS:
1213 		return (adapter->opackets);
1214 	case IFCOUNTER_IBYTES:
1215 		return (adapter->ibytes);
1216 	case IFCOUNTER_OBYTES:
1217 		return (adapter->obytes);
1218 	case IFCOUNTER_IMCASTS:
1219 		return (adapter->imcasts);
1220 	case IFCOUNTER_OMCASTS:
1221 		return (adapter->omcasts);
1222 	case IFCOUNTER_COLLISIONS:
1224 	case IFCOUNTER_IQDROPS:
1225 		return (adapter->iqdrops);
1226 	case IFCOUNTER_OQDROPS:
1228 	case IFCOUNTER_IERRORS:
1229 		return (adapter->ierrors);
1231 		return (if_get_counter_default(ifp, cnt));
1233 } /* ixgbe_if_get_counter */
1235 /************************************************************************
1237 ************************************************************************/
/*
 * ixgbe_if_i2c_req - Service an SIOCGI2C-style request by reading
 * req->len bytes, one at a time, from the PHY's I2C interface into
 * req->data.  Bails out early if the PHY ops table has no
 * read_i2c_byte method (the value returned in that case is on a line
 * missing from this extract).
 */
1239 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1241 	struct adapter *adapter = iflib_get_softc(ctx);
1242 	struct ixgbe_hw *hw = &adapter->hw;
1246 	if (hw->phy.ops.read_i2c_byte == NULL)
1248 	for (i = 0; i < req->len; i++)
1249 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1250 		    req->dev_addr, &req->data[i]);
1252 } /* ixgbe_if_i2c_req */
1254 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1255 * @ctx: iflib context
1256 * @event: event code to check
1258 * Defaults to returning true for unknown events.
1260 * @returns true if iflib needs to reinit the interface
/*
 * ixgbe_if_needs_restart - Tell iflib whether a given restart event
 * requires reinitializing the interface.  Only the VLAN_CONFIG case is
 * visible in this extract; the return values are on missing lines.
 */
1263 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1266 	case IFLIB_RESTART_VLAN_CONFIG:
1273 /************************************************************************
1274 * ixgbe_add_media_types
1275 ************************************************************************/
/*
 * ixgbe_add_media_types - Populate the ifmedia list from the PHY's
 * supported physical-layer bitmask.  Backplane (KR/KX4/KX) types are
 * added natively when IFM_ETH_XTYPE is available; otherwise they are
 * mapped onto the closest classic media type with a console note.
 */
1277 ixgbe_add_media_types(if_ctx_t ctx)
1279 	struct adapter *adapter = iflib_get_softc(ctx);
1280 	struct ixgbe_hw *hw = &adapter->hw;
1281 	device_t dev = iflib_get_dev(ctx);
1284 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1286 	/* Media types with matching FreeBSD media defines */
1287 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1289 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1290 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1291 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1292 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1293 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
	/* Direct-attach copper (passive or active) maps to 10G twinax */
1296 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1297 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1298 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
	/* Multispeed fiber modules also support the 1G rate */
1301 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1302 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1303 		if (hw->phy.multispeed_fiber)
1304 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1307 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1308 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1309 		if (hw->phy.multispeed_fiber)
1310 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1312 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1313 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1314 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1315 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1317 #ifdef IFM_ETH_XTYPE
1318 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1319 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1320 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1321 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1322 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1323 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1324 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1325 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
	/* Fallback mappings when IFM_ETH_XTYPE media types are unavailable */
1327 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1328 		device_printf(dev, "Media supported: 10GbaseKR\n");
1329 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1330 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1332 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1333 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1334 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1335 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1337 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1338 		device_printf(dev, "Media supported: 1000baseKX\n");
1339 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1340 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1342 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1343 		device_printf(dev, "Media supported: 2500baseKX\n");
1344 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1345 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	/* 1000baseBX has no ifmedia mapping; only announce it */
1348 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1349 		device_printf(dev, "Media supported: 1000baseBX\n");
	/* 82598AT supports 1G with and without full duplex */
1351 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1352 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1354 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1357 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1358 } /* ixgbe_add_media_types */
1360 /************************************************************************
1362 ************************************************************************/
/*
 * ixgbe_is_sfp - Determine whether this port uses a pluggable SFP(+)
 * module, based on MAC type and (for newer MACs) the reported media
 * type.  The values returned by each branch are on lines missing from
 * this extract.
 */
1364 ixgbe_is_sfp(struct ixgbe_hw *hw)
1366 	switch (hw->mac.type) {
1367 	case ixgbe_mac_82598EB:
	/* 82598: only the NL PHY indicates a pluggable module */
1368 		if (hw->phy.type == ixgbe_phy_nl)
1371 	case ixgbe_mac_82599EB:
1372 		switch (hw->mac.ops.get_media_type(hw)) {
1373 		case ixgbe_media_type_fiber:
1374 		case ixgbe_media_type_fiber_qsfp:
1379 	case ixgbe_mac_X550EM_x:
1380 	case ixgbe_mac_X550EM_a:
1381 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1387 } /* ixgbe_is_sfp */
1389 /************************************************************************
1391 ************************************************************************/
/*
 * ixgbe_config_link - Configure link for the port.  SFP ports defer to
 * the module-insertion task; other ports query current link state,
 * resolve advertised capabilities if none are set, and program the MAC
 * via setup_link.
 */
1393 ixgbe_config_link(if_ctx_t ctx)
1395 	struct adapter *adapter = iflib_get_softc(ctx);
1396 	struct ixgbe_hw *hw = &adapter->hw;
1397 	u32 autoneg, err = 0;
1398 	bool sfp, negotiate;
1400 	sfp = ixgbe_is_sfp(hw);
	/* SFP path: schedule the module (MOD) task via the admin interrupt */
1403 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1404 		iflib_admin_intr_deferred(ctx);
1406 		if (hw->mac.ops.check_link)
1407 			err = ixgbe_check_link(hw, &adapter->link_speed,
1408 			    &adapter->link_up, false);
1411 		autoneg = hw->phy.autoneg_advertised;
	/* If nothing is advertised, fall back to the device's capabilities */
1412 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1413 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1417 		if (hw->mac.ops.setup_link)
1418 			err = hw->mac.ops.setup_link(hw, autoneg,
1421 } /* ixgbe_config_link */
1423 /************************************************************************
1424 * ixgbe_update_stats_counters - Update board statistics counters.
1425 ************************************************************************/
/*
 * ixgbe_update_stats_counters - Read the (clear-on-read) hardware
 * statistics registers, accumulate them into adapter->stats.pf, and
 * publish the aggregate OS-facing counters (ipackets, ierrors, ...).
 */
1427 ixgbe_update_stats_counters(struct adapter *adapter)
1429 	struct ixgbe_hw *hw = &adapter->hw;
1430 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1431 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1433 	u64 total_missed_rx = 0;
1435 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1436 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1437 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1438 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1439 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
	/* NOTE(review): the lines that fold MPC into missed_rx /
	 * total_missed_rx are missing from this extract — verify against
	 * the full source before relying on those accumulators. */
1441 	for (int i = 0; i < 16; i++) {
1442 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1443 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1444 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1446 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1447 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1448 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1450 	/* Hardware workaround, gprc counts missed packets */
1451 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1452 	stats->gprc -= missed_rx;
	/* Post-82598 MACs expose 36-bit octet counters split across
	 * low/high register pairs */
1454 	if (hw->mac.type != ixgbe_mac_82598EB) {
1455 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1456 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1457 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1458 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1459 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1460 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1461 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1462 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1463 		stats->lxoffrxc += lxoffrxc;
1465 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1466 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1467 		stats->lxoffrxc += lxoffrxc;
1468 		/* 82598 only has a counter in the high register */
1469 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1470 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1471 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1475 	 * For watchdog management we need to know if we have been paused
1476 	 * during the last interval, so capture that here.
1479 		adapter->shared->isc_pause_frames = 1;
1482 	 * Workaround: mprc hardware is incorrectly counting
1483 	 * broadcasts, so for now we subtract those.
1485 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1486 	stats->bprc += bprc;
1487 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1488 	if (hw->mac.type == ixgbe_mac_82598EB)
1489 		stats->mprc -= bprc;
1491 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1492 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1493 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1494 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1495 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1496 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1498 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1499 	stats->lxontxc += lxon;
1500 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1501 	stats->lxofftxc += lxoff;
1502 	total = lxon + lxoff;
	/* Flow-control frames are counted as transmitted packets by the
	 * hardware; subtract them from the good-TX counters */
1504 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1505 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1506 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1507 	stats->gptc -= total;
1508 	stats->mptc -= total;
1509 	stats->ptc64 -= total;
1510 	stats->gotc -= total * ETHER_MIN_LEN;
1512 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1513 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1514 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1515 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1516 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1517 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1518 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1519 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1520 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1521 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1522 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1523 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1524 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1525 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1526 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1527 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1528 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1529 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1530 	/* Only read FCOE on 82599 */
1531 	if (hw->mac.type != ixgbe_mac_82598EB) {
1532 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1533 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1534 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1535 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1536 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1539 	/* Fill out the OS statistics structure */
1540 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1541 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1542 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1543 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1544 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1545 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1546 	IXGBE_SET_COLLISIONS(adapter, 0);
1547 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1550 	 * Aggregate following types of errors as RX errors:
1551 	 * - CRC error count,
1552 	 * - illegal byte error count,
1553 	 * - checksum error count,
1554 	 * - missed packets count,
1555 	 * - length error count,
1556 	 * - undersized packets count,
1557 	 * - fragmented packets count,
1558 	 * - oversized packets count,
1561 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
1562 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1564 } /* ixgbe_update_stats_counters */
1566 /************************************************************************
1567 * ixgbe_add_hw_stats
1569 * Add sysctl variables, one per statistic, to the system.
1570 ************************************************************************/
/*
 * ixgbe_add_hw_stats - Register sysctl nodes under the device's tree:
 * driver-level counters, one "queueN" node per TX and per RX queue,
 * and a "mac_stats" node exporting the accumulated hardware counters
 * from adapter->stats.pf.
 */
1572 ixgbe_add_hw_stats(struct adapter *adapter)
1574 	device_t dev = iflib_get_dev(adapter->ctx);
1575 	struct ix_rx_queue *rx_que;
1576 	struct ix_tx_queue *tx_que;
1577 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1578 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1579 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1580 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1581 	struct sysctl_oid *stat_node, *queue_node;
1582 	struct sysctl_oid_list *stat_list, *queue_list;
1585 #define QUEUE_NAME_LEN 32
1586 	char namebuf[QUEUE_NAME_LEN];
1588 	/* Driver Statistics */
1589 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1590 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1591 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1592 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1593 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1594 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
	/* Per-TX-queue nodes: descriptor head/tail plus TSO and packet counts */
1596 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1597 		struct tx_ring *txr = &tx_que->txr;
1598 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1599 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1600 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1601 		queue_list = SYSCTL_CHILDREN(queue_node);
1603 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1604 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1605 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1606 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1607 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1608 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1609 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1610 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1611 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1612 		    CTLFLAG_RD, &txr->total_packets,
1613 		    "Queue Packets Transmitted");
	/* Per-RX-queue nodes: interrupt rate, IRQ count, head/tail, packets */
1616 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1617 		struct rx_ring *rxr = &rx_que->rxr;
1618 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1619 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1620 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1621 		queue_list = SYSCTL_CHILDREN(queue_node);
1623 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1624 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1625 		    &adapter->rx_queues[i], 0,
1626 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1628 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1629 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1630 		    "irqs on this queue");
1631 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1632 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1633 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1634 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1635 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1636 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1637 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1638 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1639 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1640 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1641 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1642 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1643 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1644 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1647 	/* MAC stats get their own sub node */
1649 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1650 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1651 	stat_list = SYSCTL_CHILDREN(stat_node);
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1654 	    CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1656 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1658 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1660 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1662 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1664 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1666 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1668 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1670 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1672 	/* Flow Control stats */
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1674 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1676 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1678 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1680 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1682 	/* Packet Reception Stats */
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1684 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1686 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1688 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1690 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1692 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1694 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1696 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1698 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1699 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1700 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1702 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1704 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	/* NOTE(review): description string says "1023-1522" but this bucket
	 * is 1024-1522 (PRC1522) — runtime string, flagged only. */
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1706 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1708 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1710 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1712 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1714 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1716 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	/* NOTE(review): "management_pkts_drpd" exports &stats->mngptc
	 * (management packets transmitted) rather than &stats->mngpdc
	 * (management packets dropped, read at orig line 1517) — looks
	 * like a copy/paste bug; verify against the register map. */
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1718 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1720 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1722 	/* Packet Transmission Stats */
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1724 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1726 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1728 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1730 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1732 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1733 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1734 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1735 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1736 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1737 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1738 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1739 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1740 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1741 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1742 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1743 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1744 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1745 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1746 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1747 } /* ixgbe_add_hw_stats */
1749 /************************************************************************
1750 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1752 * Retrieves the TDH value from the hardware
1753 ************************************************************************/
/*
 * ixgbe_sysctl_tdh_handler - Read-only sysctl: report the current TX
 * Descriptor Head (TDH) register value for the ring in oid_arg1.
 */
1755 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1757 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1764 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1765 	error = sysctl_handle_int(oidp, &val, 0, req);
	/* Bail on copyout error or when this is a pure read (no new value) */
1766 	if (error || !req->newptr)
1770 } /* ixgbe_sysctl_tdh_handler */
1772 /************************************************************************
1773 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1775 * Retrieves the TDT value from the hardware
1776 ************************************************************************/
/*
 * ixgbe_sysctl_tdt_handler - Read-only sysctl: report the current TX
 * Descriptor Tail (TDT) register value for the ring in oid_arg1.
 */
1778 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1780 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1787 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1788 	error = sysctl_handle_int(oidp, &val, 0, req);
	/* Bail on copyout error or when this is a pure read (no new value) */
1789 	if (error || !req->newptr)
1793 } /* ixgbe_sysctl_tdt_handler */
1795 /************************************************************************
1796 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1798 * Retrieves the RDH value from the hardware
1799 ************************************************************************/
1801 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
/* The rx_ring to report on is passed to us as the sysctl node's arg1. */
1803 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the current Receive Descriptor Head register for this ring. */
1810 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
/* Hand the value back to userland; on read (or error) we are done. */
1811 error = sysctl_handle_int(oidp, &val, 0, req);
1812 if (error || !req->newptr)
1816 } /* ixgbe_sysctl_rdh_handler */
1818 /************************************************************************
1819 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1821 * Retrieves the RDT value from the hardware
1822 ************************************************************************/
1824 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
/* The rx_ring to report on is passed to us as the sysctl node's arg1. */
1826 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the current Receive Descriptor Tail register for this ring. */
1833 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
/* Hand the value back to userland; on read (or error) we are done. */
1834 error = sysctl_handle_int(oidp, &val, 0, req);
1835 if (error || !req->newptr)
1839 } /* ixgbe_sysctl_rdt_handler */
1841 /************************************************************************
1842 * ixgbe_if_vlan_register
1844 * Run via vlan config EVENT, it enables us to use the
1845 * HW Filter table since we can get the vlan id. This
1846 * just creates the entry in the soft version of the
1847 * VFTA, init will repopulate the real table.
1848 ************************************************************************/
1850 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1852 struct adapter *adapter = iflib_get_softc(ctx);
/* Upper bits of the VLAN tag pick the 32-bit word in the shadow VFTA. */
1855 index = (vtag >> 5) & 0x7F;
/* Set this VLAN's bit in the soft table; 'bit' is presumably derived
 * from the low 5 bits of vtag — not visible here, confirm in full source. */
1857 adapter->shadow_vfta[index] |= (1 << bit);
1858 ++adapter->num_vlans;
/* Push the updated VLAN configuration out to the hardware. */
1859 ixgbe_setup_vlan_hw_support(ctx);
1860 } /* ixgbe_if_vlan_register */
1862 /************************************************************************
1863 * ixgbe_if_vlan_unregister
1865 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1866 ************************************************************************/
1868 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1870 struct adapter *adapter = iflib_get_softc(ctx);
/* Same word/bit addressing as ixgbe_if_vlan_register, but clearing. */
1873 index = (vtag >> 5) & 0x7F;
1875 adapter->shadow_vfta[index] &= ~(1 << bit);
1876 --adapter->num_vlans;
1877 /* Re-init to load the changes */
1878 ixgbe_setup_vlan_hw_support(ctx);
1879 } /* ixgbe_if_vlan_unregister */
1881 /************************************************************************
1882 * ixgbe_setup_vlan_hw_support
/*
 * Programs the hardware VLAN state (per-queue tag stripping, VFTA
 * filter table, VLNCTRL) from the soft state kept in the adapter.
 */
1883 ************************************************************************/
1885 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1887 struct ifnet *ifp = iflib_get_ifp(ctx);
1888 struct adapter *adapter = iflib_get_softc(ctx);
1889 struct ixgbe_hw *hw = &adapter->hw;
1890 struct rx_ring *rxr;
1896 * We get here thru init_locked, meaning
1897 * a soft reset, this has already cleared
1898 * the VFTA and other state, so if there
1899 * have been no vlan's registered do nothing.
1901 if (adapter->num_vlans == 0)
1904 /* Setup the queues for vlans */
1905 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1906 for (i = 0; i < adapter->num_rx_queues; i++) {
1907 rxr = &adapter->rx_queues[i].rxr;
1908 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1909 if (hw->mac.type != ixgbe_mac_82598EB) {
1910 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1911 ctrl |= IXGBE_RXDCTL_VME;
1912 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember in software that this ring strips VLAN tags. */
1914 rxr->vtag_strip = true;
/* Nothing more to do unless hardware VLAN filtering is enabled. */
1918 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1921 * A soft reset zero's out the VFTA, so
1922 * we need to repopulate it now.
1924 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1925 if (adapter->shadow_vfta[i] != 0)
1926 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1927 adapter->shadow_vfta[i]);
1929 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1930 /* Enable the Filter Table if enabled */
1931 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1932 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1933 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 the VLAN strip enable is global, not per-queue. */
1935 if (hw->mac.type == ixgbe_mac_82598EB)
1936 ctrl |= IXGBE_VLNCTRL_VME;
1937 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1938 } /* ixgbe_setup_vlan_hw_support */
1940 /************************************************************************
1941 * ixgbe_get_slot_info
1943 * Get the width and transaction speed of
1944 * the slot this adapter is plugged into.
1945 ************************************************************************/
1947 ixgbe_get_slot_info(struct adapter *adapter)
1949 device_t dev = iflib_get_dev(adapter->ctx);
1950 struct ixgbe_hw *hw = &adapter->hw;
1951 int bus_info_valid = true;
1955 /* Some devices are behind an internal bridge */
1956 switch (hw->device_id) {
1957 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1958 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
/* Bridged parts must walk the PCI tree instead of asking the MAC. */
1959 goto get_parent_info;
1964 ixgbe_get_bus_info(hw);
1967 * Some devices don't use PCI-E, but there is no need
1968 * to display "Unknown" for bus speed and width.
1970 switch (hw->mac.type) {
1971 case ixgbe_mac_X550EM_x:
1972 case ixgbe_mac_X550EM_a:
1980 * For the Quad port adapter we need to parse back
1981 * up the PCI tree to find the speed of the expansion
1982 * slot into which this adapter is plugged. A bit more work.
/* First hop: the device's parent bridge. */
1984 dev = device_get_parent(device_get_parent(dev));
1986 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1987 pci_get_slot(dev), pci_get_function(dev));
/* Second hop: the bridge leading to the physical slot. */
1989 dev = device_get_parent(device_get_parent(dev));
1991 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1992 pci_get_slot(dev), pci_get_function(dev));
1994 /* Now get the PCI Express Capabilities offset */
1995 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1997 * Hmm...can't get PCI-Express capabilities.
1998 * Falling back to default method.
2000 bus_info_valid = false;
2001 ixgbe_get_bus_info(hw);
2004 /* ...and read the Link Status Register */
2005 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Let shared code decode the Link Status into hw->bus speed/width. */
2006 ixgbe_set_pci_config_data_generic(hw, link);
2009 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2010 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2011 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2012 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2014 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2015 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2016 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn the user when the slot can't feed the adapter at full rate. */
2019 if (bus_info_valid) {
2020 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2021 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2022 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2023 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2024 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
/* The QSFP quad-port part needs a Gen3 x8 slot to run flat out. */
2026 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2027 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2028 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2029 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2030 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2033 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2036 } /* ixgbe_get_slot_info */
2038 /************************************************************************
2039 * ixgbe_if_msix_intr_assign
2041 * Setup MSI-X Interrupt resources and handlers
2042 ************************************************************************/
2044 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2046 struct adapter *adapter = iflib_get_softc(ctx);
2047 struct ix_rx_queue *rx_que = adapter->rx_queues;
2048 struct ix_tx_queue *tx_que;
2049 int error, rid, vector = 0;
2053 /* Admin Que is vector 0*/
/* One MSI-X vector per RX queue; iflib wires the handler for us. */
2055 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2058 snprintf(buf, sizeof(buf), "rxq%d", i);
2059 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2060 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2063 device_printf(iflib_get_dev(ctx),
2064 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count to what was actually allocated before bailing. */
2065 adapter->num_rx_queues = i + 1;
2069 rx_que->msix = vector;
2070 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2072 * The queue ID is used as the RSS layer bucket ID.
2073 * We look up the queue ID -> RSS CPU ID and select
2076 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2079 * Bind the MSI-X vector, and thus the
2080 * rings to the corresponding cpu.
2082 * This just happens to match the default RSS
2083 * round-robin bucket -> queue -> CPU allocation.
2085 if (adapter->num_rx_queues > 1)
/* TX queues share the RX queues' vectors (softirq), round-robin. */
2090 for (int i = 0; i < adapter->num_tx_queues; i++) {
2091 snprintf(buf, sizeof(buf), "txq%d", i);
2092 tx_que = &adapter->tx_queues[i];
2093 tx_que->msix = i % adapter->num_rx_queues;
2094 iflib_softirq_alloc_generic(ctx,
2095 &adapter->rx_queues[tx_que->msix].que_irq,
2096 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Finally the admin/link vector ("aq"), serviced by ixgbe_msix_link. */
2099 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2100 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2102 device_printf(iflib_get_dev(ctx),
2103 "Failed to register admin handler");
2107 adapter->vector = vector;
/* Error unwind: release everything allocated above. */
2111 iflib_irq_free(ctx, &adapter->irq);
2112 rx_que = adapter->rx_queues;
2113 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2114 iflib_irq_free(ctx, &rx_que->que_irq);
2117 } /* ixgbe_if_msix_intr_assign */
/*
 * Adaptive Interrupt Moderation: derive a new EITR setting for this
 * queue from the average packet size seen during the last interval.
 */
2120 ixgbe_perform_aim(struct adapter *adapter, struct ix_rx_queue *que)
2122 uint32_t newitr = 0;
2123 struct rx_ring *rxr = &que->rxr;
2126 * Do Adaptive Interrupt Moderation:
2127 * - Write out last calculated setting
2128 * - Calculate based on average size over
2129 * the last interval.
2131 if (que->eitr_setting) {
/* Apply the value computed on the previous pass. */
2132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
2136 que->eitr_setting = 0;
2137 /* Idle, do nothing */
2138 if (rxr->bytes == 0) {
/* Average bytes per packet over the interval drives the new interval. */
2142 if ((rxr->bytes) && (rxr->packets)) {
2143 newitr = (rxr->bytes / rxr->packets);
2146 newitr += 24; /* account for hardware frame, crc */
2147 /* set an upper boundary */
2148 newitr = min(newitr, 3000);
2150 /* Be nice to the mid range */
2151 if ((newitr > 300) && (newitr < 1200)) {
2152 newitr = (newitr / 3);
2154 newitr = (newitr / 2);
/* 82598 takes the interval mirrored in the high half word; later MACs
 * instead need the counter write-disable bit set. */
2157 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2158 newitr |= newitr << 16;
2160 newitr |= IXGBE_EITR_CNT_WDIS;
2163 /* save for next interrupt */
2164 que->eitr_setting = newitr;
2173 /*********************************************************************
2174 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2175 **********************************************************************/
2177 ixgbe_msix_que(void *arg)
2179 struct ix_rx_queue *que = arg;
2180 struct adapter *adapter = que->adapter;
2181 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
2183 /* Protect against spurious interrupts */
2184 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2185 return (FILTER_HANDLED);
/* Mask this queue's interrupt until the deferred work completes. */
2187 ixgbe_disable_queue(adapter, que->msix);
/* Optionally re-tune the interrupt rate from recent traffic. */
2191 if (adapter->enable_aim) {
2192 ixgbe_perform_aim(adapter, que);
/* Defer the actual RX/TX processing to the iflib task thread. */
2195 return (FILTER_SCHEDULE_THREAD);
2196 } /* ixgbe_msix_que */
2198 /************************************************************************
2199 * ixgbe_media_status - Media Ioctl callback
2201 * Called whenever the user queries the status of
2202 * the interface using ifconfig.
2203 ************************************************************************/
2205 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2207 struct adapter *adapter = iflib_get_softc(ctx);
2208 struct ixgbe_hw *hw = &adapter->hw;
2211 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2213 ifmr->ifm_status = IFM_AVALID;
2214 ifmr->ifm_active = IFM_ETHER;
/* No link: report just "valid, ethernet" and stop. */
2216 if (!adapter->link_active)
2219 ifmr->ifm_status |= IFM_ACTIVE;
2220 layer = adapter->phy_layer;
/* Map (physical layer, link speed) onto an ifmedia subtype.
 * Copper/baseT variants first. */
2222 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2223 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2224 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2225 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2226 switch (adapter->link_speed) {
2227 case IXGBE_LINK_SPEED_10GB_FULL:
2228 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2230 case IXGBE_LINK_SPEED_1GB_FULL:
2231 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2233 case IXGBE_LINK_SPEED_100_FULL:
2234 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2236 case IXGBE_LINK_SPEED_10_FULL:
2237 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* Direct-attach / twinax SFP+ media. */
2240 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2241 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2242 switch (adapter->link_speed) {
2243 case IXGBE_LINK_SPEED_10GB_FULL:
2244 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Optical media: LR / LRM / SR variants. */
2247 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2248 switch (adapter->link_speed) {
2249 case IXGBE_LINK_SPEED_10GB_FULL:
2250 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2252 case IXGBE_LINK_SPEED_1GB_FULL:
2253 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2256 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2257 switch (adapter->link_speed) {
2258 case IXGBE_LINK_SPEED_10GB_FULL:
2259 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2261 case IXGBE_LINK_SPEED_1GB_FULL:
2262 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2265 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2266 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2267 switch (adapter->link_speed) {
2268 case IXGBE_LINK_SPEED_10GB_FULL:
2269 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2271 case IXGBE_LINK_SPEED_1GB_FULL:
2272 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2275 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2276 switch (adapter->link_speed) {
2277 case IXGBE_LINK_SPEED_10GB_FULL:
2278 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2282 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX) media; older ifmedia headers lack the
 * dedicated subtypes, hence the nearest-equivalent fallbacks. */
2285 #ifndef IFM_ETH_XTYPE
2286 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2287 switch (adapter->link_speed) {
2288 case IXGBE_LINK_SPEED_10GB_FULL:
2289 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2291 case IXGBE_LINK_SPEED_2_5GB_FULL:
2292 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2294 case IXGBE_LINK_SPEED_1GB_FULL:
2295 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2298 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2299 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2300 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2301 switch (adapter->link_speed) {
2302 case IXGBE_LINK_SPEED_10GB_FULL:
2303 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2305 case IXGBE_LINK_SPEED_2_5GB_FULL:
2306 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2308 case IXGBE_LINK_SPEED_1GB_FULL:
2309 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* Newer ifmedia headers: report the true KR/KX4/KX subtypes. */
2313 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2314 switch (adapter->link_speed) {
2315 case IXGBE_LINK_SPEED_10GB_FULL:
2316 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2318 case IXGBE_LINK_SPEED_2_5GB_FULL:
2319 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2321 case IXGBE_LINK_SPEED_1GB_FULL:
2322 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2325 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2326 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2327 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2328 switch (adapter->link_speed) {
2329 case IXGBE_LINK_SPEED_10GB_FULL:
2330 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2332 case IXGBE_LINK_SPEED_2_5GB_FULL:
2333 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2335 case IXGBE_LINK_SPEED_1GB_FULL:
2336 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2341 /* If nothing is recognized... */
2342 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2343 ifmr->ifm_active |= IFM_UNKNOWN;
2345 /* Display current flow control setting used on link */
2346 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2347 hw->fc.current_mode == ixgbe_fc_full)
2348 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2349 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2350 hw->fc.current_mode == ixgbe_fc_full)
2351 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2352 } /* ixgbe_media_status */
2354 /************************************************************************
2355 * ixgbe_media_change - Media Ioctl callback
2357 * Called when the user changes speed/duplex using
2358 * media/mediopt option with ifconfig.
2359 ************************************************************************/
2361 ixgbe_if_media_change(if_ctx_t ctx)
2363 struct adapter *adapter = iflib_get_softc(ctx);
2364 struct ifmedia *ifm = iflib_get_media(ctx);
2365 struct ixgbe_hw *hw = &adapter->hw;
2366 ixgbe_link_speed speed = 0;
2368 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
/* Only ethernet media requests are meaningful here. */
2370 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media is fixed; user changes are rejected below. */
2373 if (hw->phy.media_type == ixgbe_media_type_backplane)
2377 * We don't actually need to check against the supported
2378 * media types of the adapter; ifmedia will take care of
/* Translate the requested ifmedia subtype into a link-speed mask. */
2381 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2384 speed |= IXGBE_LINK_SPEED_100_FULL;
2385 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2386 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2390 #ifndef IFM_ETH_XTYPE
2391 case IFM_10G_SR: /* KR, too */
2392 case IFM_10G_CX4: /* KX4 */
2397 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2398 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2400 #ifndef IFM_ETH_XTYPE
2401 case IFM_1000_CX: /* KX */
2407 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2410 speed |= IXGBE_LINK_SPEED_100_FULL;
2411 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2413 case IFM_10G_TWINAX:
2414 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2417 speed |= IXGBE_LINK_SPEED_100_FULL;
2420 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Program the MAC/PHY and remember what we advertise. */
2426 hw->mac.autotry_restart = true;
2427 hw->mac.ops.setup_link(hw, speed, true);
/* adapter->advertise bit encoding: 1=100M, 2=1G, 4=10G, 8=10M. */
2428 adapter->advertise =
2429 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2430 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2431 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2432 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2437 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2440 } /* ixgbe_if_media_change */
2442 /************************************************************************
/*
 * ixgbe_if_promisc_set - program FCTRL unicast/multicast promiscuous
 * bits from the interface flags (IFF_PROMISC / IFF_ALLMULTI).
 */
2444 ************************************************************************/
2446 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2448 struct adapter *adapter = iflib_get_softc(ctx);
2449 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from the current filter control with unicast-promisc cleared. */
2453 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2454 rctl &= (~IXGBE_FCTRL_UPE);
2455 if (ifp->if_flags & IFF_ALLMULTI)
2456 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2458 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
/* If the exact-filter table can hold them all, multicast-promisc off. */
2460 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2461 rctl &= (~IXGBE_FCTRL_MPE);
2462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2464 if (ifp->if_flags & IFF_PROMISC) {
2465 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2466 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2467 } else if (ifp->if_flags & IFF_ALLMULTI) {
2468 rctl |= IXGBE_FCTRL_MPE;
2469 rctl &= ~IXGBE_FCTRL_UPE;
2470 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2473 } /* ixgbe_if_promisc_set */
2475 /************************************************************************
2476 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2477 ************************************************************************/
2479 ixgbe_msix_link(void *arg)
2481 struct adapter *adapter = arg;
2482 struct ixgbe_hw *hw = &adapter->hw;
2483 u32 eicr, eicr_mask;
2486 ++adapter->link_irq;
2488 /* Pause other interrupts */
2489 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2491 /* First get the cause */
2492 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2493 /* Be sure the queue bits are not cleared */
2494 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2495 /* Clear interrupt with write */
2496 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2498 /* Link status change */
2499 if (eicr & IXGBE_EICR_LSC) {
2500 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
/* Defer the actual link handling to the admin task via task_requests. */
2501 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* The remaining causes only exist on 82599 and later MACs. */
2504 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2505 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2506 (eicr & IXGBE_EICR_FLOW_DIR)) {
2507 /* This is probably overkill :) */
2508 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2509 return (FILTER_HANDLED);
2510 /* Disable the interrupt */
2511 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2512 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2514 if (eicr & IXGBE_EICR_ECC) {
2515 device_printf(iflib_get_dev(adapter->ctx),
2516 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2517 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2520 /* Check for over temp condition */
2521 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2522 switch (adapter->hw.mac.type) {
2523 case ixgbe_mac_X550EM_a:
/* X550EM_a signals over-temp via its own GPI SDP0 bit. */
2524 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2526 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2527 IXGBE_EICR_GPI_SDP0_X550EM_a);
2528 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2529 IXGBE_EICR_GPI_SDP0_X550EM_a);
/* Only warn when the PHY really reports an over-temp event. */
2530 retval = hw->phy.ops.check_overtemp(hw);
2531 if (retval != IXGBE_ERR_OVERTEMP)
2533 device_printf(iflib_get_dev(adapter->ctx),
2534 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2535 device_printf(iflib_get_dev(adapter->ctx),
2536 "System shutdown required!\n");
/* Other MACs use the thermal-sensor (TS) cause bit instead. */
2539 if (!(eicr & IXGBE_EICR_TS))
2541 retval = hw->phy.ops.check_overtemp(hw);
2542 if (retval != IXGBE_ERR_OVERTEMP)
2544 device_printf(iflib_get_dev(adapter->ctx),
2545 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2546 device_printf(iflib_get_dev(adapter->ctx),
2547 "System shutdown required!\n");
2548 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2553 /* Check for VF message */
2554 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2555 (eicr & IXGBE_EICR_MAILBOX))
2556 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
/* SFP module insertion/removal and multi-speed-fiber events. */
2559 if (ixgbe_is_sfp(hw)) {
2560 /* Pluggable optics-related interrupt */
2561 if (hw->mac.type >= ixgbe_mac_X540)
2562 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2564 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2566 if (eicr & eicr_mask) {
2567 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2568 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2571 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2572 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2573 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2574 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2575 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2579 /* Check for fan failure */
2580 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2581 ixgbe_check_fan_failure(adapter, eicr, true);
2582 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2585 /* External PHY interrupt */
2586 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2587 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2588 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2589 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Schedule the admin task only if some request bit was set above. */
2592 return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2593 } /* ixgbe_msix_link */
2595 /************************************************************************
2596 * ixgbe_sysctl_interrupt_rate_handler
/*
 * Sysctl to read/set the per-queue interrupt rate limit via EITR.
 */
2597 ************************************************************************/
2599 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2601 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2603 unsigned int reg, usec, rate;
/* Recover the current interval from EITR bits [11:3] and derive a rate. */
2605 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2606 usec = ((reg & 0x0FF8) >> 3);
2608 rate = 500000 / usec;
2611 error = sysctl_handle_int(oidp, &rate, 0, req);
2612 if (error || !req->newptr)
2614 reg &= ~0xfff; /* default, no limitation */
2615 ixgbe_max_interrupt_rate = 0;
2616 if (rate > 0 && rate < 500000) {
2619 ixgbe_max_interrupt_rate = rate;
/* Translate the requested rate back into the EITR interval field. */
2620 reg |= ((4000000/rate) & 0xff8);
2622 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2625 } /* ixgbe_sysctl_interrupt_rate_handler */
2627 /************************************************************************
2628 * ixgbe_add_device_sysctls
/*
 * Registers all per-device sysctl nodes; some are conditional on the
 * MAC generation, device id, or enabled feature capabilities.
 */
2629 ************************************************************************/
2631 ixgbe_add_device_sysctls(if_ctx_t ctx)
2633 struct adapter *adapter = iflib_get_softc(ctx);
2634 device_t dev = iflib_get_dev(ctx);
2635 struct ixgbe_hw *hw = &adapter->hw;
2636 struct sysctl_oid_list *child;
2637 struct sysctl_ctx_list *ctx_list;
2639 ctx_list = device_get_sysctl_ctx(dev);
2640 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2642 /* Sysctls for all devices */
2643 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2644 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2645 adapter, 0, ixgbe_sysctl_flowcntl, "I",
2646 IXGBE_SYSCTL_DESC_SET_FC);
2648 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2649 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2650 adapter, 0, ixgbe_sysctl_advertise, "I",
2651 IXGBE_SYSCTL_DESC_ADV_SPEED);
/* Seed the AIM knob from the loader tunable, then expose it. */
2653 adapter->enable_aim = ixgbe_enable_aim;
2654 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2655 &adapter->enable_aim, 0, "Interrupt Moderation");
2657 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2658 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2659 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2662 /* testing sysctls (for all devices) */
2663 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2664 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2665 adapter, 0, ixgbe_sysctl_power_state,
2666 "I", "PCI Power State");
2668 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2669 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2670 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2672 /* for X550 series devices */
2673 if (hw->mac.type >= ixgbe_mac_X550)
2674 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2675 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2676 adapter, 0, ixgbe_sysctl_dmac,
2677 "I", "DMA Coalesce");
2679 /* for WoL-capable devices */
2680 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2681 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2682 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2683 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2685 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2686 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2687 adapter, 0, ixgbe_sysctl_wufc,
2688 "I", "Enable/Disable Wake Up Filters");
2691 /* for X552/X557-AT devices */
2692 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2693 struct sysctl_oid *phy_node;
2694 struct sysctl_oid_list *phy_list;
/* Group external-PHY sensors under a "phy" subtree. */
2696 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2697 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2698 phy_list = SYSCTL_CHILDREN(phy_node);
2700 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2701 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2702 adapter, 0, ixgbe_sysctl_phy_temp,
2703 "I", "Current External PHY Temperature (Celsius)");
2705 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2706 "overtemp_occurred",
2707 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2708 ixgbe_sysctl_phy_overtemp_occurred, "I",
2709 "External PHY High Temperature Event Occurred");
2712 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2713 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2714 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2715 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2717 } /* ixgbe_add_device_sysctls */
2719 /************************************************************************
2720 * ixgbe_allocate_pci_resources
/*
 * Maps the device's memory BAR and stashes the bus-space handles the
 * register access macros (IXGBE_READ/WRITE_REG) depend on.
 */
2721 ************************************************************************/
2723 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2725 struct adapter *adapter = iflib_get_softc(ctx);
2726 device_t dev = iflib_get_dev(ctx);
2730 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2733 if (!(adapter->pci_mem)) {
2734 device_printf(dev, "Unable to allocate bus resource: memory\n");
2738 /* Save bus_space values for READ/WRITE_REG macros */
2739 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2740 adapter->osdep.mem_bus_space_handle =
2741 rman_get_bushandle(adapter->pci_mem);
2742 /* Set hw values for shared code */
2743 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2746 } /* ixgbe_allocate_pci_resources */
2748 /************************************************************************
2749 * ixgbe_detach - Device removal routine
2751 * Called when the driver is being removed.
2752 * Stops the adapter and deallocates all the resources
2753 * that were allocated for driver operation.
2755 * return 0 on success, positive on failure
2756 ************************************************************************/
2758 ixgbe_if_detach(if_ctx_t ctx)
2760 struct adapter *adapter = iflib_get_softc(ctx);
2761 device_t dev = iflib_get_dev(ctx);
2764 INIT_DEBUGOUT("ixgbe_detach: begin")
/* Refuse to detach while SR-IOV virtual functions are still attached. */
2766 if (ixgbe_pci_iov_detach(dev) != 0) {
2767 device_printf(dev, "SR-IOV in use; detach first.\n");
/* Park the port in its low-power/WoL state before teardown. */
2771 ixgbe_setup_low_power_mode(ctx);
2773 /* let hardware know driver is unloading */
2774 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2775 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2778 ixgbe_free_pci_resources(ctx);
2779 free(adapter->mta, M_IXGBE);
2782 } /* ixgbe_if_detach */
2784 /************************************************************************
2785 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2787 * Prepare the adapter/port for LPLU and/or WoL
2788 ************************************************************************/
2790 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2792 struct adapter *adapter = iflib_get_softc(ctx);
2793 struct ixgbe_hw *hw = &adapter->hw;
2794 device_t dev = iflib_get_dev(ctx);
/* Without WoL there is no need to keep the PHY powered. */
2797 if (!hw->wol_enabled)
2798 ixgbe_set_phy_power(hw, false);
2800 /* Limit power management flow to X550EM baseT */
2801 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2802 hw->phy.ops.enter_lplu) {
2803 /* Turn off support for APM wakeup. (Using ACPI instead) */
2804 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2805 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2808 * Clear Wake Up Status register to prevent any previous wakeup
2809 * events from waking us up immediately after we suspend.
2811 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2814 * Program the Wakeup Filter Control register with user filter
2817 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2819 /* Enable wakeups and power management in Wakeup Control */
2820 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2821 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2823 /* X550EM baseT adapters need a special LPLU flow */
/* Hold off PHY resets while the PHY firmware enters LPLU. */
2824 hw->phy.reset_disable = true;
2826 error = hw->phy.ops.enter_lplu(hw);
2828 device_printf(dev, "Error entering LPLU: %d\n", error);
2829 hw->phy.reset_disable = false;
2831 /* Just stop for other adapters */
2836 } /* ixgbe_setup_low_power_mode */
2838 /************************************************************************
2839 * ixgbe_shutdown - Shutdown entry point
/*
 * Delegates to ixgbe_setup_low_power_mode and returns its status.
 */
2840 ************************************************************************/
2842 ixgbe_if_shutdown(if_ctx_t ctx)
2846 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2848 error = ixgbe_setup_low_power_mode(ctx);
2851 } /* ixgbe_if_shutdown */
2853 /************************************************************************
/*
 * ixgbe_if_suspend - suspend entry point; identical flow to shutdown:
 * drop the port into low-power/WoL mode and return the result.
 */
2857 ************************************************************************/
2859 ixgbe_if_suspend(if_ctx_t ctx)
2863 INIT_DEBUGOUT("ixgbe_suspend: begin");
2865 error = ixgbe_setup_low_power_mode(ctx);
2868 } /* ixgbe_if_suspend */
2870 /************************************************************************
/*
 * ixgbe_if_resume - resume entry point; reports and clears any wakeup
 * cause, then re-initializes the interface if it was up.
 */
2874 ************************************************************************/
2876 ixgbe_if_resume(if_ctx_t ctx)
2878 struct adapter *adapter = iflib_get_softc(ctx);
2879 device_t dev = iflib_get_dev(ctx);
2880 struct ifnet *ifp = iflib_get_ifp(ctx);
2881 struct ixgbe_hw *hw = &adapter->hw;
2884 INIT_DEBUGOUT("ixgbe_resume: begin");
2886 /* Read & clear WUS register */
2887 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2889 device_printf(dev, "Woken up by (WUS): %#010x\n",
2890 IXGBE_READ_REG(hw, IXGBE_WUS));
2891 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2892 /* And clear WUFC until next low-power transition */
2893 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2896 * Required after D3->D0 transition;
2897 * will re-advertise all previous advertised speeds
2899 if (ifp->if_flags & IFF_UP)
2903 } /* ixgbe_if_resume */
2905 /************************************************************************
2906 * ixgbe_if_mtu_set - Ioctl mtu entry point
2908 * Return 0 on success, EINVAL on failure
2909 ************************************************************************/
2911 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2913 struct adapter *adapter = iflib_get_softc(ctx);
2916 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject anything beyond the hardware's maximum MTU. */
2918 if (mtu > IXGBE_MAX_MTU) {
/* Frame size = MTU plus ethernet header overhead. */
2921 adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2925 } /* ixgbe_if_mtu_set */
2927 /************************************************************************
2928  * ixgbe_if_crcstrip_set
2929 ************************************************************************/
/* Enables or disables hardware CRC stripping. Used by netmap: when netmap
 * is entered with crcstrip disabled the CRC is kept on received frames
 * (fast RX path); otherwise the default stripping mode is restored. */
2931 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2933 struct adapter *sc = iflib_get_softc(ctx);
2934 struct ixgbe_hw *hw = &sc->hw;
2935 /* crc stripping is set in two places:
2936  * IXGBE_HLREG0 (modified on init_locked and hw reset)
2937  * IXGBE_RDRXCTL (set by the original driver in
2938  * ixgbe_setup_hw_rsc() called in init_locked.
2939  * We disable the setting when netmap is compiled in).
2940  * We update the values here, but also in ixgbe.c because
2941  * init_locked sometimes is called outside our control.
2945 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2946 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2949 D("%s read HLREG 0x%x rxc 0x%x",
2950 onoff ? "enter" : "exit", hl, rxc);
2952 /* hw requirements ... */
/* RSCFRSTSIZE must be cleared and RSCACKC set per datasheet requirements
 * when modifying RDRXCTL. */
2953 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2954 rxc |= IXGBE_RDRXCTL_RSCACKC;
2955 if (onoff && !crcstrip) {
2956 /* keep the crc. Fast rx */
2957 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2958 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2960 /* reset default mode */
2961 hl |= IXGBE_HLREG0_RXCRCSTRP;
2962 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2966 D("%s write HLREG 0x%x rxc 0x%x",
2967 onoff ? "enter" : "exit", hl, rxc);
2969 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2970 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2971 } /* ixgbe_if_crcstrip_set */
2973 /*********************************************************************
2974  * ixgbe_if_init - Init entry point
2976  * Used in two ways: It is used by the stack as an init
2977  * entry point in network interface structure. It is also
2978  * used by the driver as a hw/sw initialization routine to
2979  * get to a consistent state.
2981  * Return 0 on success, positive on failure
2982  **********************************************************************/
2984 ixgbe_if_init(if_ctx_t ctx)
2986 struct adapter *adapter = iflib_get_softc(ctx);
2987 struct ifnet *ifp = iflib_get_ifp(ctx);
2988 device_t dev = iflib_get_dev(ctx);
2989 struct ixgbe_hw *hw = &adapter->hw;
2990 struct ix_rx_queue *rx_que;
2991 struct ix_tx_queue *tx_que;
2998 INIT_DEBUGOUT("ixgbe_if_init: begin");
3000 /* Queue indices may change with IOV mode */
3001 ixgbe_align_all_queue_indices(adapter);
3003 /* reprogram the RAR[0] in case user changed it. */
3004 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3006 /* Get the latest mac address, User can use a LAA */
3007 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3008 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3009 hw->addr_ctrl.rar_used_count = 1;
3013 ixgbe_initialize_iov(adapter);
3015 ixgbe_initialize_transmit_units(ctx);
3017 /* Setup Multicast table */
3018 ixgbe_if_multi_set(ctx);
3020 /* Determine the correct mbuf pool, based on frame size */
3021 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3023 /* Configure RX settings */
3024 ixgbe_initialize_receive_units(ctx);
3027  * Initialize variable holding task enqueue requests
3028  * from MSI-X interrupts
3030 adapter->task_requests = 0;
3032 /* Enable SDP & MSI-X interrupts based on adapter */
3033 ixgbe_config_gpie(adapter);
/* Program the maximum frame size (MHADD/MAXFRS) for jumbo frames. */
3036 if (ifp->if_mtu > ETHERMTU) {
3037 /* aka IXGBE_MAXFRS on 82599 and newer */
3038 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3039 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3040 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3041 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3044 /* Now enable all the queues */
3045 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
3046 struct tx_ring *txr = &tx_que->txr;
3048 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3049 txdctl |= IXGBE_TXDCTL_ENABLE;
3050 /* Set WTHRESH to 8, burst writeback */
3051 txdctl |= (8 << 16);
3053  * When the internal queue falls below PTHRESH (32),
3054  * start prefetching as long as there are at least
3055  * HTHRESH (1) buffers ready. The values are taken
3056  * from the Intel linux driver 3.8.21.
3057  * Prefetching enables tx line rate even with 1 queue.
3059 txdctl |= (32 << 0) | (1 << 8);
3060 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX queue and poll (up to 10 iterations) for the enable
 * bit to stick before moving on. */
3063 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
3064 struct rx_ring *rxr = &rx_que->rxr;
3066 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3067 if (hw->mac.type == ixgbe_mac_82598EB) {
/* 82598-specific: clear the low 22 bits of RXDCTL (threshold fields). */
3073 rxdctl &= ~0x3FFFFF;
3076 rxdctl |= IXGBE_RXDCTL_ENABLE;
3077 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3078 for (j = 0; j < 10; j++) {
3079 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3080 IXGBE_RXDCTL_ENABLE)
3088 /* Enable Receive engine */
3089 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3090 if (hw->mac.type == ixgbe_mac_82598EB)
3091 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3092 rxctrl |= IXGBE_RXCTRL_RXEN;
3093 ixgbe_enable_rx_dma(hw, rxctrl);
3095 /* Set up MSI/MSI-X routing */
3096 if (ixgbe_enable_msix) {
3097 ixgbe_configure_ivars(adapter);
3098 /* Set up auto-mask */
3099 if (hw->mac.type == ixgbe_mac_82598EB)
3100 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3102 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3103 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3105 } else { /* Simple settings for Legacy/MSI */
3106 ixgbe_set_ivar(adapter, 0, 0, 0);
3107 ixgbe_set_ivar(adapter, 0, 0, 1);
3108 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3111 ixgbe_init_fdir(adapter);
3114  * Check on any SFP devices that
3115  * need to be kick-started
3117 if (hw->phy.type == ixgbe_phy_none) {
3118 err = hw->phy.ops.identify(hw);
3119 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3121 "Unsupported SFP+ module type was detected.\n");
3126 /* Set moderation on the Link interrupt */
3127 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3129 /* Enable power to the phy. */
3130 ixgbe_set_phy_power(hw, true);
3132 /* Config/Enable Link */
3133 ixgbe_config_link(ctx);
3135 /* Hardware Packet Buffer & Flow Control setup */
3136 ixgbe_config_delay_values(adapter);
3138 /* Initialize the FC settings */
3141 /* Set up VLAN support and filter */
3142 ixgbe_setup_vlan_hw_support(ctx);
3144 /* Setup DMA Coalescing */
3145 ixgbe_config_dmac(adapter);
3147 /* And now turn on interrupts */
3148 ixgbe_if_enable_intr(ctx);
3150 /* Enable the use of the MBX by the VF's */
3151 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3154 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3157 } /* ixgbe_if_init */
3159 /************************************************************************
3162  * Setup the correct IVAR register for a particular MSI-X interrupt
3163  * (yes this is all very magic and confusing :)
3164  * - entry is the register array entry
3165  * - vector is the MSI-X vector for this queue
3166  * - type is RX/TX/MISC
3167 ************************************************************************/
/* type encoding: 0 = RX, 1 = TX, -1 = MISC/other causes. */
3169 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3171 struct ixgbe_hw *hw = &adapter->hw;
/* The ALLOC_VAL bit marks the IVAR entry as valid. */
3174 vector |= IXGBE_IVAR_ALLOC_VAL;
3176 switch (hw->mac.type) {
3177 case ixgbe_mac_82598EB:
/* 82598: MISC causes live at a fixed index; RX/TX entries are offset
 * by type * 64, four 8-bit entries per 32-bit IVAR register. */
3179 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3181 entry += (type * 64);
3182 index = (entry >> 2) & 0x1F;
3183 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3184 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3185 ivar |= (vector << (8 * (entry & 0x3)));
3186 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3188 case ixgbe_mac_82599EB:
3189 case ixgbe_mac_X540:
3190 case ixgbe_mac_X550:
3191 case ixgbe_mac_X550EM_x:
3192 case ixgbe_mac_X550EM_a:
3193 if (type == -1) { /* MISC IVAR */
3194 index = (entry & 1) * 8;
3195 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3196 ivar &= ~(0xFF << index);
3197 ivar |= (vector << index);
3198 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3199 } else { /* RX/TX IVARS */
/* 82599+ layout: two queue entries per IVAR register; RX in the low
 * byte of each 16-bit half, TX in the high byte. */
3200 index = (16 * (entry & 1)) + (8 * type);
3201 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3202 ivar &= ~(0xFF << index);
3203 ivar |= (vector << index);
3204 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3209 } /* ixgbe_set_ivar */
3211 /************************************************************************
3212  * ixgbe_configure_ivars
3213 ************************************************************************/
/* Routes every RX and TX queue, plus the link interrupt, to its MSI-X
 * vector via ixgbe_set_ivar(), and programs an initial EITR moderation
 * value derived from the ixgbe_max_interrupt_rate tunable. */
3215 ixgbe_configure_ivars(struct adapter *adapter)
3217 struct ix_rx_queue *rx_que = adapter->rx_queues;
3218 struct ix_tx_queue *tx_que = adapter->tx_queues;
3221 if (ixgbe_max_interrupt_rate > 0)
/* EITR interval field: 4 usec granularity, mask keeps bits [11:3]. */
3222 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3225  * Disable DMA coalescing if interrupt moderation is
3232 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3233 struct rx_ring *rxr = &rx_que->rxr;
3235 /* First the RX queue entry */
3236 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3238 /* Set an Initial EITR value */
3239 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3241 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3242 struct tx_ring *txr = &tx_que->txr;
3244 /* ... and the TX */
3245 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3247 /* For the Link interrupt */
3248 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3249 } /* ixgbe_configure_ivars */
3251 /************************************************************************
3253 ************************************************************************/
/* ixgbe_config_gpie - program the General Purpose Interrupt Enable
 * register: MSI-X mode bits plus the SDP pins used for fan-failure,
 * thermal-sensor and link/module detection on the various MAC types. */
3255 ixgbe_config_gpie(struct adapter *adapter)
3257 struct ixgbe_hw *hw = &adapter->hw;
3260 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3262 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3263 /* Enable Enhanced MSI-X mode */
3264 gpie |= IXGBE_GPIE_MSIX_MODE
3266 | IXGBE_GPIE_PBA_SUPPORT
3270 /* Fan Failure Interrupt */
3271 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3272 gpie |= IXGBE_SDP1_GPIEN;
3274 /* Thermal Sensor Interrupt */
3275 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3276 gpie |= IXGBE_SDP0_GPIEN_X540;
3278 /* Link detection */
3279 switch (hw->mac.type) {
3280 case ixgbe_mac_82599EB:
3281 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3283 case ixgbe_mac_X550EM_x:
3284 case ixgbe_mac_X550EM_a:
3285 gpie |= IXGBE_SDP0_GPIEN_X540;
3291 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3293 } /* ixgbe_config_gpie */
3295 /************************************************************************
3296  * ixgbe_config_delay_values
3298  * Requires adapter->max_frame_size to be set.
3299 ************************************************************************/
/* Computes the flow-control high/low water marks from the max frame
 * size using the per-MAC delay-value formulas, and sets the standard
 * pause time and XON behavior. */
3301 ixgbe_config_delay_values(struct adapter *adapter)
3303 struct ixgbe_hw *hw = &adapter->hw;
3304 u32 rxpb, frame, size, tmp;
3306 frame = adapter->max_frame_size;
3308 /* Calculate High Water */
3309 switch (hw->mac.type) {
3310 case ixgbe_mac_X540:
3311 case ixgbe_mac_X550:
3312 case ixgbe_mac_X550EM_x:
3313 case ixgbe_mac_X550EM_a:
3314 tmp = IXGBE_DV_X540(frame, frame);
3317 tmp = IXGBE_DV(frame, frame);
/* Convert delay value (bit times) to KB and subtract from the RX
 * packet-buffer size (RXPBSIZE is in bytes; >> 10 converts to KB). */
3320 size = IXGBE_BT2KB(tmp);
3321 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3322 hw->fc.high_water[0] = rxpb - size;
3324 /* Now calculate Low Water */
3325 switch (hw->mac.type) {
3326 case ixgbe_mac_X540:
3327 case ixgbe_mac_X550:
3328 case ixgbe_mac_X550EM_x:
3329 case ixgbe_mac_X550EM_a:
3330 tmp = IXGBE_LOW_DV_X540(frame);
3333 tmp = IXGBE_LOW_DV(frame);
3336 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3338 hw->fc.pause_time = IXGBE_FC_PAUSE;
3339 hw->fc.send_xon = true;
3340 } /* ixgbe_config_delay_values */
3342 /************************************************************************
3343  * ixgbe_set_multi - Multicast Update
3345  * Called whenever multicast address list is updated.
3346 ************************************************************************/
/* if_foreach_llmaddr() callback: copies one link-level multicast address
 * into the adapter's MTA array, tagged with the adapter's VMDq pool.
 * Stops accepting entries once the table is full. */
3348 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3350 struct adapter *adapter = arg;
3351 struct ixgbe_mc_addr *mta = adapter->mta;
3353 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3355 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3356 mta[idx].vmdq = adapter->pool;
3359 } /* ixgbe_mc_filter_apply */
/* Rebuilds the hardware multicast table from the interface's address
 * list and sets promiscuous/all-multi bits in FCTRL as appropriate. */
3362 ixgbe_if_multi_set(if_ctx_t ctx)
3364 struct adapter *adapter = iflib_get_softc(ctx);
3365 struct ixgbe_mc_addr *mta;
3366 struct ifnet *ifp = iflib_get_ifp(ctx);
3371 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3374 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3376 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3379 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* Promisc enables both unicast and multicast promiscuous; an overflowing
 * multicast list or IFF_ALLMULTI falls back to multicast-promiscuous only. */
3381 if (ifp->if_flags & IFF_PROMISC)
3382 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3383 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3384 ifp->if_flags & IFF_ALLMULTI) {
3385 fctrl |= IXGBE_FCTRL_MPE;
3386 fctrl &= ~IXGBE_FCTRL_UPE;
3388 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3390 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3392 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3393 update_ptr = (u8 *)mta;
3394 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3395 ixgbe_mc_array_itr, true);
3398 } /* ixgbe_if_multi_set */
3400 /************************************************************************
3401  * ixgbe_mc_array_itr
3403  * An iterator function needed by the multicast shared code.
3404  * It feeds the shared code routine the addresses in the
3405  * array of ixgbe_set_multi() one by one.
3406 ************************************************************************/
3408 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3410 struct ixgbe_mc_addr *mta;
3412 mta = (struct ixgbe_mc_addr *)*update_ptr;
/* Advance the opaque cursor to the next table entry for the next call. */
3415 *update_ptr = (u8*)(mta + 1);
3418 } /* ixgbe_mc_array_itr */
3420 /************************************************************************
3421  * ixgbe_local_timer - Timer routine
3423  * Checks for link status, updates statistics,
3424  * and runs the watchdog check.
3425 ************************************************************************/
3427 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3429 struct adapter *adapter = iflib_get_softc(ctx);
3434 /* Check for pluggable optics */
3435 if (adapter->sfp_probe)
3436 if (!ixgbe_sfp_probe(ctx))
3437 return; /* Nothing to do */
/* Refresh the cached link state; admin task does the OS-visible update. */
3439 ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3440 &adapter->link_up, 0);
3442 /* Fire off the adminq task */
3443 iflib_admin_intr_deferred(ctx);
3445 } /* ixgbe_if_timer */
3447 /************************************************************************
3450  * Determine if a port had optics inserted.
3451 ************************************************************************/
/* ixgbe_sfp_probe - returns true once a (supported) SFP+ module is
 * detected on an ixgbe_phy_nl port that previously had none; clears
 * adapter->sfp_probe after a module is found. */
3453 ixgbe_sfp_probe(if_ctx_t ctx)
3455 struct adapter *adapter = iflib_get_softc(ctx);
3456 struct ixgbe_hw *hw = &adapter->hw;
3457 device_t dev = iflib_get_dev(ctx);
3458 bool result = false;
3460 if ((hw->phy.type == ixgbe_phy_nl) &&
3461 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3462 s32 ret = hw->phy.ops.identify_sfp(hw);
3465 ret = hw->phy.ops.reset(hw);
3466 adapter->sfp_probe = false;
3467 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3468 device_printf(dev, "Unsupported SFP+ module detected!");
3470 "Reload driver with supported module.\n");
3473 device_printf(dev, "SFP+ module detected!\n");
3474 /* We now have supported optics */
3480 } /* ixgbe_sfp_probe */
3482 /************************************************************************
3483  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3484 ************************************************************************/
/* Identifies a newly inserted SFP+ module, runs the MAC-specific SFP
 * setup, and on success requests the MSF (multispeed fiber) task to
 * renegotiate link. On any failure the MSF request is cleared. */
3486 ixgbe_handle_mod(void *context)
3488 if_ctx_t ctx = context;
3489 struct adapter *adapter = iflib_get_softc(ctx);
3490 struct ixgbe_hw *hw = &adapter->hw;
3491 device_t dev = iflib_get_dev(ctx);
3492 u32 err, cage_full = 0;
/* Crosstalk-fix hardware: first confirm a module is physically present
 * by sampling the cage-presence bit in ESDP. */
3494 if (adapter->hw.need_crosstalk_fix) {
3495 switch (hw->mac.type) {
3496 case ixgbe_mac_82599EB:
3497 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3500 case ixgbe_mac_X550EM_x:
3501 case ixgbe_mac_X550EM_a:
3502 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3510 goto handle_mod_out;
3513 err = hw->phy.ops.identify_sfp(hw);
3514 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3516 "Unsupported SFP+ module type was detected.\n");
3517 goto handle_mod_out;
/* 82598 resets the PHY; newer MACs run the dedicated SFP setup hook. */
3520 if (hw->mac.type == ixgbe_mac_82598EB)
3521 err = hw->phy.ops.reset(hw);
3523 err = hw->mac.ops.setup_sfp(hw);
3525 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3527 "Setup failure - unsupported SFP+ module type.\n");
3528 goto handle_mod_out;
3530 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3534 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3535 } /* ixgbe_handle_mod */
3538 /************************************************************************
3539  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3540 ************************************************************************/
/* Re-runs link setup after an SFP change: refreshes the supported PHY
 * layer, (re)negotiates advertised speeds, and rebuilds the ifmedia
 * list so ifconfig reflects the new module's capabilities. */
3542 ixgbe_handle_msf(void *context)
3544 if_ctx_t ctx = context;
3545 struct adapter *adapter = iflib_get_softc(ctx);
3546 struct ixgbe_hw *hw = &adapter->hw;
3550 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3551 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
/* If nothing is explicitly advertised, fall back to the hardware's
 * full link capabilities. */
3553 autoneg = hw->phy.autoneg_advertised;
3554 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3555 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3556 if (hw->mac.ops.setup_link)
3557 hw->mac.ops.setup_link(hw, autoneg, true);
3559 /* Adjust media types shown in ifconfig */
3560 ifmedia_removeall(adapter->media);
3561 ixgbe_add_media_types(adapter->ctx);
3562 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3563 } /* ixgbe_handle_msf */
3565 /************************************************************************
3566  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3567 ************************************************************************/
/* Services a LASI (Link Alarm Status Interrupt) from the external PHY
 * and reports over-temperature or other LASI handling errors. */
3569 ixgbe_handle_phy(void *context)
3571 if_ctx_t ctx = context;
3572 struct adapter *adapter = iflib_get_softc(ctx);
3573 struct ixgbe_hw *hw = &adapter->hw;
3576 error = hw->phy.ops.handle_lasi(hw);
3577 if (error == IXGBE_ERR_OVERTEMP)
3578 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3580 device_printf(adapter->dev,
3581 "Error handling LASI interrupt: %d\n", error);
3582 } /* ixgbe_handle_phy */
3584 /************************************************************************
3585  * ixgbe_if_stop - Stop the hardware
3587  * Disables all traffic on the adapter by issuing a
3588  * global reset on the MAC and deallocates TX/RX buffers.
3589 ************************************************************************/
3591 ixgbe_if_stop(if_ctx_t ctx)
3593 struct adapter *adapter = iflib_get_softc(ctx);
3594 struct ixgbe_hw *hw = &adapter->hw;
3596 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs. */
3599 hw->adapter_stopped = false;
3600 ixgbe_stop_adapter(hw);
3601 if (hw->mac.type == ixgbe_mac_82599EB)
3602 ixgbe_stop_mac_link_on_d3_82599(hw);
3603 /* Turn off the laser - noop with no optics */
3604 ixgbe_disable_tx_laser(hw);
3606 /* Update the stack */
3607 adapter->link_up = false;
3608 ixgbe_if_update_admin_status(ctx);
3610 /* reprogram the RAR[0] in case user changed it. */
3611 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3614 } /* ixgbe_if_stop */
3616 /************************************************************************
3617  * ixgbe_update_link_status - Update OS on link state
3619  * Note: Only updates the OS on the cached link state.
3620  * The real check of the hardware only happens with
3622 ************************************************************************/
3624 ixgbe_if_update_admin_status(if_ctx_t ctx)
3626 struct adapter *adapter = iflib_get_softc(ctx);
3627 device_t dev = iflib_get_dev(ctx);
/* Transition handling: only act when cached link_up differs from the
 * previously reported link_active state. */
3629 if (adapter->link_up) {
3630 if (adapter->link_active == false) {
3632 device_printf(dev, "Link is up %d Gbps %s \n",
3633 ((adapter->link_speed == 128) ? 10 : 1),
3635 adapter->link_active = true;
3636 /* Update any Flow Control changes */
3637 ixgbe_fc_enable(&adapter->hw);
3638 /* Update DMA coalescing config */
3639 ixgbe_config_dmac(adapter);
3640 /* should actually be negotiated value */
3641 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3643 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3644 ixgbe_ping_all_vfs(adapter);
3646 } else { /* Link down */
3647 if (adapter->link_active == true) {
3649 device_printf(dev, "Link is Down\n");
3650 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3651 adapter->link_active = false;
3652 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3653 ixgbe_ping_all_vfs(adapter);
3657 /* Handle task requests from msix_link() */
3658 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3659 ixgbe_handle_mod(ctx);
3660 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3661 ixgbe_handle_msf(ctx);
3662 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3663 ixgbe_handle_mbx(ctx);
3664 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3665 ixgbe_reinit_fdir(ctx);
3666 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3667 ixgbe_handle_phy(ctx);
/* All pending requests have been serviced; reset the bitmask. */
3668 adapter->task_requests = 0;
3670 ixgbe_update_stats_counters(adapter);
3671 } /* ixgbe_if_update_admin_status */
3673 /************************************************************************
3674  * ixgbe_config_dmac - Configure DMA Coalescing
3675 ************************************************************************/
/* Only X550-class MACs with a dmac_config hook support DMA coalescing;
 * reprogram only when the watchdog timer or link speed changed. */
3677 ixgbe_config_dmac(struct adapter *adapter)
3679 struct ixgbe_hw *hw = &adapter->hw;
3680 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3682 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR used as "differs" test on the two configuration inputs. */
3685 if (dcfg->watchdog_timer ^ adapter->dmac ||
3686 dcfg->link_speed ^ adapter->link_speed) {
3687 dcfg->watchdog_timer = adapter->dmac;
3688 dcfg->fcoe_en = false;
3689 dcfg->link_speed = adapter->link_speed;
3692 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3693 dcfg->watchdog_timer, dcfg->link_speed);
3695 hw->mac.ops.dmac_config(hw);
3697 } /* ixgbe_config_dmac */
3699 /************************************************************************
3700  * ixgbe_if_enable_intr
3701 ************************************************************************/
/* Builds the EIMS mask of non-queue causes appropriate for the MAC type
 * and enabled features, programs auto-clear (EIAC) for MSI-X, then
 * enables each RX queue vector individually. */
3703 ixgbe_if_enable_intr(if_ctx_t ctx)
3705 struct adapter *adapter = iflib_get_softc(ctx);
3706 struct ixgbe_hw *hw = &adapter->hw;
3707 struct ix_rx_queue *que = adapter->rx_queues;
/* Start from the full enable mask minus the per-queue bits, which are
 * enabled separately below. */
3710 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3712 switch (adapter->hw.mac.type) {
3713 case ixgbe_mac_82599EB:
3714 mask |= IXGBE_EIMS_ECC;
3715 /* Temperature sensor on some adapters */
3716 mask |= IXGBE_EIMS_GPI_SDP0;
3717 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3718 mask |= IXGBE_EIMS_GPI_SDP1;
3719 mask |= IXGBE_EIMS_GPI_SDP2;
3721 case ixgbe_mac_X540:
3722 /* Detect if Thermal Sensor is enabled */
3723 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3724 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3725 mask |= IXGBE_EIMS_TS;
3726 mask |= IXGBE_EIMS_ECC;
3728 case ixgbe_mac_X550:
3729 /* MAC thermal sensor is automatically enabled */
3730 mask |= IXGBE_EIMS_TS;
3731 mask |= IXGBE_EIMS_ECC;
3733 case ixgbe_mac_X550EM_x:
3734 case ixgbe_mac_X550EM_a:
3735 /* Some devices use SDP0 for important information */
3736 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3737 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3738 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3739 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3740 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3741 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3742 mask |= IXGBE_EICR_GPI_SDP0_X540;
3743 mask |= IXGBE_EIMS_ECC;
3749 /* Enable Fan Failure detection */
3750 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3751 mask |= IXGBE_EIMS_GPI_SDP1;
3753 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3754 mask |= IXGBE_EIMS_MAILBOX;
3755 /* Enable Flow Director */
3756 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3757 mask |= IXGBE_EIMS_FLOW_DIR;
3759 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3761 /* With MSI-X we use auto clear */
3762 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3763 mask = IXGBE_EIMS_ENABLE_MASK;
3764 /* Don't autoclear Link */
3765 mask &= ~IXGBE_EIMS_OTHER;
3766 mask &= ~IXGBE_EIMS_LSC;
3767 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3768 mask &= ~IXGBE_EIMS_MAILBOX;
3769 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3773  * Now enable all queues, this is done separately to
3774  * allow for handling the extended (beyond 32) MSI-X
3775  * vectors that can be used by 82599
3777 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3778 ixgbe_enable_queue(adapter, que->msix);
3780 IXGBE_WRITE_FLUSH(hw);
3782 } /* ixgbe_if_enable_intr */
3784 /************************************************************************
3785  * ixgbe_disable_intr
3786 ************************************************************************/
/* Masks all interrupt causes: clears auto-clear under MSI-X, then
 * writes the EIMC register(s). 82598 has a single 32-bit EIMC; newer
 * MACs also need the extended EIMC_EX registers cleared. */
3788 ixgbe_if_disable_intr(if_ctx_t ctx)
3790 struct adapter *adapter = iflib_get_softc(ctx);
3792 if (adapter->intr_type == IFLIB_INTR_MSIX)
3793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3794 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3795 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3797 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3798 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3799 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3801 IXGBE_WRITE_FLUSH(&adapter->hw);
3803 } /* ixgbe_if_disable_intr */
3805 /************************************************************************
3806  * ixgbe_link_intr_enable
3807 ************************************************************************/
/* Re-arms the non-queue causes (OTHER + link-status-change) after the
 * admin/link task has run. */
3809 ixgbe_link_intr_enable(if_ctx_t ctx)
3811 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3813 /* Re-enable other interrupts */
3814 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3815 } /* ixgbe_link_intr_enable */
3817 /************************************************************************
3818  * ixgbe_if_rx_queue_intr_enable
3819 ************************************************************************/
/* iflib per-queue re-enable hook: unmask the MSI-X vector that serves
 * the given RX queue. */
3821 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3823 struct adapter *adapter = iflib_get_softc(ctx);
3824 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3826 ixgbe_enable_queue(adapter, que->msix);
3829 } /* ixgbe_if_rx_queue_intr_enable */
3831 /************************************************************************
3832  * ixgbe_enable_queue
3833 ************************************************************************/
/* Unmasks the interrupt for one MSI-X vector. 82598 uses the single
 * EIMS register; newer MACs split the 64-bit queue bitmap across
 * EIMS_EX(0) (low 32 vectors) and EIMS_EX(1) (high 32). */
3835 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3837 struct ixgbe_hw *hw = &adapter->hw;
3838 u64 queue = 1ULL << vector;
3841 if (hw->mac.type == ixgbe_mac_82598EB) {
3842 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3843 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3845 mask = (queue & 0xFFFFFFFF);
3847 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3848 mask = (queue >> 32);
3850 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3852 } /* ixgbe_enable_queue */
3854 /************************************************************************
3855  * ixgbe_disable_queue
3856 ************************************************************************/
/* Mirror of ixgbe_enable_queue: masks one MSI-X vector via EIMC (82598)
 * or the split EIMC_EX registers (82599 and newer). */
3858 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3860 struct ixgbe_hw *hw = &adapter->hw;
3861 u64 queue = 1ULL << vector;
3864 if (hw->mac.type == ixgbe_mac_82598EB) {
3865 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3866 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3868 mask = (queue & 0xFFFFFFFF);
3870 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3871 mask = (queue >> 32);
3873 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3875 } /* ixgbe_disable_queue */
3877 /************************************************************************
3878  * ixgbe_intr - Legacy Interrupt Service Routine
3879 ************************************************************************/
/* Shared/legacy ISR: reads and decodes EICR, flags fan failure, defers
 * link-status work to the admin task, and records SFP module / MSF /
 * external-PHY events in adapter->task_requests for later servicing.
 * Returns FILTER_SCHEDULE_THREAD to run the queue handler. */
3881 ixgbe_intr(void *arg)
3883 struct adapter *adapter = arg;
3884 struct ix_rx_queue *que = adapter->rx_queues;
3885 struct ixgbe_hw *hw = &adapter->hw;
3886 if_ctx_t ctx = adapter->ctx;
3887 u32 eicr, eicr_mask;
3889 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious interrupt (no cause bits): re-enable and claim handled. */
3893 ixgbe_if_enable_intr(ctx);
3894 return (FILTER_HANDLED);
3897 /* Check for fan failure */
3898 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3899 (eicr & IXGBE_EICR_GPI_SDP1)) {
3900 device_printf(adapter->dev,
3901 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3902 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3905 /* Link status change */
3906 if (eicr & IXGBE_EICR_LSC) {
3907 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3908 iflib_admin_intr_deferred(ctx);
3911 if (ixgbe_is_sfp(hw)) {
3912 /* Pluggable optics-related interrupt */
/* Module-presence pin differs by MAC generation: SDP0 on X540+. */
3913 if (hw->mac.type >= ixgbe_mac_X540)
3914 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3916 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3918 if (eicr & eicr_mask) {
3919 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3920 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3923 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3924 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3925 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3926 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3927 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3931 /* External PHY interrupt */
3932 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3933 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3934 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3936 return (FILTER_SCHEDULE_THREAD);
3939 /************************************************************************
3940  * ixgbe_free_pci_resources
3941 ************************************************************************/
/* Releases the admin IRQ (MSI-X only), every per-queue IRQ, and the
 * memory-mapped BAR resource. Safe to call with pci_mem already NULL. */
3943 ixgbe_free_pci_resources(if_ctx_t ctx)
3945 struct adapter *adapter = iflib_get_softc(ctx);
3946 struct ix_rx_queue *que = adapter->rx_queues;
3947 device_t dev = iflib_get_dev(ctx);
3949 /* Release all MSI-X queue resources */
3950 if (adapter->intr_type == IFLIB_INTR_MSIX)
3951 iflib_irq_free(ctx, &adapter->irq);
3954 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3955 iflib_irq_free(ctx, &que->que_irq);
3959 if (adapter->pci_mem != NULL)
3960 bus_release_resource(dev, SYS_RES_MEMORY,
3961 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3962 } /* ixgbe_free_pci_resources */
3964 /************************************************************************
3965  * ixgbe_sysctl_flowcntl
3967  * SYSCTL wrapper around setting Flow Control
3968 ************************************************************************/
/* Reads the current FC mode into the sysctl buffer; on a new value
 * being written, delegates validation and application to
 * ixgbe_set_flowcntl(). No-op when the value is unchanged. */
3970 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3972 struct adapter *adapter;
3975 adapter = (struct adapter *)arg1;
3976 fc = adapter->hw.fc.current_mode;
3978 error = sysctl_handle_int(oidp, &fc, 0, req);
/* No new value supplied (read-only access) or handler error: bail. */
3979 if ((error) || (req->newptr == NULL))
3982 /* Don't bother if it's not changed */
3983 if (fc == adapter->hw.fc.current_mode)
3986 return ixgbe_set_flowcntl(adapter, fc);
3987 } /* ixgbe_sysctl_flowcntl */
3989 /************************************************************************
3990  * ixgbe_set_flowcntl - Set flow control
3992  * Flow control values:
3997 ************************************************************************/
/* Applies the requested flow-control mode. RX drop (SRRCTL_DROP_EN) is
 * mutually exclusive with pause-based flow control, so multiqueue
 * configurations toggle it opposite to the FC setting. */
3999 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4002 case ixgbe_fc_rx_pause:
4003 case ixgbe_fc_tx_pause:
4005 adapter->hw.fc.requested_mode = fc;
4006 if (adapter->num_rx_queues > 1)
4007 ixgbe_disable_rx_drop(adapter);
4010 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4011 if (adapter->num_rx_queues > 1)
4012 ixgbe_enable_rx_drop(adapter);
4018 /* Don't autoneg if forcing a value */
4019 adapter->hw.fc.disable_fc_autoneg = true;
4020 ixgbe_fc_enable(&adapter->hw);
4023 } /* ixgbe_set_flowcntl */
4025 /************************************************************************
4026  * ixgbe_enable_rx_drop
4028  * Enable the hardware to drop packets when the buffer is
4029  * full. This is useful with multiqueue, so that no single
4030  * queue being full stalls the entire RX engine. We only
4031  * enable this when Multiqueue is enabled AND Flow Control
4033 ************************************************************************/
4035 ixgbe_enable_rx_drop(struct adapter *adapter)
4037 struct ixgbe_hw *hw = &adapter->hw;
4038 struct rx_ring *rxr;
/* Set DROP_EN on every PF RX ring's SRRCTL register. */
4041 for (int i = 0; i < adapter->num_rx_queues; i++) {
4042 rxr = &adapter->rx_queues[i].rxr;
4043 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4044 srrctl |= IXGBE_SRRCTL_DROP_EN;
4045 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4048 /* enable drop for each vf */
4049 for (int i = 0; i < adapter->num_vfs; i++) {
4050 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4051 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4054 } /* ixgbe_enable_rx_drop */
4056 /************************************************************************
4057 * ixgbe_disable_rx_drop
4058 ************************************************************************/
/*
 * Inverse of ixgbe_enable_rx_drop(): clear the drop-on-full behavior for
 * every RX queue and every VF, so full buffers exert backpressure instead
 * of discarding frames (used when pause-based flow control is active).
 */
4060 ixgbe_disable_rx_drop(struct adapter *adapter)
4062 struct ixgbe_hw *hw = &adapter->hw;
4063 struct rx_ring *rxr;
/* Clear DROP_EN in each queue's SRRCTL (read-modify-write). */
4066 for (int i = 0; i < adapter->num_rx_queues; i++) {
4067 rxr = &adapter->rx_queues[i].rxr;
4068 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4069 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4070 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4073 /* disable drop for each vf */
/* Writing QDE with WRITE+index but no enable bit clears drop for that VF. */
4074 for (int i = 0; i < adapter->num_vfs; i++) {
4075 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4076 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4078 } /* ixgbe_disable_rx_drop */
4080 /************************************************************************
4081 * ixgbe_sysctl_advertise
4083 * SYSCTL wrapper around setting advertised speed
4084 ************************************************************************/
/*
 * Sysctl handler: arg1 is the softc.  Exposes the cached advertise
 * bitmask; writes are validated and applied by ixgbe_set_advertise().
 */
4086 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4088 struct adapter *adapter;
4089 int error, advertise;
4091 adapter = (struct adapter *)arg1;
/* Seed the sysctl buffer with the currently cached advertise mask. */
4092 advertise = adapter->advertise;
4094 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* Read-only request or copy error: presumably returns here — TODO confirm. */
4095 if ((error) || (req->newptr == NULL))
4098 return ixgbe_set_advertise(adapter, advertise);
4099 } /* ixgbe_sysctl_advertise */
4101 /************************************************************************
4102 * ixgbe_set_advertise - Control advertised link speed
4105 * 0x1 - advertise 100 Mb
4106 * 0x2 - advertise 1G
4107 * 0x4 - advertise 10G
4108 * 0x8 - advertise 10 Mb (yes, Mb)
4109 ************************************************************************/
/*
 * Validate the requested advertise bitmask against media type and the
 * MAC-reported link capabilities, translate it to ixgbe_link_speed bits,
 * then restart link setup with the new speed set.  Returns 0 on success;
 * error paths (bodies partly elided by extraction) reject unsupported
 * media, out-of-range masks, and speeds the link cannot do.
 */
4111 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4113 device_t dev = iflib_get_dev(adapter->ctx);
4114 struct ixgbe_hw *hw;
4115 ixgbe_link_speed speed = 0;
4116 ixgbe_link_speed link_caps = 0;
4117 s32 err = IXGBE_NOT_IMPLEMENTED;
4118 bool negotiate = false;
4120 /* Checks to validate new value */
4121 if (adapter->advertise == advertise) /* no change */
4126 /* No speed changes for backplane media */
4127 if (hw->phy.media_type == ixgbe_media_type_backplane)
/* Only copper PHYs and multispeed fiber can renegotiate advertised speed. */
4130 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4131 (hw->phy.multispeed_fiber))) {
4132 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n")
4136 if (advertise < 0x1 || advertise > 0xF) {
4137 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
/* Ask the MAC what the link can actually do before honoring the mask. */
4141 if (hw->mac.ops.get_link_capabilities) {
4142 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4144 if (err != IXGBE_SUCCESS) {
4145 device_printf(dev, "Unable to determine supported advertise speeds\n");
4150 /* Set new value and report new advertised mode */
/* Each requested bit must be backed by the corresponding capability bit. */
4151 if (advertise & 0x1) {
4152 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4153 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4156 speed |= IXGBE_LINK_SPEED_100_FULL;
4158 if (advertise & 0x2) {
4159 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4160 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4163 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4165 if (advertise & 0x4) {
4166 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4167 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4170 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4172 if (advertise & 0x8) {
4173 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4174 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4177 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Kick off link setup with the validated speed set and cache the mask. */
4180 hw->mac.autotry_restart = true;
4181 hw->mac.ops.setup_link(hw, speed, true);
4182 adapter->advertise = advertise;
4185 } /* ixgbe_set_advertise */
4187 /************************************************************************
4188 * ixgbe_get_advertise - Get current advertised speed settings
4190 * Formatted for sysctl usage.
4192 * 0x1 - advertise 100 Mb
4193 * 0x2 - advertise 1G
4194 * 0x4 - advertise 10G
4195 * 0x8 - advertise 10 Mb (yes, Mb)
4196 ************************************************************************/
/*
 * Build the sysctl-format advertise bitmask from the MAC's reported link
 * capabilities.  Early-outs (bodies elided by extraction) cover media
 * types where advertised speed is meaningless and capability-read errors.
 */
4198 ixgbe_get_advertise(struct adapter *adapter)
4200 struct ixgbe_hw *hw = &adapter->hw;
4202 ixgbe_link_speed link_caps = 0;
4204 bool negotiate = false;
4207 * Advertised speed means nothing unless it's copper or
4210 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4211 !(hw->phy.multispeed_fiber))
4214 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4215 if (err != IXGBE_SUCCESS)
/* Map each IXGBE_LINK_SPEED_* capability bit to its sysctl mask bit. */
4219 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4220 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4221 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4222 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4225 } /* ixgbe_get_advertise */
4227 /************************************************************************
4228 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4231 * 0/1 - off / on (use default value of 1000)
4233 * Legal timer values are:
4234 * 50,100,250,500,1000,2000,5000,10000
4236 * Turning off interrupt moderation will also turn this off.
4237 ************************************************************************/
/*
 * Sysctl handler for the DMA-coalescing timer (adapter->dmac, 16-bit).
 * Accepts 0/1 (off / on-with-default-1000) or one of the legal timer
 * values listed above; other values are rejected (switch arms partly
 * elided by extraction).  Re-inits the interface if it is running so the
 * new setting takes effect.
 */
4239 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4241 struct adapter *adapter = (struct adapter *)arg1;
4242 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4246 newval = adapter->dmac;
/* 16-bit handler to match the width of adapter->dmac. */
4247 error = sysctl_handle_16(oidp, &newval, 0, req);
4248 if ((error) || (req->newptr == NULL))
4257 /* Enable and use default */
4258 adapter->dmac = 1000;
4268 /* Legal values - allow */
4269 adapter->dmac = newval;
4272 /* Do nothing, illegal value */
4276 /* Re-initialize hardware if it's already running */
4277 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4281 } /* ixgbe_sysctl_dmac */
4284 /************************************************************************
4285 * ixgbe_sysctl_power_state
4287 * Sysctl to test power states
4289 * 0 - set device to D0
4290 * 3 - set device to D3
4291 * (none) - get current device power state
4292 ************************************************************************/
/*
 * Debug sysctl: reads report the PCI power state; writing 0 or 3 drives
 * the device through the newbus suspend/resume methods to emulate a
 * D0 <-> D3 transition.
 */
4294 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4296 struct adapter *adapter = (struct adapter *)arg1;
4297 device_t dev = adapter->dev;
4298 int curr_ps, new_ps, error = 0;
4300 curr_ps = new_ps = pci_get_powerstate(dev);
4302 error = sysctl_handle_int(oidp, &new_ps, 0, req);
/* Read-only request or copy error: presumably returns here — TODO confirm. */
4303 if ((error) || (req->newptr == NULL))
4306 if (new_ps == curr_ps)
/* Only the two supported transitions are acted on; others fall through. */
4309 if (new_ps == 3 && curr_ps == 0)
4310 error = DEVICE_SUSPEND(dev);
4311 else if (new_ps == 0 && curr_ps == 3)
4312 error = DEVICE_RESUME(dev);
/* Report what the hardware actually ended up in, not what was requested. */
4316 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4319 } /* ixgbe_sysctl_power_state */
4322 /************************************************************************
4323 * ixgbe_sysctl_wol_enable
4325 * Sysctl to enable/disable the WoL capability,
4326 * if supported by the adapter.
4331 ************************************************************************/
/*
 * Sysctl handler toggling hw->wol_enabled.  The written value is
 * normalized to 0/1; enabling is refused when the adapter lacks WoL
 * support (rejection body elided by extraction).
 */
4333 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4335 struct adapter *adapter = (struct adapter *)arg1;
4336 struct ixgbe_hw *hw = &adapter->hw;
4337 int new_wol_enabled;
4340 new_wol_enabled = hw->wol_enabled;
4341 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4342 if ((error) || (req->newptr == NULL))
/* Collapse any nonzero write to 1 so comparisons below are exact. */
4344 new_wol_enabled = !!(new_wol_enabled);
4345 if (new_wol_enabled == hw->wol_enabled)
/* Cannot enable WoL on hardware that does not support it. */
4348 if (new_wol_enabled > 0 && !adapter->wol_support)
4351 hw->wol_enabled = new_wol_enabled;
4354 } /* ixgbe_sysctl_wol_enable */
4356 /************************************************************************
4357 * ixgbe_sysctl_wufc - Wake Up Filter Control
4359 * Sysctl to enable/disable the types of packets that the
4360 * adapter will wake up on upon receipt.
4362 * 0x1 - Link Status Change
4363 * 0x2 - Magic Packet
4364 * 0x4 - Direct Exact
4365 * 0x8 - Directed Multicast
4367 * 0x20 - ARP/IPv4 Request Packet
4368 * 0x40 - Direct IPv4 Packet
4369 * 0x80 - Direct IPv6 Packet
4371 * Settings not listed above will cause the sysctl to return an error.
4372 ************************************************************************/
/*
 * Sysctl handler for adapter->wufc (32-bit wake-up filter bitmask).
 * Only the low byte carries valid filter bits; any high bits in the
 * written value are rejected (rejection body elided by extraction).
 */
4374 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4376 struct adapter *adapter = (struct adapter *)arg1;
4380 new_wufc = adapter->wufc;
4382 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4383 if ((error) || (req->newptr == NULL))
4385 if (new_wufc == adapter->wufc)
/* Anything above the low byte is not a defined wake-up filter. */
4388 if (new_wufc & 0xffffff00)
/* Preserve existing high-bit state while merging in the new low byte. */
4392 new_wufc |= (0xffffff & adapter->wufc);
4393 adapter->wufc = new_wufc;
4396 } /* ixgbe_sysctl_wufc */
4399 /************************************************************************
4400 * ixgbe_sysctl_print_rss_config
4401 ************************************************************************/
/*
 * Debug-only sysctl (compiled under IXGBE_DEBUG per the #endif below this
 * function): dumps the RSS redirection table (RETA, plus the extended
 * ERETA entries on X550-class MACs) into an sbuf returned to userland.
 */
4403 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4405 struct adapter *adapter = (struct adapter *)arg1;
4406 struct ixgbe_hw *hw = &adapter->hw;
4407 device_t dev = adapter->dev;
4409 int error = 0, reta_size;
4412 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4414 device_printf(dev, "Could not allocate sbuf for output.\n");
4418 // TODO: use sbufs to make a string to print out
4419 /* Set multiplier for RETA setup and table size based on MAC */
/* X550 family has the larger table; the default arm is elided here. */
4420 switch (adapter->hw.mac.type) {
4421 case ixgbe_mac_X550:
4422 case ixgbe_mac_X550EM_x:
4423 case ixgbe_mac_X550EM_a:
4431 /* Print out the redirection table */
4432 sbuf_cat(buf, "\n");
4433 for (int i = 0; i < reta_size; i++) {
/* First 32 entries come from RETA, the remainder from ERETA (offset -32). */
4435 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4436 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4438 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4439 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4443 // TODO: print more config
/* sbuf_finish() reports any overflow/copyout problem from the sbuf. */
4445 error = sbuf_finish(buf);
4447 device_printf(dev, "Error finishing sbuf: %d\n", error);
4452 } /* ixgbe_sysctl_print_rss_config */
4453 #endif /* IXGBE_DEBUG */
4455 /************************************************************************
4456 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4458 * For X552/X557-AT devices using an external PHY
4459 ************************************************************************/
/*
 * Sysctl handler reporting the external PHY temperature via MDIO.
 * Only IXGBE_DEV_ID_X550EM_X_10G_T has the supported sensor; all other
 * devices are refused (error-return bodies elided by extraction).
 *
 * FIX(review): the address-of expression "&reg" had been mojibake'd into
 * the single character "®" (HTML-entity corruption of "&reg"); restored
 * so the read_reg() call takes the register variable's address again.
 */
4461 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4463 struct adapter *adapter = (struct adapter *)arg1;
4464 struct ixgbe_hw *hw = &adapter->hw;
4467 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4468 device_printf(iflib_get_dev(adapter->ctx),
4469 "Device has no supported external thermal sensor.\n");
/* Read the raw temperature over MDIO; nonzero means the read failed. */
4473 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4474 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4475 device_printf(iflib_get_dev(adapter->ctx),
4476 "Error reading from PHY's current temperature register\n");
4480 /* Shift temp for output */
/* Report as a read-only 16-bit value; writes are not meaningful here. */
4483 return (sysctl_handle_16(oidp, NULL, reg, req));
4484 } /* ixgbe_sysctl_phy_temp */
4486 /************************************************************************
4487 * ixgbe_sysctl_phy_overtemp_occurred
4489 * Reports (directly from the PHY) whether the current PHY
4490 * temperature is over the overtemp threshold.
4491 ************************************************************************/
/*
 * Sysctl handler: reads the PHY overtemp status register over MDIO and
 * reports bit 0x4000 as a boolean.  Only IXGBE_DEV_ID_X550EM_X_10G_T is
 * supported (error-return bodies elided by extraction).
 *
 * FIX(review): restored "&reg" where it had been mojibake'd into the
 * single character "®" (HTML-entity corruption of "&reg").
 */
4493 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4495 struct adapter *adapter = (struct adapter *)arg1;
4496 struct ixgbe_hw *hw = &adapter->hw;
4499 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4500 device_printf(iflib_get_dev(adapter->ctx),
4501 "Device has no supported external thermal sensor.\n");
/* Nonzero return from read_reg() indicates an MDIO failure. */
4505 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4506 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4507 device_printf(iflib_get_dev(adapter->ctx),
4508 "Error reading from PHY's temperature status register\n");
4512 /* Get occurrence bit */
/* Normalize the status bit to 0/1 before exporting it. */
4513 reg = !!(reg & 0x4000);
4515 return (sysctl_handle_16(oidp, 0, reg, req));
4516 } /* ixgbe_sysctl_phy_overtemp_occurred */
4518 /************************************************************************
4519 * ixgbe_sysctl_eee_state
4521 * Sysctl to set EEE power saving feature
4525 * (none) - get current device EEE state
4526 ************************************************************************/
/*
 * Sysctl handler toggling Energy Efficient Ethernet.  The current state
 * is derived from the IXGBE_FEATURE_EEE bit in feat_en; a written value
 * must be 0 or 1 and is refused when feat_cap lacks EEE support
 * (rejection/return bodies elided by extraction).
 */
4528 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4530 struct adapter *adapter = (struct adapter *)arg1;
4531 device_t dev = adapter->dev;
4532 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4533 int curr_eee, new_eee, error = 0;
/* Current state == whether the EEE feature bit is enabled. */
4536 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4538 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4539 if ((error) || (req->newptr == NULL))
4543 if (new_eee == curr_eee)
/* Hardware must advertise EEE capability before it can be enabled. */
4547 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4550 /* Bounds checking */
4551 if ((new_eee < 0) || (new_eee > 1))
/* Program the MAC; a nonzero return aborts without changing feat_en. */
4554 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4556 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4560 /* Restart auto-neg */
4563 device_printf(dev, "New EEE state: %d\n", new_eee);
4565 /* Cache new value */
4567 adapter->feat_en |= IXGBE_FEATURE_EEE;
4569 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4572 } /* ixgbe_sysctl_eee_state */
4574 /************************************************************************
4575 * ixgbe_init_device_features
4576 ************************************************************************/
/*
 * Populate adapter->feat_cap (what the hardware can do, keyed off MAC
 * type and device ID) and adapter->feat_en (what this driver instance
 * actually enables, gated by defaults, global sysctls, and feature
 * dependencies).  NOTE(review): extraction has dropped the switch's
 * break statements and some arms — do not infer fallthrough from the
 * visible text.
 */
4578 ixgbe_init_device_features(struct adapter *adapter)
/* Baseline capabilities common to every supported MAC. */
4580 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4583 | IXGBE_FEATURE_MSIX
4584 | IXGBE_FEATURE_LEGACY_IRQ;
4586 /* Set capabilities first... */
4587 switch (adapter->hw.mac.type) {
4588 case ixgbe_mac_82598EB:
4589 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4590 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4592 case ixgbe_mac_X540:
4593 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4594 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* Bypass is only present on the bypass SKU's function 0. */
4595 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4596 (adapter->hw.bus.func == 0))
4597 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4599 case ixgbe_mac_X550:
4600 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4601 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4602 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4604 case ixgbe_mac_X550EM_x:
4605 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4606 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4608 case ixgbe_mac_X550EM_a:
4609 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4610 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a has no INTx support. */
4611 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4612 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4613 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4614 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4615 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4618 case ixgbe_mac_82599EB:
4619 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4620 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4621 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4622 (adapter->hw.bus.func == 0))
4623 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
/* QSFP SKU cannot use legacy interrupts. */
4624 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4625 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4631 /* Enabled by default... */
4632 /* Fan failure detection */
4633 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4634 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4636 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4637 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4639 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4640 adapter->feat_en |= IXGBE_FEATURE_EEE;
4641 /* Thermal Sensor */
4642 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4643 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4645 /* Enabled via global sysctl... */
4647 if (ixgbe_enable_fdir) {
4648 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4649 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4651 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4654 * Message Signal Interrupts - Extended (MSI-X)
4655 * Normal MSI is only enabled if MSI-X calls fail.
4657 if (!ixgbe_enable_msix)
4658 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4659 /* Receive-Side Scaling (RSS) */
4660 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4661 adapter->feat_en |= IXGBE_FEATURE_RSS;
4663 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors to function. */
4665 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4666 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4667 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4668 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4669 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4671 } /* ixgbe_init_device_features */
4673 /************************************************************************
4674 * ixgbe_check_fan_failure
4675 ************************************************************************/
/*
 * Check the fan-failure GPI bit in 'reg' and scream to the console if it
 * is set.  The bit position differs between the interrupt-cause register
 * (EICR, chosen when in_interrupt) and the alternate source (elided by
 * extraction — presumably ESDP; confirm upstream).
 */
4677 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4681 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4685 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4686 } /* ixgbe_check_fan_failure */
4688 /************************************************************************
4689 * ixgbe_sbuf_fw_version
4690 ************************************************************************/
/*
 * Append a human-readable firmware version summary (OEM NVM, Option ROM,
 * eTrack ID, PHY firmware) to 'buf'.  Each component prints only when its
 * query reported a valid value; 'space' separates components once at
 * least one has been printed (its update lines are elided by extraction).
 */
4692 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4694 struct ixgbe_nvm_version nvm_ver = {0};
4697 const char *space = "";
4699 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4700 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4701 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4702 status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4704 if (nvm_ver.oem_valid) {
4705 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4706 nvm_ver.oem_minor, nvm_ver.oem_release);
4710 if (nvm_ver.or_valid) {
4711 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4712 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
/* Skip the eTrack line when the ID still carries the invalid sentinel. */
4716 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4718 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
/* PHY FW is printed only when the query succeeded and reported nonzero. */
4722 if (phyfw != 0 && status == IXGBE_SUCCESS)
4723 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4724 } /* ixgbe_sbuf_fw_version */
4726 /************************************************************************
4727 * ixgbe_print_fw_version
4728 ************************************************************************/
/*
 * Log the firmware version summary to the console at attach time, using
 * an auto-growing sbuf filled by ixgbe_sbuf_fw_version().  Prints nothing
 * when the summary comes back empty.
 */
4730 ixgbe_print_fw_version(if_ctx_t ctx)
4732 struct adapter *adapter = iflib_get_softc(ctx);
4733 struct ixgbe_hw *hw = &adapter->hw;
4734 device_t dev = adapter->dev;
4738 buf = sbuf_new_auto();
4740 device_printf(dev, "Could not allocate sbuf for output.\n");
4744 ixgbe_sbuf_fw_version(hw, buf);
4746 error = sbuf_finish(buf);
4748 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Only emit the log line when at least one component was collected. */
4749 else if (sbuf_len(buf))
4750 device_printf(dev, "%s\n", sbuf_data(buf));
4753 } /* ixgbe_print_fw_version */
4755 /************************************************************************
4756 * ixgbe_sysctl_print_fw_version
4757 ************************************************************************/
/*
 * Sysctl counterpart of ixgbe_print_fw_version(): returns the same
 * firmware version summary to userland via a sysctl-backed sbuf instead
 * of logging it to the console.
 */
4759 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4761 struct adapter *adapter = (struct adapter *)arg1;
4762 struct ixgbe_hw *hw = &adapter->hw;
4763 device_t dev = adapter->dev;
4767 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4769 device_printf(dev, "Could not allocate sbuf for output.\n");
4773 ixgbe_sbuf_fw_version(hw, buf);
/* sbuf_finish() also performs the copyout for sysctl-backed sbufs. */
4775 error = sbuf_finish(buf);
4777 device_printf(dev, "Error finishing sbuf: %d\n", error);
4782 } /* ixgbe_sysctl_print_fw_version */