1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
40 #include "ixgbe_sriov.h"
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
46 /************************************************************************
48 ************************************************************************/
/* Driver version string; exported to iflib through isc_driver_version below. */
49 char ixgbe_driver_version[] = "4.0.1-k";
51 /************************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 ************************************************************************/
/*
 * PCI device ID table consumed by iflib's probe path (PVID maps
 * vendor/device IDs to a human-readable adapter name).
 * NOTE(review): per the header comment above, this table must end with an
 * all-zeros sentinel entry (PVID_END); the terminator is not visible in
 * this listing — confirm it is present in the full source.
 */
60 static pci_vendor_info_t ixgbe_vendor_info_array[] =
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
106 /* required last entry */
110 static void *ixgbe_register(device_t);
111 static int ixgbe_if_attach_pre(if_ctx_t);
112 static int ixgbe_if_attach_post(if_ctx_t);
113 static int ixgbe_if_detach(if_ctx_t);
114 static int ixgbe_if_shutdown(if_ctx_t);
115 static int ixgbe_if_suspend(if_ctx_t);
116 static int ixgbe_if_resume(if_ctx_t);
118 static void ixgbe_if_stop(if_ctx_t);
119 void ixgbe_if_enable_intr(if_ctx_t);
120 static void ixgbe_if_disable_intr(if_ctx_t);
121 static void ixgbe_link_intr_enable(if_ctx_t);
122 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
124 static int ixgbe_if_media_change(if_ctx_t);
125 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
128 static void ixgbe_if_multi_set(if_ctx_t);
129 static int ixgbe_if_promisc_set(if_ctx_t, int);
130 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
131 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
132 static void ixgbe_if_queues_free(if_ctx_t);
133 static void ixgbe_if_timer(if_ctx_t, uint16_t);
134 static void ixgbe_if_update_admin_status(if_ctx_t);
135 static void ixgbe_if_vlan_register(if_ctx_t, u16);
136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
137 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
139 int ixgbe_intr(void *);
141 /************************************************************************
142 * Function prototypes
143 ************************************************************************/
144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
148 static void ixgbe_add_device_sysctls(if_ctx_t);
149 static int ixgbe_allocate_pci_resources(if_ctx_t);
150 static int ixgbe_setup_low_power_mode(if_ctx_t);
152 static void ixgbe_config_dmac(struct ixgbe_softc *);
153 static void ixgbe_configure_ivars(struct ixgbe_softc *);
154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
155 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
156 static bool ixgbe_sfp_probe(if_ctx_t);
158 static void ixgbe_free_pci_resources(if_ctx_t);
160 static int ixgbe_msix_link(void *);
161 static int ixgbe_msix_que(void *);
162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
163 static void ixgbe_initialize_receive_units(if_ctx_t);
164 static void ixgbe_initialize_transmit_units(if_ctx_t);
166 static int ixgbe_setup_interface(if_ctx_t);
167 static void ixgbe_init_device_features(struct ixgbe_softc *);
168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
170 static void ixgbe_print_fw_version(if_ctx_t);
171 static void ixgbe_add_media_types(if_ctx_t);
172 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
173 static void ixgbe_config_link(if_ctx_t);
174 static void ixgbe_get_slot_info(struct ixgbe_softc *);
175 static void ixgbe_check_wol_support(struct ixgbe_softc *);
176 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
177 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
179 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
180 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
181 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
182 static int ixgbe_get_advertise(struct ixgbe_softc *);
183 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
184 static void ixgbe_config_gpie(struct ixgbe_softc *);
185 static void ixgbe_config_delay_values(struct ixgbe_softc *);
187 /* Sysctl handlers */
188 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
191 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
200 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
201 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
207 /* Deferred interrupt tasklets */
208 static void ixgbe_handle_msf(void *);
209 static void ixgbe_handle_mod(void *);
210 static void ixgbe_handle_phy(void *);
212 /************************************************************************
213 * FreeBSD Device Interface Entry Points
214 ************************************************************************/
/*
 * newbus method table for the "ix" driver. All device_* entry points are
 * delegated to the generic iflib implementations; device_register returns
 * this driver's shared context (see ixgbe_register below).
 * NOTE(review): DEVMETHOD_END and the closing brace of the table are not
 * visible in this listing — confirm in the full source.
 */
215 static device_method_t ix_methods[] = {
216 /* Device interface */
217 DEVMETHOD(device_register, ixgbe_register),
218 DEVMETHOD(device_probe, iflib_device_probe),
219 DEVMETHOD(device_attach, iflib_device_attach),
220 DEVMETHOD(device_detach, iflib_device_detach),
221 DEVMETHOD(device_shutdown, iflib_device_shutdown),
222 DEVMETHOD(device_suspend, iflib_device_suspend),
223 DEVMETHOD(device_resume, iflib_device_resume),
/* SR-IOV support is likewise routed through iflib. */
225 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
226 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
227 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/* newbus driver declaration: name, methods, softc size. */
232 static driver_t ix_driver = {
233 "ix", ix_methods, sizeof(struct ixgbe_softc),
236 devclass_t ix_devclass;
237 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* PNP table so devmatch(8) can autoload the module from the PCI ID list. */
238 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
239 MODULE_DEPEND(ix, pci, 1, 1, 1);
240 MODULE_DEPEND(ix, ether, 1, 1, 1);
241 MODULE_DEPEND(ix, iflib, 1, 1, 1);
/*
 * iflib device-interface (ifdi_*) method table: maps iflib callbacks onto
 * this driver's ixgbe_if_* implementations declared above.
 * Note: ifdi_tx_queue_intr_enable deliberately points at the RX handler
 * (ixgbe_if_rx_queue_intr_enable) — TX and RX of a queue pair share one
 * MSI-X vector in this driver, so presumably one enable routine serves
 * both; TODO confirm against the MSI-X assignment code.
 */
243 static device_method_t ixgbe_if_methods[] = {
244 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
245 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
246 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
247 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
248 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
249 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
250 DEVMETHOD(ifdi_init, ixgbe_if_init),
251 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
252 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
253 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
254 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
255 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
256 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
259 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
260 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
261 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
262 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
263 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
264 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
265 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
266 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
267 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
268 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
269 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
270 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
271 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
272 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
273 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
/* SR-IOV iflib callbacks (bodies live in ixgbe_sriov.c). */
275 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
/*
 * Loader tunables / sysctls under hw.ix.*. All are CTLFLAG_RDTUN: set at
 * boot (loader.conf) and read-only afterwards. Each provides the initial
 * default for every adapter instance.
 */
283 * TUNEABLE PARAMETERS:
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
/* iflib driver declaration used as isc_driver in the shared ctx below. */
287 static driver_t ixgbe_if_driver = {
288 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
/* Interrupt-moderation ceiling, derived from the low-latency EITR value. */
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 * Smart speed setting, default to on
307 * this only works as a compile option
308 * right now as its during attach, set
309 * this to 'ixgbe_smart_speed_off' to
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 * MSI-X should be the default for best performance,
316 * but this allows it to be forced off for testing.
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320 "Enable MSI-X interrupts");
323 * Defining this on will allow the use
324 * of unsupported SFP+ modules, note that
325 * doing so you are on your own :)
/* Declared int (not bool) because SYSCTL_INT needs an int backing store. */
327 static int allow_unsupported_sfp = false;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329 &allow_unsupported_sfp, 0,
330 "Allow unsupported SFP modules...use at your own risk");
333 * Not sure if Flow Director is fully baked,
334 * so we'll default to turning it off.
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338 "Enable Flow Director");
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343 "Enable Receive-Side Scaling (RSS)");
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
/* malloc(9) tag for all allocations made by this driver. */
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353 * For Flow Director: this is the number of TX packets we sample
354 * for the filter pool, this means every 20th packet will be probed.
356 * This feature can be disabled by setting this to 0.
358 static int atr_sample_rate = 20;
360 extern struct if_txrx ixgbe_txrx;
/*
 * iflib shared context: static description of this driver's DMA limits,
 * descriptor-ring bounds, and callbacks, handed to iflib from
 * ixgbe_register(). Single-element nrxd/ntxd arrays mean one descriptor
 * ring per queue set.
 */
362 static struct if_shared_ctx ixgbe_sctx_init = {
363 .isc_magic = IFLIB_MAGIC,
364 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
365 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
366 .isc_tx_maxsegsize = PAGE_SIZE,
367 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 .isc_tso_maxsegsize = PAGE_SIZE,
369 .isc_rx_maxsize = PAGE_SIZE*4,
370 .isc_rx_nsegments = 1,
371 .isc_rx_maxsegsize = PAGE_SIZE*4,
/* One "admin" (link/other-cause) interrupt in addition to queue vectors. */
376 .isc_admin_intrcnt = 1,
377 .isc_vendor_info = ixgbe_vendor_info_array,
378 .isc_driver_version = ixgbe_driver_version,
379 .isc_driver = &ixgbe_if_driver,
380 .isc_flags = IFLIB_TSO_INIT_IP,
382 .isc_nrxd_min = {MIN_RXD},
383 .isc_ntxd_min = {MIN_TXD},
384 .isc_nrxd_max = {MAX_RXD},
385 .isc_ntxd_max = {MAX_TXD},
386 .isc_nrxd_default = {DEFAULT_RXD},
387 .isc_ntxd_default = {DEFAULT_TXD},
390 /************************************************************************
391 * ixgbe_if_tx_queues_alloc
/*
 * iflib callback: allocate software state for 'ntxqsets' TX queue sets.
 * iflib has already DMA-allocated the descriptor rings; vaddrs/paddrs[i]
 * give each ring's virtual/bus address. Returns 0 on success or an errno;
 * on failure the partially-built state is torn down via
 * ixgbe_if_queues_free() (see the tail of this function).
 */
392 ************************************************************************/
394 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
395 int ntxqs, int ntxqsets)
397 struct ixgbe_softc *sc = iflib_get_softc(ctx);
398 if_softc_ctx_t scctx = sc->shared;
399 struct ix_tx_queue *que;
402 MPASS(sc->num_tx_queues > 0);
403 MPASS(sc->num_tx_queues == ntxqsets);
406 /* Allocate queue structure memory */
408 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
409 M_IXGBE, M_NOWAIT | M_ZERO);
410 if (!sc->tx_queues) {
411 device_printf(iflib_get_dev(ctx),
412 "Unable to allocate TX ring memory\n");
416 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
417 struct tx_ring *txr = &que->txr;
419 /* In case SR-IOV is enabled, align the index properly */
420 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
423 txr->sc = que->sc = sc;
425 /* Allocate report status array */
/* One qidx_t slot per TX descriptor, initialized to QIDX_INVALID below. */
426 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
427 if (txr->tx_rsq == NULL) {
431 for (j = 0; j < scctx->isc_ntxd[0]; j++)
432 txr->tx_rsq[j] = QIDX_INVALID;
433 /* get the virtual and physical address of the hardware queues */
434 txr->tail = IXGBE_TDT(txr->me);
435 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
436 txr->tx_paddr = paddrs[i];
439 txr->total_packets = 0;
441 /* Set the rate at which we sample packets */
442 if (sc->feat_en & IXGBE_FEATURE_FDIR)
443 txr->atr_sample = atr_sample_rate;
447 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
/* Error path: release everything allocated so far. */
453 ixgbe_if_queues_free(ctx);
456 } /* ixgbe_if_tx_queues_alloc */
458 /************************************************************************
459 * ixgbe_if_rx_queues_alloc
/*
 * iflib callback: allocate software state for 'nrxqsets' RX queue sets,
 * mirroring ixgbe_if_tx_queues_alloc() above. vaddrs/paddrs[i] are the
 * iflib-provided descriptor-ring addresses.
 */
460 ************************************************************************/
462 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
463 int nrxqs, int nrxqsets)
465 struct ixgbe_softc *sc = iflib_get_softc(ctx);
466 struct ix_rx_queue *que;
469 MPASS(sc->num_rx_queues > 0);
470 MPASS(sc->num_rx_queues == nrxqsets);
473 /* Allocate queue structure memory */
475 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
476 M_IXGBE, M_NOWAIT | M_ZERO);
477 if (!sc->rx_queues) {
478 device_printf(iflib_get_dev(ctx),
/* NOTE(review): copy-paste from the TX allocator — this message should
 * read "RX ring memory". Left unchanged here; fix with a code change. */
479 "Unable to allocate TX ring memory\n");
483 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
484 struct rx_ring *rxr = &que->rxr;
486 /* In case SR-IOV is enabled, align the index properly */
487 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
490 rxr->sc = que->sc = sc;
492 /* get the virtual and physical address of the hw queues */
493 rxr->tail = IXGBE_RDT(rxr->me);
494 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
495 rxr->rx_paddr = paddrs[i];
500 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
504 } /* ixgbe_if_rx_queues_alloc */
506 /************************************************************************
507 * ixgbe_if_queues_free
/*
 * iflib callback: free all per-queue software state allocated by the two
 * *_queues_alloc routines above. Safe to call on partially-initialized
 * state (it is also used as the error-unwind path): each pointer is
 * NULL-checked and NULLed after freeing to prevent double-free.
 */
508 ************************************************************************/
510 ixgbe_if_queues_free(if_ctx_t ctx)
512 struct ixgbe_softc *sc = iflib_get_softc(ctx);
513 struct ix_tx_queue *tx_que = sc->tx_queues;
514 struct ix_rx_queue *rx_que = sc->rx_queues;
517 if (tx_que != NULL) {
518 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
519 struct tx_ring *txr = &tx_que->txr;
/* Stop at the first ring without a report-status array: allocation
 * proceeded in order, so nothing after it was allocated either. */
520 if (txr->tx_rsq == NULL)
523 free(txr->tx_rsq, M_IXGBE);
527 free(sc->tx_queues, M_IXGBE);
528 sc->tx_queues = NULL;
530 if (rx_que != NULL) {
531 free(sc->rx_queues, M_IXGBE);
532 sc->rx_queues = NULL;
534 } /* ixgbe_if_queues_free */
536 /************************************************************************
537 * ixgbe_initialize_rss_mapping
/*
 * Program the hardware RSS state: redirection table (RETA/ERETA), hash
 * key (RSSRK), and enabled hash types (MRQC). When the kernel RSS option
 * is active (IXGBE_FEATURE_RSS) the key, bucket mapping, and hash config
 * come from the network-stack RSS layer; otherwise a random key and a
 * simple round-robin queue mapping are used.
 */
538 ************************************************************************/
540 ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
542 struct ixgbe_hw *hw = &sc->hw;
543 u32 reta = 0, mrqc, rss_key[10];
544 int queue_id, table_size, index_mult;
548 if (sc->feat_en & IXGBE_FEATURE_RSS) {
549 /* Fetch the configured RSS key */
550 rss_getkey((uint8_t *)&rss_key);
552 /* set up random bits */
553 arc4rand(&rss_key, sizeof(rss_key), 0);
556 /* Set multiplier for RETA setup and table size based on MAC */
559 switch (sc->hw.mac.type) {
560 case ixgbe_mac_82598EB:
564 case ixgbe_mac_X550EM_x:
565 case ixgbe_mac_X550EM_a:
572 /* Set up the redirection table */
573 for (i = 0, j = 0; i < table_size; i++, j++) {
/* Wrap the round-robin queue index when all RX queues are used. */
574 if (j == sc->num_rx_queues)
577 if (sc->feat_en & IXGBE_FEATURE_RSS) {
579 * Fetch the RSS bucket id for the given indirection
580 * entry. Cap it at the number of configured buckets
581 * (which is num_rx_queues.)
583 queue_id = rss_get_indirection_to_bucket(i);
584 queue_id = queue_id % sc->num_rx_queues;
586 queue_id = (j * index_mult);
589 * The low 8 bits are for hash value (n+0);
590 * The next 8 bits are for hash value (n+1), etc.
/* Accumulate four 8-bit entries per 32-bit RETA register. */
593 reta = reta | (((uint32_t)queue_id) << 24);
596 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
/* Entries past the first 128 go into the extended table (ERETA). */
598 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
604 /* Now fill our hash function seeds */
605 for (i = 0; i < 10; i++)
606 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
608 /* Perform hash on these packet types */
609 if (sc->feat_en & IXGBE_FEATURE_RSS)
610 rss_hash_config = rss_gethashconfig();
613 * Disable UDP - IP fragments aren't currently being handled
614 * and so we end up with a mix of 2-tuple and 4-tuple
617 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
618 | RSS_HASHTYPE_RSS_TCP_IPV4
619 | RSS_HASHTYPE_RSS_IPV6
620 | RSS_HASHTYPE_RSS_TCP_IPV6
621 | RSS_HASHTYPE_RSS_IPV6_EX
622 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
/* Translate the stack's hash-type flags into MRQC field-enable bits. */
625 mrqc = IXGBE_MRQC_RSSEN;
626 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
627 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
628 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
629 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
630 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
631 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
632 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
633 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
634 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
635 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
636 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
637 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
638 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
639 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
640 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
641 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
642 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
643 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
644 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
645 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
646 } /* ixgbe_initialize_rss_mapping */
648 /************************************************************************
649 * ixgbe_initialize_receive_units - Setup receive registers and features.
650 ************************************************************************/
/* Rounds a buffer size up to the SRRCTL BSIZEPKT granularity (1KB units). */
651 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Program all RX-side registers: filter control, jumbo-frame enable,
 * per-ring base/length/SRRCTL/head/tail, packet-split type, RSS mapping,
 * and checksum offload. Called with receives disabled (see below).
 */
654 ixgbe_initialize_receive_units(if_ctx_t ctx)
656 struct ixgbe_softc *sc = iflib_get_softc(ctx);
657 if_softc_ctx_t scctx = sc->shared;
658 struct ixgbe_hw *hw = &sc->hw;
659 struct ifnet *ifp = iflib_get_ifp(ctx);
660 struct ix_rx_queue *que;
662 u32 bufsz, fctrl, srrctl, rxcsum;
666 * Make sure receives are disabled while
667 * setting up the descriptor ring
669 ixgbe_disable_rx(hw);
671 /* Enable broadcasts */
672 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
673 fctrl |= IXGBE_FCTRL_BAM;
/* 82598 only: discard pause frames and pass MAC control frames. */
674 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
675 fctrl |= IXGBE_FCTRL_DPF;
676 fctrl |= IXGBE_FCTRL_PMCF;
678 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
680 /* Set for Jumbo Frames? */
681 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
682 if (ifp->if_mtu > ETHERMTU)
683 hlreg |= IXGBE_HLREG0_JUMBOEN;
685 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
686 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf buffer size into SRRCTL BSIZEPKT units. */
688 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
689 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
691 /* Setup the Base and Length of the Rx Descriptor Ring */
692 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
693 struct rx_ring *rxr = &que->rxr;
694 u64 rdba = rxr->rx_paddr;
698 /* Setup the Base and Length of the Rx Descriptor Ring */
699 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
700 (rdba & 0x00000000ffffffffULL));
701 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
702 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
703 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
705 /* Set up the SRRCTL register */
706 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
707 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
708 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
710 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
713 * Set DROP_EN iff we have no flow control and >1 queue.
714 * Note that srrctl was cleared shortly before during reset,
715 * so we do not need to clear the bit, but do it just in case
716 * this code is moved elsewhere.
718 if (sc->num_rx_queues > 1 &&
719 sc->hw.fc.requested_mode == ixgbe_fc_none) {
720 srrctl |= IXGBE_SRRCTL_DROP_EN;
722 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
725 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
727 /* Setup the HW Rx Head and Tail Descriptor Pointers */
728 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
729 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
731 /* Set the driver rx tail address */
732 rxr->tail = IXGBE_RDT(rxr->me);
/* Packet-split replication type (PSRTYPE) — not present on 82598. */
735 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
736 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
737 | IXGBE_PSRTYPE_UDPHDR
738 | IXGBE_PSRTYPE_IPV4HDR
739 | IXGBE_PSRTYPE_IPV6HDR;
740 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
743 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
745 ixgbe_initialize_rss_mapping(sc);
747 if (sc->num_rx_queues > 1) {
748 /* RSS and RX IPP Checksum are mutually exclusive */
749 rxcsum |= IXGBE_RXCSUM_PCSD;
752 if (ifp->if_capenable & IFCAP_RXCSUM)
753 rxcsum |= IXGBE_RXCSUM_PCSD;
755 /* This is useful for calculating UDP/IP fragment checksums */
756 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
757 rxcsum |= IXGBE_RXCSUM_IPPCSE;
759 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
761 } /* ixgbe_initialize_receive_units */
763 /************************************************************************
764 * ixgbe_initialize_transmit_units - Enable transmit units.
/*
 * Program all TX-side registers: per-ring base/length/head/tail, head
 * write-back disable, and (on non-82598 parts) DMA TX enable plus MTQC
 * queue-mode setup behind a temporary arbiter disable.
 */
765 ************************************************************************/
767 ixgbe_initialize_transmit_units(if_ctx_t ctx)
769 struct ixgbe_softc *sc = iflib_get_softc(ctx);
770 struct ixgbe_hw *hw = &sc->hw;
771 if_softc_ctx_t scctx = sc->shared;
772 struct ix_tx_queue *que;
775 /* Setup the Base and Length of the Tx Descriptor Ring */
776 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
778 struct tx_ring *txr = &que->txr;
779 u64 tdba = txr->tx_paddr;
783 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
784 (tdba & 0x00000000ffffffffULL));
785 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
786 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
787 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
789 /* Setup the HW Tx Head and Tail descriptor pointers */
790 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
791 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
793 /* Cache the tail address */
794 txr->tail = IXGBE_TDT(txr->me);
/* Reset software ring-tracking state to match the zeroed hardware ring. */
796 txr->tx_rs_cidx = txr->tx_rs_pidx;
797 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
798 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
799 txr->tx_rsq[k] = QIDX_INVALID;
801 /* Disable Head Writeback */
803 * Note: for X550 series devices, these registers are actually
804 * prefixed with TPH_ instead of DCA_, but the addresses and
805 * fields remain the same.
807 switch (hw->mac.type) {
808 case ixgbe_mac_82598EB:
809 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
812 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
815 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
816 switch (hw->mac.type) {
817 case ixgbe_mac_82598EB:
818 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
821 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
827 if (hw->mac.type != ixgbe_mac_82598EB) {
828 u32 dmatxctl, rttdcs;
830 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
831 dmatxctl |= IXGBE_DMATXCTL_TE;
832 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
833 /* Disable arbiter to set MTQC */
834 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
835 rttdcs |= IXGBE_RTTDCS_ARBDIS;
836 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* MTQC queue mode depends on the current SR-IOV pool configuration. */
837 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
838 ixgbe_get_mtqc(sc->iov_mode));
839 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
840 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
843 } /* ixgbe_initialize_transmit_units */
845 /************************************************************************
/*
 * newbus device_register method: hand iflib this driver's static shared
 * context so it can drive probe/attach. The 'dev' argument is unused.
 */
847 ************************************************************************/
849 ixgbe_register(device_t dev)
851 return (&ixgbe_sctx_init);
852 } /* ixgbe_register */
854 /************************************************************************
855 * ixgbe_if_attach_pre - Device initialization routine, part 1
857 * Called when the driver is being loaded.
858 * Identifies the type of hardware, initializes the hardware,
859 * and initializes iflib structures.
861 * return 0 on success, positive on failure
862 ************************************************************************/
864 ixgbe_if_attach_pre(if_ctx_t ctx)
866 	struct ixgbe_softc *sc;
868 	if_softc_ctx_t scctx;
873 	INIT_DEBUGOUT("ixgbe_attach: begin");
875 	/* Allocate, clear, and link in our adapter structure */
876 	dev = iflib_get_dev(ctx);
877 	sc = iflib_get_softc(ctx);
881 	scctx = sc->shared = iflib_get_softc_ctx(ctx);
882 	sc->media = iflib_get_media(ctx);
885 	/* Determine hardware revision */
886 	hw->vendor_id = pci_get_vendor(dev);
887 	hw->device_id = pci_get_device(dev);
888 	hw->revision_id = pci_get_revid(dev);
889 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
890 	hw->subsystem_device_id = pci_get_subdevice(dev);
892 	/* Do base PCI setup - map BAR0 */
893 	if (ixgbe_allocate_pci_resources(ctx)) {
894 		device_printf(dev, "Allocation of PCI resources failed\n");
	/*
	 * Setting DRV_LOAD tells firmware/management that the driver now
	 * owns the device; it is cleared again on the error path below.
	 */
898 	/* let hardware know driver is loaded */
899 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
900 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
901 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
904 	 * Initialize the shared code
906 	if (ixgbe_init_shared_code(hw) != 0) {
907 		device_printf(dev, "Unable to initialize the shared code\n");
912 	if (hw->mbx.ops.init_params)
913 		hw->mbx.ops.init_params(hw);
	/* Module-parameter overrides for SFP policy and smart-speed. */
915 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
917 	if (hw->mac.type != ixgbe_mac_82598EB)
918 		hw->phy.smart_speed = ixgbe_smart_speed;
920 	ixgbe_init_device_features(sc);
922 	/* Enable WoL (if supported) */
923 	ixgbe_check_wol_support(sc);
925 	/* Verify adapter fan is still functional (if applicable) */
926 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
927 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
928 		ixgbe_check_fan_failure(sc, esdp, false);
931 	/* Ensure SW/FW semaphore is free */
932 	ixgbe_init_swfw_semaphore(hw);
934 	/* Set an initial default flow control value */
935 	hw->fc.requested_mode = ixgbe_flow_control;
	/*
	 * Reset with over-temperature protection enabled only for the
	 * duration of the reset; a missing/unsupported SFP is tolerated
	 * here and probed for later by the admin task.
	 */
937 	hw->phy.reset_if_overtemp = true;
938 	error = ixgbe_reset_hw(hw);
939 	hw->phy.reset_if_overtemp = false;
940 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
942 		 * No optics in this port, set up
943 		 * so the timer routine will probe
944 		 * for later insertion.
946 		sc->sfp_probe = true;
948 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
949 		device_printf(dev, "Unsupported SFP+ module detected!\n");
953 		device_printf(dev, "Hardware initialization failed\n");
958 	/* Make sure we have a good EEPROM before we read from it */
959 	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
960 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
965 	error = ixgbe_start_hw(hw);
967 	case IXGBE_ERR_EEPROM_VERSION:
968 		device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
970 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
971 		device_printf(dev, "Unsupported SFP+ Module\n");
974 	case IXGBE_ERR_SFP_NOT_PRESENT:
975 		device_printf(dev, "No SFP+ Module found\n");
	/*
	 * Populate the iflib shared context: RSS table size and max queue
	 * counts are per-MAC-generation (X550-family gets 512/64, older
	 * parts 128/16 per the visible switch arms).
	 */
981 	/* Most of the iflib initialization... */
983 	iflib_set_mac(ctx, hw->mac.addr);
984 	switch (sc->hw.mac.type) {
986 	case ixgbe_mac_X550EM_x:
987 	case ixgbe_mac_X550EM_a:
988 		scctx->isc_rss_table_size = 512;
989 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
992 		scctx->isc_rss_table_size = 128;
993 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
996 	/* Allow legacy interrupts */
997 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
	/* TX ring size includes an extra u32 (head write-back area). */
999 	scctx->isc_txqsizes[0] =
1000 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1001 	    sizeof(u32), DBA_ALIGN),
1002 	scctx->isc_rxqsizes[0] =
1003 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	/* 82598 lacks SCTP checksum offload and uses a smaller scatter list. */
1007 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1008 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1009 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1010 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1012 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1013 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1016 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1018 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1019 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1020 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1022 	scctx->isc_txrx = &ixgbe_txrx;
1024 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
	/* Error path: drop DRV_LOAD and release PCI resources. */
1029 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1030 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1031 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1032 	ixgbe_free_pci_resources(ctx);
1035 } /* ixgbe_if_attach_pre */
1037 /*********************************************************************
1038 * ixgbe_if_attach_post - Device initialization routine, part 2
1040 * Called during driver load, but after interrupts and
1041 * resources have been allocated and configured.
1042 * Sets up some data structures not relevant to iflib.
1044 * return 0 on success, positive on failure
1045 *********************************************************************/
1047 ixgbe_if_attach_post(if_ctx_t ctx)
1050 	struct ixgbe_softc *sc;
1051 	struct ixgbe_hw *hw;
1054 	dev = iflib_get_dev(ctx);
1055 	sc = iflib_get_softc(ctx);
	/* Legacy (INTx) interrupts require the LEGACY_IRQ feature capability. */
1059 	if (sc->intr_type == IFLIB_INTR_LEGACY &&
1060 	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1061 		device_printf(dev, "Device does not support legacy interrupts");
	/* M_NOWAIT: allocation may fail and is checked immediately below. */
1066 	/* Allocate multicast array memory. */
1067 	sc->mta = malloc(sizeof(*sc->mta) *
1068 	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1069 	if (sc->mta == NULL) {
1070 		device_printf(dev, "Can not allocate multicast setup array\n");
1075 	/* hw.ix defaults init */
1076 	ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1078 	/* Enable the optics for 82599 SFP+ fiber */
1079 	ixgbe_enable_tx_laser(hw);
1081 	/* Enable power to the phy. */
1082 	ixgbe_set_phy_power(hw, true);
1084 	ixgbe_initialize_iov(sc);
1086 	error = ixgbe_setup_interface(ctx);
1088 		device_printf(dev, "Interface setup failed: %d\n", error);
1092 	ixgbe_if_update_admin_status(ctx);
1094 	/* Initialize statistics */
1095 	ixgbe_update_stats_counters(sc);
1096 	ixgbe_add_hw_stats(sc);
1098 	/* Check PCIE slot type/speed/width */
1099 	ixgbe_get_slot_info(sc);
1102 	 * Do time init and sysctl init here, but
1103 	 * only on the first port of a bypass sc.
1105 	ixgbe_bypass_init(sc);
1107 	/* Display NVM and Option ROM versions */
1108 	ixgbe_print_fw_version(ctx);
1110 	/* Set an initial dmac value */
1112 	/* Set initial advertised speeds (if applicable) */
1113 	sc->advertise = ixgbe_get_advertise(sc);
1115 	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1116 		ixgbe_define_iov_schemas(dev, &error);
1119 	ixgbe_add_device_sysctls(ctx);
1124 } /* ixgbe_if_attach_post */
1126 /************************************************************************
1127 * ixgbe_check_wol_support
1129 * Checks whether the adapter's ports are capable of
1130 * Wake On LAN by reading the adapter's NVM.
1132 * Sets each port's hw->wol_enabled value depending
1133 * on the value read here.
1134 ************************************************************************/
1136 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1138 	struct ixgbe_hw *hw = &sc->hw;
	/*
	 * Default to no WoL, then enable it if the device-caps word read
	 * from the NVM says this port supports it.  The condition's second
	 * half (the port check for the WOL_PORT0 case) is elided in this
	 * listing.
	 */
1141 	/* Find out WoL support for port */
1142 	sc->wol_support = hw->wol_enabled = 0;
1143 	ixgbe_get_device_caps(hw, &dev_caps);
1144 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1145 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1147 		sc->wol_support = hw->wol_enabled = 1;
1149 	/* Save initial wake up filter configuration */
1150 	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1153 } /* ixgbe_check_wol_support */
1155 /************************************************************************
1156 * ixgbe_setup_interface
1158 * Setup networking device structure and register an interface.
1159 ************************************************************************/
1161 ixgbe_setup_interface(if_ctx_t ctx)
1163 	struct ifnet *ifp = iflib_get_ifp(ctx);
1164 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1166 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
	/* Advertise 10 Gb/s as the nominal baudrate for this NIC family. */
1168 	if_setbaudrate(ifp, IF_Gbps(10));
	/* Max frame = current MTU plus Ethernet header and CRC. */
1170 	sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1172 	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1174 	ixgbe_add_media_types(ctx);
1176 	/* Autoselect media by default */
1177 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1180 } /* ixgbe_setup_interface */
1182 /************************************************************************
1183 * ixgbe_if_get_counter
1184 ************************************************************************/
1186 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1188 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1189 	if_t ifp = iflib_get_ifp(ctx);
	/*
	 * Map iflib counter queries onto the softc's cached statistics
	 * (populated by ixgbe_update_stats_counters); anything we do not
	 * track explicitly falls through to the ifnet default.
	 */
1192 	case IFCOUNTER_IPACKETS:
1193 		return (sc->ipackets);
1194 	case IFCOUNTER_OPACKETS:
1195 		return (sc->opackets);
1196 	case IFCOUNTER_IBYTES:
1197 		return (sc->ibytes);
1198 	case IFCOUNTER_OBYTES:
1199 		return (sc->obytes);
1200 	case IFCOUNTER_IMCASTS:
1201 		return (sc->imcasts);
1202 	case IFCOUNTER_OMCASTS:
1203 		return (sc->omcasts);
1204 	case IFCOUNTER_COLLISIONS:
1206 	case IFCOUNTER_IQDROPS:
1207 		return (sc->iqdrops);
1208 	case IFCOUNTER_OQDROPS:
1210 	case IFCOUNTER_IERRORS:
1211 		return (sc->ierrors);
1213 		return (if_get_counter_default(ifp, cnt));
1215 } /* ixgbe_if_get_counter */
1217 /************************************************************************
1219 ************************************************************************/
1221 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1223 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1224 	struct ixgbe_hw *hw = &sc->hw;
	/*
	 * Service an SIOCGI2C-style request by reading req->len bytes,
	 * one at a time, from the PHY's I2C interface into req->data.
	 * Bail out early if this PHY has no read_i2c_byte method.
	 */
1228 	if (hw->phy.ops.read_i2c_byte == NULL)
1230 	for (i = 0; i < req->len; i++)
1231 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1232 		    req->dev_addr, &req->data[i]);
1234 } /* ixgbe_if_i2c_req */
1236 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1237 * @ctx: iflib context
1238 * @event: event code to check
1240 * Defaults to returning true for unknown events.
1242 * @returns true if iflib needs to reinit the interface
/*
 * Per the header comment above: unknown events default to true.  Only the
 * VLAN-config case label is visible here; its body and the function's
 * return statements are elided in this listing.
 */
1245 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1248 	case IFLIB_RESTART_VLAN_CONFIG:
1255 /************************************************************************
1256 * ixgbe_add_media_types
1257 ************************************************************************/
1259 ixgbe_add_media_types(if_ctx_t ctx)
1261 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1262 	struct ixgbe_hw *hw = &sc->hw;
1263 	device_t dev = iflib_get_dev(ctx);
	/* Query the PHY once; 'layer' is a bitmask of supported PHY layers. */
1266 	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1268 	/* Media types with matching FreeBSD media defines */
1269 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1270 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1271 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1272 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1273 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1274 		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1275 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1276 		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1278 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1279 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1280 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
	/* Multispeed fiber modules also get their 1G fallback media added. */
1283 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1284 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1285 		if (hw->phy.multispeed_fiber)
1286 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1289 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1290 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1291 		if (hw->phy.multispeed_fiber)
1292 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1294 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1295 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1296 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1297 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	/*
	 * Backplane (KR/KX4/KX) media: use the real IFM_*_K* defines when
	 * the headers provide them; otherwise map each to the closest
	 * classic media type and tell the operator about the substitution.
	 */
1299 #ifdef IFM_ETH_XTYPE
1300 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1301 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1302 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1303 		ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1304 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1305 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1306 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1307 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1309 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1310 		device_printf(dev, "Media supported: 10GbaseKR\n");
1311 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1312 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1314 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1315 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1316 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1317 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1319 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1320 		device_printf(dev, "Media supported: 1000baseKX\n");
1321 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1322 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1324 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1325 		device_printf(dev, "Media supported: 2500baseKX\n");
1326 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1327 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	/* 1000BASE-BX has no ifmedia mapping here; it is only reported. */
1330 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1331 		device_printf(dev, "Media supported: 1000baseBX\n");
1333 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1334 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1336 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	/* Autoselect is always offered last. */
1339 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1340 } /* ixgbe_add_media_types */
1342 /************************************************************************
1344 ************************************************************************/
/*
 * ixgbe_is_sfp - determine whether this port uses a pluggable (SFP/QSFP)
 * optical module, keyed off MAC generation and reported media type.
 * NOTE(review): the individual return statements are elided in this
 * listing; only the case structure is visible.
 */
1346 ixgbe_is_sfp(struct ixgbe_hw *hw)
1348 	switch (hw->mac.type) {
1349 	case ixgbe_mac_82598EB:
	/* 82598: only the NL PHY variant is SFP-based. */
1350 		if (hw->phy.type == ixgbe_phy_nl)
1353 	case ixgbe_mac_82599EB:
1354 		switch (hw->mac.ops.get_media_type(hw)) {
1355 		case ixgbe_media_type_fiber:
1356 		case ixgbe_media_type_fiber_qsfp:
1361 	case ixgbe_mac_X550EM_x:
1362 	case ixgbe_mac_X550EM_a:
1363 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1369 } /* ixgbe_is_sfp */
1371 /************************************************************************
1373 ************************************************************************/
1375 ixgbe_config_link(if_ctx_t ctx)
1377 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1378 	struct ixgbe_hw *hw = &sc->hw;
1379 	u32 autoneg, err = 0;
1380 	bool sfp, negotiate;
	/*
	 * SFP ports defer module setup to the admin task (TASK_MOD);
	 * copper/backplane ports configure the link inline below.
	 */
1382 	sfp = ixgbe_is_sfp(hw);
1385 		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1386 		iflib_admin_intr_deferred(ctx);
1388 		if (hw->mac.ops.check_link)
1389 			err = ixgbe_check_link(hw, &sc->link_speed,
1390 			    &sc->link_up, false);
	/* No advertised speeds configured: fall back to what the HW can do. */
1393 		autoneg = hw->phy.autoneg_advertised;
1394 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1395 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1399 		if (hw->mac.ops.setup_link)
1400 			err = hw->mac.ops.setup_link(hw, autoneg,
1403 } /* ixgbe_config_link */
1405 /************************************************************************
1406 * ixgbe_update_stats_counters - Update board statistics counters.
1407 ************************************************************************/
1409 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1411 	struct ixgbe_hw *hw = &sc->hw;
1412 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1413 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1415 	u64 total_missed_rx = 0;
	/*
	 * All hardware statistics registers are clear-on-read, so each
	 * value is accumulated (+=) into the softc's running totals.
	 */
1417 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1418 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1419 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1420 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1421 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1423 	for (int i = 0; i < 16; i++) {
1424 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1425 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1426 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1428 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1429 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1430 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1432 	/* Hardware workaround, gprc counts missed packets */
1433 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1434 	stats->gprc -= missed_rx;
	/* Non-82598 MACs expose 64-bit byte counters split low/high. */
1436 	if (hw->mac.type != ixgbe_mac_82598EB) {
1437 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1438 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1439 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1440 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1441 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1442 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1443 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1444 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1445 		stats->lxoffrxc += lxoffrxc;
1447 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1448 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1449 		stats->lxoffrxc += lxoffrxc;
1450 		/* 82598 only has a counter in the high register */
1451 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1452 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1453 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1457 	 * For watchdog management we need to know if we have been paused
1458 	 * during the last interval, so capture that here.
	/* Presumably gated on a fresh lxoffrxc reading; condition elided here. */
1461 		sc->shared->isc_pause_frames = 1;
1464 	 * Workaround: mprc hardware is incorrectly counting
1465 	 * broadcasts, so for now we subtract those.
1467 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1468 	stats->bprc += bprc;
1469 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1470 	if (hw->mac.type == ixgbe_mac_82598EB)
1471 		stats->mprc -= bprc;
1473 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1474 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1475 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1476 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1477 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1478 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1480 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1481 	stats->lxontxc += lxon;
1482 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1483 	stats->lxofftxc += lxoff;
	/*
	 * Transmitted pause frames are counted by the TX packet counters;
	 * subtract them (and their minimum-frame bytes from gotc) so the
	 * totals reflect only real traffic.
	 */
1484 	total = lxon + lxoff;
1486 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1487 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1488 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1489 	stats->gptc -= total;
1490 	stats->mptc -= total;
1491 	stats->ptc64 -= total;
1492 	stats->gotc -= total * ETHER_MIN_LEN;
1494 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1495 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1496 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1497 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1498 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1499 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1500 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1501 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1502 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1503 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1504 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1505 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1506 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1507 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1508 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1509 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1510 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1511 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1512 	/* Only read FCOE on 82599 */
1513 	if (hw->mac.type != ixgbe_mac_82598EB) {
1514 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1515 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1516 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1517 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1518 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1521 	/* Fill out the OS statistics structure */
1522 	IXGBE_SET_IPACKETS(sc, stats->gprc);
1523 	IXGBE_SET_OPACKETS(sc, stats->gptc);
1524 	IXGBE_SET_IBYTES(sc, stats->gorc);
1525 	IXGBE_SET_OBYTES(sc, stats->gotc);
1526 	IXGBE_SET_IMCASTS(sc, stats->mprc);
1527 	IXGBE_SET_OMCASTS(sc, stats->mptc);
	/* Full-duplex Ethernet: collisions are always zero. */
1528 	IXGBE_SET_COLLISIONS(sc, 0);
1529 	IXGBE_SET_IQDROPS(sc, total_missed_rx);
1532 	 * Aggregate following types of errors as RX errors:
1533 	 * - CRC error count,
1534 	 * - illegal byte error count,
1535 	 * - checksum error count,
1536 	 * - missed packets count,
1537 	 * - length error count,
1538 	 * - undersized packets count,
1539 	 * - fragmented packets count,
1540 	 * - oversized packets count,
1543 	 * Ignore XEC errors for 82599 to workaround errata about
1544 	 * UDP frames with zero checksum.
1546 	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1547 	    (hw->mac.type != ixgbe_mac_82599EB ? stats->xec : 0) +
1548 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1550 } /* ixgbe_update_stats_counters */
1552 /************************************************************************
1553 * ixgbe_add_hw_stats
1555 * Add sysctl variables, one per statistic, to the system.
1556 ************************************************************************/
1558 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
	/*
	 * Register the driver's statistics sysctl tree: driver-level
	 * counters at the device node, per-queue TX/RX sub-nodes, and a
	 * "mac_stats" sub-node mirroring struct ixgbe_hw_stats.  All
	 * leaves are read-only except per-queue "interrupt_rate".
	 */
1560 	device_t dev = iflib_get_dev(sc->ctx);
1561 	struct ix_rx_queue *rx_que;
1562 	struct ix_tx_queue *tx_que;
1563 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1564 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1565 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1566 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1567 	struct sysctl_oid *stat_node, *queue_node;
1568 	struct sysctl_oid_list *stat_list, *queue_list;
1571 #define QUEUE_NAME_LEN 32
1572 	char namebuf[QUEUE_NAME_LEN];
1574 	/* Driver Statistics */
1575 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1576 	    CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1577 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1578 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1579 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1580 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
	/* Per-TX-queue node: descriptor head/tail plus TSO/packet counts. */
1582 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1583 		struct tx_ring *txr = &tx_que->txr;
1584 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1585 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1586 		    CTLFLAG_RD, NULL, "Queue Name");
1587 		queue_list = SYSCTL_CHILDREN(queue_node);
1589 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1590 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1591 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1592 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1593 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1594 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1595 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1596 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1597 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1598 		    CTLFLAG_RD, &txr->total_packets,
1599 		    "Queue Packets Transmitted");
	/* Per-RX-queue node: interrupt rate (RW), irqs, head/tail, counters. */
1602 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1603 		struct rx_ring *rxr = &rx_que->rxr;
1604 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1605 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1606 		    CTLFLAG_RD, NULL, "Queue Name");
1607 		queue_list = SYSCTL_CHILDREN(queue_node);
1609 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1610 		    CTLTYPE_UINT | CTLFLAG_RW, &sc->rx_queues[i], 0,
1611 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1613 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1614 		    CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1615 		    "irqs on this queue");
1616 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1617 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1618 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1619 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1620 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1621 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1622 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1623 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1624 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1625 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1626 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1627 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1628 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1629 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1632 	/* MAC stats get their own sub node */
1634 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1635 	    CTLFLAG_RD, NULL, "MAC Statistics");
1636 	stat_list = SYSCTL_CHILDREN(stat_node);
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1639 	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1641 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1643 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1645 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1647 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1649 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1651 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1653 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1655 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1657 	/* Flow Control stats */
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1659 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1661 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1663 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1665 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1667 	/* Packet Reception Stats */
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1669 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1671 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1673 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1675 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1677 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1679 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1681 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1682 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1683 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1684 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1685 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1686 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1687 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1688 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1689 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1690 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1691 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1692 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1693 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1694 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1695 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1696 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1697 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1698 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1699 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1700 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1701 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	/*
	 * BUGFIX: this leaf reports dropped management packets, so it must
	 * point at mngpdc (accumulated from the MNGPDC register in
	 * ixgbe_update_stats_counters), not mngptc, which is the transmit
	 * counter already exported below as "management_pkts_txd".
	 */
1702 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1703 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1704 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1705 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1707 	/* Packet Transmission Stats */
1708 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1709 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1710 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1711 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1712 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1713 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1714 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1715 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1716 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1717 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1718 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1719 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1720 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1721 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1722 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1723 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1724 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1725 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1726 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1727 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1728 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1729 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1730 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1731 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1732 } /* ixgbe_add_hw_stats */
1734 /************************************************************************
1735 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1737 * Retrieves the TDH value from the hardware
1738 ************************************************************************/
1740 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1742 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read-only sysctl: report the hardware TDH register for this ring. */
1749 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1750 	error = sysctl_handle_int(oidp, &val, 0, req);
1751 	if (error || !req->newptr)
1755 } /* ixgbe_sysctl_tdh_handler */
1757 /************************************************************************
1758 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1760 * Retrieves the TDT value from the hardware
1761 ************************************************************************/
1763 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1765 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read-only sysctl: report the hardware TDT register for this ring. */
1772 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1773 	error = sysctl_handle_int(oidp, &val, 0, req);
1774 	if (error || !req->newptr)
1778 } /* ixgbe_sysctl_tdt_handler */
1780 /************************************************************************
1781 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1783 * Retrieves the RDH value from the hardware
1784 ************************************************************************/
1786 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1788 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	/* Read-only sysctl: report the hardware RDH register for this ring. */
1795 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1796 	error = sysctl_handle_int(oidp, &val, 0, req);
1797 	if (error || !req->newptr)
1801 } /* ixgbe_sysctl_rdh_handler */
1803 /************************************************************************
1804 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1806 * Retrieves the RDT value from the hardware
1807 ************************************************************************/
1809 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1811 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	/* Read-only sysctl: report the hardware RDT register for this ring. */
1818 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1819 	error = sysctl_handle_int(oidp, &val, 0, req);
1820 	if (error || !req->newptr)
1824 } /* ixgbe_sysctl_rdt_handler */
1826 /************************************************************************
1827 * ixgbe_if_vlan_register
1829 * Run via vlan config EVENT, it enables us to use the
1830 * HW Filter table since we can get the vlan id. This
1831 * just creates the entry in the soft version of the
1832 * VFTA, init will repopulate the real table.
1833 ************************************************************************/
1835 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1837 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	/*
	 * Each 32-bit shadow-VFTA word covers 32 VLAN IDs: the word index
	 * comes from vtag bits 11:5; 'bit' (declaration elided in this
	 * listing, presumably vtag & 0x1F) selects the bit within it.
	 */
1840 	index = (vtag >> 5) & 0x7F;
1842 	sc->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA into the hardware. */
1844 	ixgbe_setup_vlan_hw_support(ctx);
1845 } /* ixgbe_if_vlan_register */
1847 /************************************************************************
1848 * ixgbe_if_vlan_unregister
1850 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1851 ************************************************************************/
1853 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1855 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* Same word/bit mapping as ixgbe_if_vlan_register. */
1858 index = (vtag >> 5) & 0x7F;
/* Clear the tag from the soft table. */
1860 sc->shadow_vfta[index] &= ~(1 << bit);
1862 /* Re-init to load the changes */
1863 ixgbe_setup_vlan_hw_support(ctx);
1864 } /* ixgbe_if_vlan_unregister */
1866 /************************************************************************
1867 * ixgbe_setup_vlan_hw_support
1868 ************************************************************************/
1870 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1872 struct ifnet *ifp = iflib_get_ifp(ctx);
1873 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1874 struct ixgbe_hw *hw = &sc->hw;
1875 struct rx_ring *rxr;
1881 * We get here thru init_locked, meaning
1882 * a soft reset, this has already cleared
1883 * the VFTA and other state, so if there
1884 * have been no vlan's registered do nothing.
1886 if (sc->num_vlans == 0)
1889 /* Setup the queues for vlans */
1890 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1891 for (i = 0; i < sc->num_rx_queues; i++) {
1892 rxr = &sc->rx_queues[i].rxr;
1893 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1894 if (hw->mac.type != ixgbe_mac_82598EB) {
1895 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1896 ctrl |= IXGBE_RXDCTL_VME;
1897 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember strip state so the rx path knows tags are removed in hw. */
1899 rxr->vtag_strip = true;
/* Without hw VLAN filtering there is nothing further to program. */
1903 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1906 * A soft reset zero's out the VFTA, so
1907 * we need to repopulate it now.
1909 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1910 if (sc->shadow_vfta[i] != 0)
1911 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1912 sc->shadow_vfta[i]);
1914 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1915 /* Enable the Filter Table if enabled */
1916 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1917 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1918 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL), not per-queue. */
1920 if (hw->mac.type == ixgbe_mac_82598EB)
1921 ctrl |= IXGBE_VLNCTRL_VME;
1922 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1923 } /* ixgbe_setup_vlan_hw_support */
1925 /************************************************************************
1926 * ixgbe_get_slot_info
1928 * Get the width and transaction speed of
1929 * the slot this adapter is plugged into.
1930 ************************************************************************/
1932 ixgbe_get_slot_info(struct ixgbe_softc *sc)
1934 device_t dev = iflib_get_dev(sc->ctx);
1935 struct ixgbe_hw *hw = &sc->hw;
1936 int bus_info_valid = true;
1940 /* Some devices are behind an internal bridge */
1941 switch (hw->device_id) {
1942 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1943 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1944 goto get_parent_info;
/* Normal case: read bus info straight from our own config space. */
1949 ixgbe_get_bus_info(hw);
1952 * Some devices don't use PCI-E, but there is no need
1953 * to display "Unknown" for bus speed and width.
1955 switch (hw->mac.type) {
1956 case ixgbe_mac_X550EM_x:
1957 case ixgbe_mac_X550EM_a:
1965 * For the Quad port adapter we need to parse back
1966 * up the PCI tree to find the speed of the expansion
1967 * slot into which this adapter is plugged. A bit more work.
1969 dev = device_get_parent(device_get_parent(dev));
1971 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1972 pci_get_slot(dev), pci_get_function(dev));
/* Walk up a second bridge level to reach the physical slot. */
1974 dev = device_get_parent(device_get_parent(dev));
1976 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1977 pci_get_slot(dev), pci_get_function(dev));
1979 /* Now get the PCI Express Capabilities offset */
1980 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1982 * Hmm...can't get PCI-Express capabilities.
1983 * Falling back to default method.
1985 bus_info_valid = false;
1986 ixgbe_get_bus_info(hw);
1989 /* ...and read the Link Status Register */
1990 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1991 ixgbe_set_pci_config_data_generic(hw, link);
/* Report the negotiated link speed/width gathered above. */
1994 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1995 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1996 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1997 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1999 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2000 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2001 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the slot cannot supply full bandwidth for this adapter. */
2004 if (bus_info_valid) {
2005 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2006 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2007 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2008 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2009 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2011 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2012 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2013 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2014 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2015 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2018 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2021 } /* ixgbe_get_slot_info */
2023 /************************************************************************
2024 * ixgbe_if_msix_intr_assign
2026 * Setup MSI-X Interrupt resources and handlers
2027 ************************************************************************/
2029 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2031 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2032 struct ix_rx_queue *rx_que = sc->rx_queues;
2033 struct ix_tx_queue *tx_que;
2034 int error, rid, vector = 0;
2038 /* Admin Que is vector 0*/
/* One MSI-X vector per RX queue; TX completion shares the RX vector. */
2040 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2043 snprintf(buf, sizeof(buf), "rxq%d", i);
2044 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2045 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2048 device_printf(iflib_get_dev(ctx),
2049 "Failed to allocate que int %d err: %d", i, error);
/* Shrink queue count so cleanup only frees the irqs we got. */
2050 sc->num_rx_queues = i + 1;
2054 rx_que->msix = vector;
2055 if (sc->feat_en & IXGBE_FEATURE_RSS) {
2057 * The queue ID is used as the RSS layer bucket ID.
2058 * We look up the queue ID -> RSS CPU ID and select
2061 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2064 * Bind the MSI-X vector, and thus the
2065 * rings to the corresponding cpu.
2067 * This just happens to match the default RSS
2068 * round-robin bucket -> queue -> CPU allocation.
2070 if (sc->num_rx_queues > 1)
/* TX queues piggy-back on the RX queue interrupts (softirq only). */
2075 for (int i = 0; i < sc->num_tx_queues; i++) {
2076 snprintf(buf, sizeof(buf), "txq%d", i);
2077 tx_que = &sc->tx_queues[i];
2078 tx_que->msix = i % sc->num_rx_queues;
2079 iflib_softirq_alloc_generic(ctx,
2080 &sc->rx_queues[tx_que->msix].que_irq,
2081 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Finally the admin/link vector ("aq"). */
2084 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2085 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2087 device_printf(iflib_get_dev(ctx),
2088 "Failed to register admin handler");
2092 sc->vector = vector;
/* Error path: release everything allocated above. */
2096 iflib_irq_free(ctx, &sc->irq);
2097 rx_que = sc->rx_queues;
2098 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2099 iflib_irq_free(ctx, &rx_que->que_irq);
2102 } /* ixgbe_if_msix_intr_assign */
2104 /*********************************************************************
2105 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2106 **********************************************************************/
2108 ixgbe_msix_que(void *arg)
2110 struct ix_rx_queue *que = arg;
2111 struct ixgbe_softc *sc = que->sc;
2112 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx);
2114 /* Protect against spurious interrupts */
2115 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2116 return (FILTER_HANDLED);
/* Mask this queue's vector until the taskqueue has serviced it. */
2118 ixgbe_disable_queue(sc, que->msix);
2121 return (FILTER_SCHEDULE_THREAD);
2122 } /* ixgbe_msix_que */
2124 /************************************************************************
2125 * ixgbe_media_status - Media Ioctl callback
2127 * Called whenever the user queries the status of
2128 * the interface using ifconfig.
2129 ************************************************************************/
2131 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2133 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2134 struct ixgbe_hw *hw = &sc->hw;
2137 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2139 ifmr->ifm_status = IFM_AVALID;
2140 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and stop. */
2142 if (!sc->link_active)
2145 ifmr->ifm_status |= IFM_ACTIVE;
2146 layer = sc->phy_layer;
/* Map each supported physical layer + negotiated speed onto an
 * ifmedia subtype; copper (BASE-T) variants first. */
2148 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2149 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2150 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2151 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2152 switch (sc->link_speed) {
2153 case IXGBE_LINK_SPEED_10GB_FULL:
2154 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2156 case IXGBE_LINK_SPEED_1GB_FULL:
2157 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2159 case IXGBE_LINK_SPEED_100_FULL:
2160 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2162 case IXGBE_LINK_SPEED_10_FULL:
2163 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* Direct-attach copper (twinax). */
2166 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2167 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2168 switch (sc->link_speed) {
2169 case IXGBE_LINK_SPEED_10GB_FULL:
2170 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Long-reach fiber. */
2173 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2174 switch (sc->link_speed) {
2175 case IXGBE_LINK_SPEED_10GB_FULL:
2176 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2178 case IXGBE_LINK_SPEED_1GB_FULL:
2179 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2182 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2183 switch (sc->link_speed) {
2184 case IXGBE_LINK_SPEED_10GB_FULL:
2185 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2187 case IXGBE_LINK_SPEED_1GB_FULL:
2188 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
/* Short-reach fiber. */
2191 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2192 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2193 switch (sc->link_speed) {
2194 case IXGBE_LINK_SPEED_10GB_FULL:
2195 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2197 case IXGBE_LINK_SPEED_1GB_FULL:
2198 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2201 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2202 switch (sc->link_speed) {
2203 case IXGBE_LINK_SPEED_10GB_FULL:
2204 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2208 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX): older ifmedia lacks dedicated subtypes, so
 * approximate with SR/CX4/CX when IFM_ETH_XTYPE is unavailable. */
2211 #ifndef IFM_ETH_XTYPE
2212 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2213 switch (sc->link_speed) {
2214 case IXGBE_LINK_SPEED_10GB_FULL:
2215 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2217 case IXGBE_LINK_SPEED_2_5GB_FULL:
2218 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2220 case IXGBE_LINK_SPEED_1GB_FULL:
2221 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2224 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2225 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2226 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2227 switch (sc->link_speed) {
2228 case IXGBE_LINK_SPEED_10GB_FULL:
2229 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2231 case IXGBE_LINK_SPEED_2_5GB_FULL:
2232 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2234 case IXGBE_LINK_SPEED_1GB_FULL:
2235 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* With IFM_ETH_XTYPE present, use the real KR/KX4/KX subtypes. */
2239 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2240 switch (sc->link_speed) {
2241 case IXGBE_LINK_SPEED_10GB_FULL:
2242 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2244 case IXGBE_LINK_SPEED_2_5GB_FULL:
2245 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2247 case IXGBE_LINK_SPEED_1GB_FULL:
2248 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2251 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2252 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2253 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2254 switch (sc->link_speed) {
2255 case IXGBE_LINK_SPEED_10GB_FULL:
2256 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2258 case IXGBE_LINK_SPEED_2_5GB_FULL:
2259 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2261 case IXGBE_LINK_SPEED_1GB_FULL:
2262 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2267 /* If nothing is recognized... */
2268 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2269 ifmr->ifm_active |= IFM_UNKNOWN;
2271 /* Display current flow control setting used on link */
2272 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2273 hw->fc.current_mode == ixgbe_fc_full)
2274 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2275 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2276 hw->fc.current_mode == ixgbe_fc_full)
2277 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2278 } /* ixgbe_media_status */
2280 /************************************************************************
2281 * ixgbe_media_change - Media Ioctl callback
2283 * Called when the user changes speed/duplex using
2284 * media/mediaopt option with ifconfig.
2285 ************************************************************************/
2287 ixgbe_if_media_change(if_ctx_t ctx)
2289 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2290 struct ifmedia *ifm = iflib_get_media(ctx);
2291 struct ixgbe_hw *hw = &sc->hw;
2292 ixgbe_link_speed speed = 0;
2294 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2296 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software here. */
2299 if (hw->phy.media_type == ixgbe_media_type_backplane)
2303 * We don't actually need to check against the supported
2304 * media types of the adapter; ifmedia will take care of
/* Translate the requested ifmedia subtype into a link-speed mask. */
2307 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2310 speed |= IXGBE_LINK_SPEED_100_FULL;
2311 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2312 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2316 #ifndef IFM_ETH_XTYPE
2317 case IFM_10G_SR: /* KR, too */
2318 case IFM_10G_CX4: /* KX4 */
2323 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2324 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2326 #ifndef IFM_ETH_XTYPE
2327 case IFM_1000_CX: /* KX */
2333 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2336 speed |= IXGBE_LINK_SPEED_100_FULL;
2337 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2339 case IFM_10G_TWINAX:
2340 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2343 speed |= IXGBE_LINK_SPEED_100_FULL;
2346 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Push the new speed mask to the PHY/MAC and record the
 * equivalent "advertise" sysctl encoding (bitmask: 1=100M,
 * 2=1G, 4=10G, 8=10M). */
2352 hw->mac.autotry_restart = true;
2353 hw->mac.ops.setup_link(hw, speed, true);
2355 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2356 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2357 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2358 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2363 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2366 } /* ixgbe_if_media_change */
2368 /************************************************************************
2370 ************************************************************************/
2372 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2374 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2375 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from current filter control with unicast-promisc cleared. */
2379 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2380 rctl &= (~IXGBE_FCTRL_UPE);
2381 if (ifp->if_flags & IFF_ALLMULTI)
2382 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2384 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
/* If the multicast list fits in the hw filter, multicast-promisc
 * is unnecessary and can be cleared. */
2386 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2387 rctl &= (~IXGBE_FCTRL_MPE);
2388 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
/* Then layer the interface flags on top: full promisc wins over
 * allmulti. */
2390 if (ifp->if_flags & IFF_PROMISC) {
2391 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2392 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2393 } else if (ifp->if_flags & IFF_ALLMULTI) {
2394 rctl |= IXGBE_FCTRL_MPE;
2395 rctl &= ~IXGBE_FCTRL_UPE;
2396 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2399 } /* ixgbe_if_promisc_set */
2401 /************************************************************************
2402 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2403 ************************************************************************/
2405 ixgbe_msix_link(void *arg)
2407 struct ixgbe_softc *sc = arg;
2408 struct ixgbe_hw *hw = &sc->hw;
2409 u32 eicr, eicr_mask;
2414 /* Pause other interrupts */
2415 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2417 /* First get the cause */
2418 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2419 /* Be sure the queue bits are not cleared */
2420 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2421 /* Clear interrupt with write */
2422 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2424 /* Link status change */
2425 if (eicr & IXGBE_EICR_LSC) {
2426 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2427 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* Causes below only exist on 82599 and newer MACs. */
2430 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2431 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2432 (eicr & IXGBE_EICR_FLOW_DIR)) {
2433 /* This is probably overkill :) */
2434 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2435 return (FILTER_HANDLED);
2436 /* Disable the interrupt */
2437 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2438 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2440 if (eicr & IXGBE_EICR_ECC) {
2441 device_printf(iflib_get_dev(sc->ctx),
2442 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2443 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2446 /* Check for over temp condition */
2447 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
/* X550EM_a signals over-temp via GPI SDP0; other MACs via EICR_TS. */
2448 switch (sc->hw.mac.type) {
2449 case ixgbe_mac_X550EM_a:
2450 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2452 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2453 IXGBE_EICR_GPI_SDP0_X550EM_a);
2454 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2455 IXGBE_EICR_GPI_SDP0_X550EM_a);
2456 retval = hw->phy.ops.check_overtemp(hw);
2457 if (retval != IXGBE_ERR_OVERTEMP)
2459 device_printf(iflib_get_dev(sc->ctx),
2460 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2461 device_printf(iflib_get_dev(sc->ctx),
2462 "System shutdown required!\n");
2465 if (!(eicr & IXGBE_EICR_TS))
2467 retval = hw->phy.ops.check_overtemp(hw);
2468 if (retval != IXGBE_ERR_OVERTEMP)
2470 device_printf(iflib_get_dev(sc->ctx),
2471 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2472 device_printf(iflib_get_dev(sc->ctx),
2473 "System shutdown required!\n");
2474 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2479 /* Check for VF message */
2480 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2481 (eicr & IXGBE_EICR_MAILBOX))
2482 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
/* SFP module insertion/removal and multi-speed-fiber events. */
2485 if (ixgbe_is_sfp(hw)) {
2486 /* Pluggable optics-related interrupt */
2487 if (hw->mac.type >= ixgbe_mac_X540)
2488 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2490 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2492 if (eicr & eicr_mask) {
2493 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2494 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2497 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2498 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2499 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2500 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2501 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2505 /* Check for fan failure */
2506 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2507 ixgbe_check_fan_failure(sc, eicr, true);
2508 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2511 /* External PHY interrupt */
2512 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2513 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2514 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2515 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Schedule the admin task only if some cause was latched above. */
2518 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2519 } /* ixgbe_msix_link */
2521 /************************************************************************
2522 * ixgbe_sysctl_interrupt_rate_handler
2523 ************************************************************************/
2525 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2527 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2529 unsigned int reg, usec, rate;
/* Current EITR value for this queue's vector; bits 3..11 hold the
 * interval in 2us units (hence the shift by 3 below). */
2531 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2532 usec = ((reg & 0x0FF8) >> 3);
2534 rate = 500000 / usec;
2537 error = sysctl_handle_int(oidp, &rate, 0, req);
2538 if (error || !req->newptr)
2540 reg &= ~0xfff; /* default, no limitation */
2541 ixgbe_max_interrupt_rate = 0;
/* Clamp to the representable range, then convert the requested
 * interrupts/sec back into an EITR interval. */
2542 if (rate > 0 && rate < 500000) {
2545 ixgbe_max_interrupt_rate = rate;
2546 reg |= ((4000000/rate) & 0xff8);
2548 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2551 } /* ixgbe_sysctl_interrupt_rate_handler */
2553 /************************************************************************
2554 * ixgbe_add_device_sysctls
2555 ************************************************************************/
2557 ixgbe_add_device_sysctls(if_ctx_t ctx)
2559 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2560 device_t dev = iflib_get_dev(ctx);
2561 struct ixgbe_hw *hw = &sc->hw;
2562 struct sysctl_oid_list *child;
2563 struct sysctl_ctx_list *ctx_list;
2565 ctx_list = device_get_sysctl_ctx(dev);
2566 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2568 /* Sysctls for all devices */
2569 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2570 CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_flowcntl, "I",
2571 IXGBE_SYSCTL_DESC_SET_FC);
2573 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2574 CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_advertise, "I",
2575 IXGBE_SYSCTL_DESC_ADV_SPEED);
2577 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2578 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2579 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2582 /* testing sysctls (for all devices) */
2583 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2584 CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_power_state,
2585 "I", "PCI Power State");
2587 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2588 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2589 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2591 /* for X550 series devices */
2592 if (hw->mac.type >= ixgbe_mac_X550)
2593 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2594 CTLTYPE_U16 | CTLFLAG_RW, sc, 0, ixgbe_sysctl_dmac,
2595 "I", "DMA Coalesce");
2597 /* for WoL-capable devices */
2598 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2599 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2600 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2601 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2603 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2604 CTLTYPE_U32 | CTLFLAG_RW, sc, 0, ixgbe_sysctl_wufc,
2605 "I", "Enable/Disable Wake Up Filters");
2608 /* for X552/X557-AT devices */
2609 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2610 struct sysctl_oid *phy_node;
2611 struct sysctl_oid_list *phy_list;
/* External-PHY sysctls live under their own "phy" subtree. */
2613 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2614 CTLFLAG_RD, NULL, "External PHY sysctls");
2615 phy_list = SYSCTL_CHILDREN(phy_node);
2617 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2618 CTLTYPE_U16 | CTLFLAG_RD, sc, 0, ixgbe_sysctl_phy_temp,
2619 "I", "Current External PHY Temperature (Celsius)");
2621 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2622 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2623 ixgbe_sysctl_phy_overtemp_occurred, "I",
2624 "External PHY High Temperature Event Occurred");
/* EEE sysctl only when the capability bit is set for this adapter. */
2627 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2628 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2629 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2630 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2632 } /* ixgbe_add_device_sysctls */
2634 /************************************************************************
2635 * ixgbe_allocate_pci_resources
2636 ************************************************************************/
2638 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2640 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2641 device_t dev = iflib_get_dev(ctx);
/* Map the device's memory BAR for register access. */
2645 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2648 if (!(sc->pci_mem)) {
2649 device_printf(dev, "Unable to allocate bus resource: memory\n");
2653 /* Save bus_space values for READ/WRITE_REG macros */
2654 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2655 sc->osdep.mem_bus_space_handle =
2656 rman_get_bushandle(sc->pci_mem);
2657 /* Set hw values for shared code */
2658 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2661 } /* ixgbe_allocate_pci_resources */
2663 /************************************************************************
2664 * ixgbe_detach - Device removal routine
2666 * Called when the driver is being removed.
2667 * Stops the adapter and deallocates all the resources
2668 * that were allocated for driver operation.
2670 * return 0 on success, positive on failure
2671 ************************************************************************/
2673 ixgbe_if_detach(if_ctx_t ctx)
2675 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2676 device_t dev = iflib_get_dev(ctx);
2679 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Refuse to detach while VFs are still attached via SR-IOV. */
2681 if (ixgbe_pci_iov_detach(dev) != 0) {
2682 device_printf(dev, "SR-IOV in use; detach first.\n");
/* Transition the port to its low-power/WoL state before teardown. */
2686 ixgbe_setup_low_power_mode(ctx);
2688 /* let hardware know driver is unloading */
2689 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2690 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2691 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2693 ixgbe_free_pci_resources(ctx);
/* Release the multicast address table allocated at attach. */
2694 free(sc->mta, M_IXGBE);
2697 } /* ixgbe_if_detach */
2699 /************************************************************************
2700 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2702 * Prepare the adapter/port for LPLU and/or WoL
2703 ************************************************************************/
2705 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2707 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2708 struct ixgbe_hw *hw = &sc->hw;
2709 device_t dev = iflib_get_dev(ctx);
/* If WoL is off, the PHY can simply be powered down. */
2712 if (!hw->wol_enabled)
2713 ixgbe_set_phy_power(hw, false);
2715 /* Limit power management flow to X550EM baseT */
2716 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2717 hw->phy.ops.enter_lplu) {
2718 /* Turn off support for APM wakeup. (Using ACPI instead) */
2719 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2720 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2723 * Clear Wake Up Status register to prevent any previous wakeup
2724 * events from waking us up immediately after we suspend.
2726 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2729 * Program the Wakeup Filter Control register with user filter
2732 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2734 /* Enable wakeups and power management in Wakeup Control */
2735 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2736 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2738 /* X550EM baseT adapters need a special LPLU flow */
/* Hold off PHY resets while entering low-power link-up mode. */
2739 hw->phy.reset_disable = true;
2741 error = hw->phy.ops.enter_lplu(hw);
2743 device_printf(dev, "Error entering LPLU: %d\n", error);
2744 hw->phy.reset_disable = false;
2746 /* Just stop for other adapters */
2751 } /* ixgbe_setup_low_power_mode */
2753 /************************************************************************
2754 * ixgbe_shutdown - Shutdown entry point
2755 ************************************************************************/
2757 ixgbe_if_shutdown(if_ctx_t ctx)
2761 INIT_DEBUGOUT("ixgbe_shutdown: begin");
/* Shutdown reuses the suspend/LPLU path. */
2763 error = ixgbe_setup_low_power_mode(ctx);
2766 } /* ixgbe_if_shutdown */
2768 /************************************************************************
2772 ************************************************************************/
2774 ixgbe_if_suspend(if_ctx_t ctx)
2778 INIT_DEBUGOUT("ixgbe_suspend: begin");
/* Suspend simply drops the adapter into low-power/WoL mode. */
2780 error = ixgbe_setup_low_power_mode(ctx);
2783 } /* ixgbe_if_suspend */
2785 /************************************************************************
2789 ************************************************************************/
2791 ixgbe_if_resume(if_ctx_t ctx)
2793 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2794 device_t dev = iflib_get_dev(ctx);
2795 struct ifnet *ifp = iflib_get_ifp(ctx);
2796 struct ixgbe_hw *hw = &sc->hw;
2799 INIT_DEBUGOUT("ixgbe_resume: begin");
2801 /* Read & clear WUS register */
2802 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
/* Log what woke us, then clear both status and filter registers. */
2804 device_printf(dev, "Woken up by (WUS): %#010x\n",
2805 IXGBE_READ_REG(hw, IXGBE_WUS));
2806 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2807 /* And clear WUFC until next low-power transition */
2808 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2811 * Required after D3->D0 transition;
2812 * will re-advertise all previous advertised speeds
2814 if (ifp->if_flags & IFF_UP)
2818 } /* ixgbe_if_resume */
2820 /************************************************************************
2821 * ixgbe_if_mtu_set - Ioctl mtu entry point
2823 * Return 0 on success, EINVAL on failure
2824 ************************************************************************/
2826 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2828 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2831 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject MTUs larger than the hardware maximum. */
2833 if (mtu > IXGBE_MAX_MTU) {
/* Frame size = MTU plus Ethernet header overhead (IXGBE_MTU_HDR). */
2836 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
2840 } /* ixgbe_if_mtu_set */
2842 /************************************************************************
2843 * ixgbe_if_crcstrip_set
2844 ************************************************************************/
2846 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2848 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2849 struct ixgbe_hw *hw = &sc->hw;
2850 /* crc stripping is set in two places:
2851 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2852 * IXGBE_RDRXCTL (set by the original driver in
2853 * ixgbe_setup_hw_rsc() called in init_locked.
2854 * We disable the setting when netmap is compiled in).
2855 * We update the values here, but also in ixgbe.c because
2856 * init_locked sometimes is called outside our control.
2860 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2861 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
/* Netmap debug trace of register state on entry. */
2864 D("%s read HLREG 0x%x rxc 0x%x",
2865 onoff ? "enter" : "exit", hl, rxc);
2867 /* hw requirements ... */
2868 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2869 rxc |= IXGBE_RDRXCTL_RSCACKC;
2870 if (onoff && !crcstrip) {
2871 /* keep the crc. Fast rx */
2872 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2873 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2875 /* reset default mode */
2876 hl |= IXGBE_HLREG0_RXCRCSTRP;
2877 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
/* Trace and commit the updated register values. */
2881 D("%s write HLREG 0x%x rxc 0x%x",
2882 onoff ? "enter" : "exit", hl, rxc);
2884 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2885 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2886 } /* ixgbe_if_crcstrip_set */
2888 /*********************************************************************
2889 * ixgbe_if_init - Init entry point
2891 * Used in two ways: It is used by the stack as an init
2892 * entry point in network interface structure. It is also
2893 * used by the driver as a hw/sw initialization routine to
2894 * get to a consistent state.
2896 * Return 0 on success, positive on failure
2897 **********************************************************************/
2899 ixgbe_if_init(if_ctx_t ctx)
2901 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2902 struct ifnet *ifp = iflib_get_ifp(ctx);
2903 device_t dev = iflib_get_dev(ctx);
2904 struct ixgbe_hw *hw = &sc->hw;
2905 struct ix_rx_queue *rx_que;
2906 struct ix_tx_queue *tx_que;
2913 INIT_DEBUGOUT("ixgbe_if_init: begin");
2915 /* Queue indices may change with IOV mode */
2916 ixgbe_align_all_queue_indices(sc);
2918 /* reprogram the RAR[0] in case user changed it. */
2919 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
2921 /* Get the latest mac address, User can use a LAA */
2922 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2923 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
2924 hw->addr_ctrl.rar_used_count = 1;
2928 ixgbe_initialize_iov(sc);
2930 ixgbe_initialize_transmit_units(ctx);
2932 /* Setup Multicast table */
2933 ixgbe_if_multi_set(ctx);
2935 /* Determine the correct mbuf pool, based on frame size */
2936 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2938 /* Configure RX settings */
2939 ixgbe_initialize_receive_units(ctx);
2942 * Initialize variable holding task enqueue requests
2943 * from MSI-X interrupts
2945 sc->task_requests = 0;
2947 /* Enable SDP & MSI-X interrupts based on adapter */
2948 ixgbe_config_gpie(sc);
/* Program the max frame size register when running jumbo frames. */
2951 if (ifp->if_mtu > ETHERMTU) {
2952 /* aka IXGBE_MAXFRS on 82599 and newer */
2953 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2954 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2955 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2956 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2959 /* Now enable all the queues */
2960 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
2961 struct tx_ring *txr = &tx_que->txr;
2963 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2964 txdctl |= IXGBE_TXDCTL_ENABLE;
2965 /* Set WTHRESH to 8, burst writeback */
2966 txdctl |= (8 << 16);
2968 * When the internal queue falls below PTHRESH (32),
2969 * start prefetching as long as there are at least
2970 * HTHRESH (1) buffers ready. The values are taken
2971 * from the Intel linux driver 3.8.21.
2972 * Prefetching enables tx line rate even with 1 queue.
2974 txdctl |= (32 << 0) | (1 << 8);
2975 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX queue and poll (up to 10 reads) for the enable to latch. */
2978 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
2979 struct rx_ring *rxr = &rx_que->rxr;
2981 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2982 if (hw->mac.type == ixgbe_mac_82598EB) {
2988 rxdctl &= ~0x3FFFFF;
2991 rxdctl |= IXGBE_RXDCTL_ENABLE;
2992 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2993 for (j = 0; j < 10; j++) {
2994 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2995 IXGBE_RXDCTL_ENABLE)
3003 /* Enable Receive engine */
3004 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3005 if (hw->mac.type == ixgbe_mac_82598EB)
3006 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3007 rxctrl |= IXGBE_RXCTRL_RXEN;
3008 ixgbe_enable_rx_dma(hw, rxctrl);
3010 /* Set up MSI/MSI-X routing */
3011 if (ixgbe_enable_msix) {
3012 ixgbe_configure_ivars(sc);
3013 /* Set up auto-mask */
3014 if (hw->mac.type == ixgbe_mac_82598EB)
3015 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3017 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3018 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3020 } else { /* Simple settings for Legacy/MSI */
3021 ixgbe_set_ivar(sc, 0, 0, 0);
3022 ixgbe_set_ivar(sc, 0, 0, 1);
3023 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3026 ixgbe_init_fdir(sc);
3029 * Check on any SFP devices that
3030 * need to be kick-started
3032 if (hw->phy.type == ixgbe_phy_none) {
3033 err = hw->phy.ops.identify(hw);
3034 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3036 "Unsupported SFP+ module type was detected.\n");
3041 /* Set moderation on the Link interrupt */
3042 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3044 /* Enable power to the phy. */
3045 ixgbe_set_phy_power(hw, true);
3047 /* Config/Enable Link */
3048 ixgbe_config_link(ctx);
3050 /* Hardware Packet Buffer & Flow Control setup */
3051 ixgbe_config_delay_values(sc);
3053 /* Initialize the FC settings */
3056 /* Set up VLAN support and filter */
3057 ixgbe_setup_vlan_hw_support(ctx);
3059 /* Setup DMA Coalescing */
3060 ixgbe_config_dmac(sc);
3062 /* And now turn on interrupts */
3063 ixgbe_if_enable_intr(ctx);
3065 /* Enable the use of the MBX by the VF's */
/* PFRSTD tells the VFs the PF reset is done so mailbox traffic may resume. */
3066 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3067 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3068 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3069 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3072 } /* ixgbe_if_init */
3074 /************************************************************************
3077 * Setup the correct IVAR register for a particular MSI-X interrupt
3078 * (yes this is all very magic and confusing :)
3079 * - entry is the register array entry
3080 * - vector is the MSI-X vector for this queue
3081 * - type is RX/TX/MISC
3082 ************************************************************************/
/*
 * Program one IVAR slot so MSI-X 'vector' services queue 'entry'.
 * type: 0 = RX, 1 = TX, -1 = misc/link cause.  Each 32-bit IVAR register
 * packs four (82598) or two pairs (82599+) of 8-bit vector entries.
 */
3084 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3086 struct ixgbe_hw *hw = &sc->hw;
/* Hardware requires the "allocation valid" bit set in every IVAR entry. */
3089 vector |= IXGBE_IVAR_ALLOC_VAL;
3091 switch (hw->mac.type) {
3092 case ixgbe_mac_82598EB:
3094 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
/* On 82598 TX entries live 64 slots above the RX entries. */
3096 entry += (type * 64);
3097 index = (entry >> 2) & 0x1F;
3098 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3099 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3100 ivar |= (vector << (8 * (entry & 0x3)));
3101 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3103 case ixgbe_mac_82599EB:
3104 case ixgbe_mac_X540:
3105 case ixgbe_mac_X550:
3106 case ixgbe_mac_X550EM_x:
3107 case ixgbe_mac_X550EM_a:
3108 if (type == -1) { /* MISC IVAR */
3109 index = (entry & 1) * 8;
3110 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3111 ivar &= ~(0xFF << index);
3112 ivar |= (vector << index);
3113 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3114 } else { /* RX/TX IVARS */
/* Two queues per IVAR register; RX in the low byte, TX 8 bits above. */
3115 index = (16 * (entry & 1)) + (8 * type);
3116 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3117 ivar &= ~(0xFF << index);
3118 ivar |= (vector << index);
3119 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3124 } /* ixgbe_set_ivar */
3126 /************************************************************************
3127 * ixgbe_configure_ivars
3128 ************************************************************************/
/*
 * Route every RX/TX queue and the link cause to its MSI-X vector and
 * seed each RX vector's EITR (interrupt throttle) register.
 */
3130 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3132 struct ix_rx_queue *rx_que = sc->rx_queues;
3133 struct ix_tx_queue *tx_que = sc->tx_queues;
/* EITR granularity: 4000000/rate, low 3 bits reserved (hence 0x0FF8 mask). */
3136 if (ixgbe_max_interrupt_rate > 0)
3137 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3140 * Disable DMA coalescing if interrupt moderation is
3147 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3148 struct rx_ring *rxr = &rx_que->rxr;
3150 /* First the RX queue entry */
3151 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3153 /* Set an Initial EITR value */
3154 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3156 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3157 struct tx_ring *txr = &tx_que->txr;
3159 /* ... and the TX */
3160 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3162 /* For the Link interrupt */
3163 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3164 } /* ixgbe_configure_ivars */
3166 /************************************************************************
3168 ************************************************************************/
/*
 * Configure the GPIE register: MSI-X mode bits plus the SDP (software
 * definable pin) interrupts appropriate for this MAC's fan/thermal/SFP
 * wiring.
 */
3170 ixgbe_config_gpie(struct ixgbe_softc *sc)
3172 struct ixgbe_hw *hw = &sc->hw;
3175 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3177 if (sc->intr_type == IFLIB_INTR_MSIX) {
3178 /* Enable Enhanced MSI-X mode */
3179 gpie |= IXGBE_GPIE_MSIX_MODE
3181 | IXGBE_GPIE_PBA_SUPPORT
3185 /* Fan Failure Interrupt */
3186 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3187 gpie |= IXGBE_SDP1_GPIEN;
3189 /* Thermal Sensor Interrupt */
3190 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3191 gpie |= IXGBE_SDP0_GPIEN_X540;
3193 /* Link detection */
/* Which SDP pins signal module/link events differs per MAC generation. */
3194 switch (hw->mac.type) {
3195 case ixgbe_mac_82599EB:
3196 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3198 case ixgbe_mac_X550EM_x:
3199 case ixgbe_mac_X550EM_a:
3200 gpie |= IXGBE_SDP0_GPIEN_X540;
3206 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3208 } /* ixgbe_config_gpie */
3210 /************************************************************************
3211 * ixgbe_config_delay_values
3213 * Requires sc->max_frame_size to be set.
3214 ************************************************************************/
/*
 * Compute flow-control high/low watermarks from the max frame size and
 * the RX packet buffer size, plus the pause time and XON behavior.
 * Requires sc->max_frame_size to be set.
 */
3216 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3218 struct ixgbe_hw *hw = &sc->hw;
3219 u32 rxpb, frame, size, tmp;
3221 frame = sc->max_frame_size;
3223 /* Calculate High Water */
/* X540-class MACs use a different delay-value formula than 82598/82599. */
3224 switch (hw->mac.type) {
3225 case ixgbe_mac_X540:
3226 case ixgbe_mac_X550:
3227 case ixgbe_mac_X550EM_x:
3228 case ixgbe_mac_X550EM_a:
3229 tmp = IXGBE_DV_X540(frame, frame);
3232 tmp = IXGBE_DV(frame, frame);
/* Convert the delay value (bit times) to KB and leave that much headroom. */
3235 size = IXGBE_BT2KB(tmp);
3236 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3237 hw->fc.high_water[0] = rxpb - size;
3239 /* Now calculate Low Water */
3240 switch (hw->mac.type) {
3241 case ixgbe_mac_X540:
3242 case ixgbe_mac_X550:
3243 case ixgbe_mac_X550EM_x:
3244 case ixgbe_mac_X550EM_a:
3245 tmp = IXGBE_LOW_DV_X540(frame);
3248 tmp = IXGBE_LOW_DV(frame);
3251 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3253 hw->fc.pause_time = IXGBE_FC_PAUSE;
3254 hw->fc.send_xon = true;
3255 } /* ixgbe_config_delay_values */
3257 /************************************************************************
3258 * ixgbe_if_multi_set - Multicast Update
3260 * Called whenever multicast address list is updated.
3261 ************************************************************************/
/*
 * if_multi_apply() callback: copy one link-layer multicast address into
 * the softc's mta[] staging table at slot 'idx'.  Non-AF_LINK entries and
 * entries past the hardware table size are skipped (return values for the
 * skip/accept paths are on elided lines).
 */
3263 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int idx)
3265 struct ixgbe_softc *sc = arg;
3266 struct ixgbe_mc_addr *mta = sc->mta;
3268 if (ifma->ifma_addr->sa_family != AF_LINK)
3270 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3272 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3273 mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
/* Tag the entry with this port's VMDq pool for SR-IOV filtering. */
3274 mta[idx].vmdq = sc->pool;
3277 } /* ixgbe_mc_filter_apply */
/*
 * Rebuild the hardware multicast filter from the interface's current
 * multicast list, and set FCTRL promiscuous/allmulti bits to match the
 * interface flags.
 */
3280 ixgbe_if_multi_set(if_ctx_t ctx)
3282 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3283 struct ixgbe_mc_addr *mta;
3284 struct ifnet *ifp = iflib_get_ifp(ctx);
3289 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3292 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
/* Stage every address into mta[]; mcnt is how many were collected. */
3294 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
3296 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3298 if (ifp->if_flags & IFF_PROMISC)
3299 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
/* Too many groups for the table -> fall back to multicast-promiscuous. */
3300 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3301 ifp->if_flags & IFF_ALLMULTI) {
3302 fctrl |= IXGBE_FCTRL_MPE;
3303 fctrl &= ~IXGBE_FCTRL_UPE;
3305 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3307 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3309 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3310 update_ptr = (u8 *)mta;
3311 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3312 ixgbe_mc_array_itr, true);
3315 } /* ixgbe_if_multi_set */
3317 /************************************************************************
3318 * ixgbe_mc_array_itr
3320 * An iterator function needed by the multicast shared code.
3321 * It feeds the shared code routine the addresses in the
3322 * array of ixgbe_set_multi() one by one.
3323 ************************************************************************/
/*
 * Iterator handed to the shared code: yield the next staged multicast
 * address and advance *update_ptr one ixgbe_mc_addr entry.  (The lines
 * returning the address/vmdq are elided in this view.)
 */
3325 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3327 struct ixgbe_mc_addr *mta;
3329 mta = (struct ixgbe_mc_addr *)*update_ptr;
3332 *update_ptr = (u8*)(mta + 1);
3335 } /* ixgbe_mc_array_itr */
3337 /************************************************************************
3338 * ixgbe_if_timer - Timer routine
3340 * Checks for link status, updates statistics,
3341 * and runs the watchdog check.
3342 ************************************************************************/
/*
 * Periodic iflib timer: probe for newly inserted SFP optics, refresh the
 * cached link state, and kick the admin task to act on it.
 */
3344 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3346 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3351 /* Check for pluggable optics */
3353 if (!ixgbe_sfp_probe(ctx))
3354 return; /* Nothing to do */
/* Update sc->link_speed / sc->link_up from hardware (no wait). */
3356 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3358 /* Fire off the adminq task */
3359 iflib_admin_intr_deferred(ctx);
3361 } /* ixgbe_if_timer */
3363 /************************************************************************
3366 * Determine if a port had optics inserted.
3367 ************************************************************************/
/*
 * Detect whether an SFP+ module has been inserted on a port that
 * previously had none (ixgbe_phy_nl only).  Returns true once supported
 * optics are present; the success path's return is on an elided line.
 */
3369 ixgbe_sfp_probe(if_ctx_t ctx)
3371 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3372 struct ixgbe_hw *hw = &sc->hw;
3373 device_t dev = iflib_get_dev(ctx);
3374 bool result = false;
3376 if ((hw->phy.type == ixgbe_phy_nl) &&
3377 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3378 s32 ret = hw->phy.ops.identify_sfp(hw);
3381 ret = hw->phy.ops.reset(hw);
/* Stop re-probing from the timer once a module has been handled. */
3382 sc->sfp_probe = false;
3383 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3384 device_printf(dev, "Unsupported SFP+ module detected!");
3386 "Reload driver with supported module.\n");
3389 device_printf(dev, "SFP+ module detected!\n");
3390 /* We now have supported optics */
3396 } /* ixgbe_sfp_probe */
3398 /************************************************************************
3399 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3400 ************************************************************************/
/*
 * SFP module-insertion task: verify the cage is occupied (on parts
 * needing the crosstalk fix), identify the module, run the MAC-specific
 * SFP setup, and queue an MSF (multispeed-fiber) pass on success.
 */
3402 ixgbe_handle_mod(void *context)
3404 if_ctx_t ctx = context;
3405 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3406 struct ixgbe_hw *hw = &sc->hw;
3407 device_t dev = iflib_get_dev(ctx);
3408 u32 err, cage_full = 0;
/* Crosstalk errata: a spurious interrupt can fire with no module present,
 * so read the cage-presence ESDP pin before trusting the event. */
3410 if (sc->hw.need_crosstalk_fix) {
3411 switch (hw->mac.type) {
3412 case ixgbe_mac_82599EB:
3413 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3416 case ixgbe_mac_X550EM_x:
3417 case ixgbe_mac_X550EM_a:
3418 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3426 goto handle_mod_out;
3429 err = hw->phy.ops.identify_sfp(hw);
3430 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3432 "Unsupported SFP+ module type was detected.\n");
3433 goto handle_mod_out;
/* 82598 resets the PHY; newer MACs run the full SFP setup routine. */
3436 if (hw->mac.type == ixgbe_mac_82598EB)
3437 err = hw->phy.ops.reset(hw);
3439 err = hw->mac.ops.setup_sfp(hw);
3441 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3443 "Setup failure - unsupported SFP+ module type.\n");
3444 goto handle_mod_out;
3446 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3450 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3451 } /* ixgbe_handle_mod */
3454 /************************************************************************
3455 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3456 ************************************************************************/
/*
 * Multispeed-fiber task: re-derive the supported PHY layer after a module
 * change, renegotiate/setup the link, and rebuild the ifmedia list so
 * ifconfig shows the media the new module supports.
 */
3458 ixgbe_handle_msf(void *context)
3460 if_ctx_t ctx = context;
3461 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3462 struct ixgbe_hw *hw = &sc->hw;
3466 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3467 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3469 autoneg = hw->phy.autoneg_advertised;
/* No advertised speeds configured: fall back to the hardware's caps. */
3470 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3471 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3472 if (hw->mac.ops.setup_link)
3473 hw->mac.ops.setup_link(hw, autoneg, true);
3475 /* Adjust media types shown in ifconfig */
3476 ifmedia_removeall(sc->media);
3477 ixgbe_add_media_types(sc->ctx);
3478 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3479 } /* ixgbe_handle_msf */
3481 /************************************************************************
3482 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3483 ************************************************************************/
/*
 * External-PHY task: service a LASI (link alarm status interrupt) event;
 * an over-temperature indication is reported as critical, any other
 * failure is logged with its error code.
 */
3485 ixgbe_handle_phy(void *context)
3487 if_ctx_t ctx = context;
3488 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3489 struct ixgbe_hw *hw = &sc->hw;
3492 error = hw->phy.ops.handle_lasi(hw);
3493 if (error == IXGBE_ERR_OVERTEMP)
3494 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3496 device_printf(sc->dev,
3497 "Error handling LASI interrupt: %d\n", error);
3498 } /* ixgbe_handle_phy */
3500 /************************************************************************
3501 * ixgbe_if_stop - Stop the hardware
3503 * Disables all traffic on the adapter by issuing a
3504 * global reset on the MAC and deallocates TX/RX buffers.
3505 ************************************************************************/
/*
 * iflib stop entry: halt the adapter (global MAC reset), disable the
 * laser, propagate link-down to the stack, and restore RAR[0].
 */
3507 ixgbe_if_stop(if_ctx_t ctx)
3509 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3510 struct ixgbe_hw *hw = &sc->hw;
3512 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
/* Clear the flag first so stop_adapter actually runs its sequence. */
3515 hw->adapter_stopped = false;
3516 ixgbe_stop_adapter(hw);
3517 if (hw->mac.type == ixgbe_mac_82599EB)
3518 ixgbe_stop_mac_link_on_d3_82599(hw);
3519 /* Turn off the laser - noop with no optics */
3520 ixgbe_disable_tx_laser(hw);
3522 /* Update the stack */
3523 sc->link_up = false;
3524 ixgbe_if_update_admin_status(ctx);
3526 /* reprogram the RAR[0] in case user changed it. */
3527 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3530 } /* ixgbe_if_stop */
3532 /************************************************************************
3533 * ixgbe_if_update_admin_status - Update OS on link state
3535 * Note: Only updates the OS on the cached link state.
3536 * The real check of the hardware only happens with
3538 ************************************************************************/
/*
 * Admin task: push the cached link state to the OS on transitions, then
 * drain and dispatch any task requests queued by the interrupt handlers,
 * and refresh the statistics counters.
 */
3540 ixgbe_if_update_admin_status(if_ctx_t ctx)
3542 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3543 device_t dev = iflib_get_dev(ctx);
/* Link-up transition (sc->link_up test is on an elided line above). */
3546 if (sc->link_active == false) {
3548 device_printf(dev, "Link is up %d Gbps %s \n",
3549 ((sc->link_speed == 128) ? 10 : 1),
3551 sc->link_active = true;
3552 /* Update any Flow Control changes */
3553 ixgbe_fc_enable(&sc->hw);
3554 /* Update DMA coalescing config */
3555 ixgbe_config_dmac(sc);
3556 /* should actually be negotiated value */
3557 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3559 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3560 ixgbe_ping_all_vfs(sc);
3562 } else { /* Link down */
3563 if (sc->link_active == true) {
3565 device_printf(dev, "Link is Down\n");
3566 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3567 sc->link_active = false;
3568 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3569 ixgbe_ping_all_vfs(sc);
3573 /* Handle task requests from msix_link() */
3574 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3575 ixgbe_handle_mod(ctx);
3576 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3577 ixgbe_handle_msf(ctx);
3578 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3579 ixgbe_handle_mbx(ctx);
3580 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3581 ixgbe_reinit_fdir(ctx);
3582 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3583 ixgbe_handle_phy(ctx);
/* All queued requests serviced; clear for the next interrupt. */
3584 sc->task_requests = 0;
3586 ixgbe_update_stats_counters(sc);
3587 } /* ixgbe_if_update_admin_status */
3589 /************************************************************************
3590 * ixgbe_config_dmac - Configure DMA Coalescing
3591 ************************************************************************/
/*
 * Program DMA coalescing (X550 and newer only) when the requested
 * watchdog timer or the link speed has changed since the last config.
 */
3593 ixgbe_config_dmac(struct ixgbe_softc *sc)
3595 struct ixgbe_hw *hw = &sc->hw;
3596 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* Older MACs have no DMAC support; bail (return is on an elided line). */
3598 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR detects any difference between cached config and current request. */
3601 if (dcfg->watchdog_timer ^ sc->dmac ||
3602 dcfg->link_speed ^ sc->link_speed) {
3603 dcfg->watchdog_timer = sc->dmac;
3604 dcfg->fcoe_en = false;
3605 dcfg->link_speed = sc->link_speed;
3608 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3609 dcfg->watchdog_timer, dcfg->link_speed);
3611 hw->mac.ops.dmac_config(hw);
3613 } /* ixgbe_config_dmac */
3615 /************************************************************************
3616 * ixgbe_if_enable_intr
3617 ************************************************************************/
/*
 * Enable interrupts: build the EIMS cause mask appropriate to the MAC
 * and enabled features, configure MSI-X auto-clear (EIAC), then unmask
 * every RX queue vector.
 */
3619 ixgbe_if_enable_intr(if_ctx_t ctx)
3621 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3622 struct ixgbe_hw *hw = &sc->hw;
3623 struct ix_rx_queue *que = sc->rx_queues;
/* Start from all causes except the queue bits (enabled per-queue below). */
3626 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3628 switch (sc->hw.mac.type) {
3629 case ixgbe_mac_82599EB:
3630 mask |= IXGBE_EIMS_ECC;
3631 /* Temperature sensor on some scs */
3632 mask |= IXGBE_EIMS_GPI_SDP0;
3633 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3634 mask |= IXGBE_EIMS_GPI_SDP1;
3635 mask |= IXGBE_EIMS_GPI_SDP2;
3637 case ixgbe_mac_X540:
3638 /* Detect if Thermal Sensor is enabled */
3639 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3640 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3641 mask |= IXGBE_EIMS_TS;
3642 mask |= IXGBE_EIMS_ECC;
3644 case ixgbe_mac_X550:
3645 /* MAC thermal sensor is automatically enabled */
3646 mask |= IXGBE_EIMS_TS;
3647 mask |= IXGBE_EIMS_ECC;
3649 case ixgbe_mac_X550EM_x:
3650 case ixgbe_mac_X550EM_a:
3651 /* Some devices use SDP0 for important information */
3652 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3653 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3654 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3655 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3656 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3657 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3658 mask |= IXGBE_EICR_GPI_SDP0_X540;
3659 mask |= IXGBE_EIMS_ECC;
3665 /* Enable Fan Failure detection */
3666 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3667 mask |= IXGBE_EIMS_GPI_SDP1;
3669 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3670 mask |= IXGBE_EIMS_MAILBOX;
3671 /* Enable Flow Director */
3672 if (sc->feat_en & IXGBE_FEATURE_FDIR)
3673 mask |= IXGBE_EIMS_FLOW_DIR;
3675 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3677 /* With MSI-X we use auto clear */
3678 if (sc->intr_type == IFLIB_INTR_MSIX) {
3679 mask = IXGBE_EIMS_ENABLE_MASK;
3680 /* Don't autoclear Link */
3681 mask &= ~IXGBE_EIMS_OTHER;
3682 mask &= ~IXGBE_EIMS_LSC;
3683 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
3684 mask &= ~IXGBE_EIMS_MAILBOX;
3685 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3689 * Now enable all queues, this is done separately to
3690 * allow for handling the extended (beyond 32) MSI-X
3691 * vectors that can be used by 82599
3693 for (int i = 0; i < sc->num_rx_queues; i++, que++)
3694 ixgbe_enable_queue(sc, que->msix);
3696 IXGBE_WRITE_FLUSH(hw);
3698 } /* ixgbe_if_enable_intr */
3700 /************************************************************************
3701 * ixgbe_disable_intr
3702 ************************************************************************/
/*
 * Mask all interrupt causes.  82598 uses a single 32-bit EIMC; newer
 * MACs also need the extended EIMC_EX registers for queue vectors >= 16.
 */
3704 ixgbe_if_disable_intr(if_ctx_t ctx)
3706 struct ixgbe_softc *sc = iflib_get_softc(ctx);
/* Turn off MSI-X auto-clear first so nothing re-arms behind us. */
3708 if (sc->intr_type == IFLIB_INTR_MSIX)
3709 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3710 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3711 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3713 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3714 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3715 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3717 IXGBE_WRITE_FLUSH(&sc->hw);
3719 } /* ixgbe_if_disable_intr */
3721 /************************************************************************
3722 * ixgbe_link_intr_enable
3723 ************************************************************************/
/* Re-arm the link-change and "other" causes after the admin task ran. */
3725 ixgbe_link_intr_enable(if_ctx_t ctx)
3727 struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3729 /* Re-enable other interrupts */
3730 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3731 } /* ixgbe_link_intr_enable */
3733 /************************************************************************
3734 * ixgbe_if_rx_queue_intr_enable
3735 ************************************************************************/
/* iflib per-queue re-enable: unmask the MSI-X vector of RX queue rxqid. */
3737 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3739 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3740 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3742 ixgbe_enable_queue(sc, que->msix);
3745 } /* ixgbe_if_rx_queue_intr_enable */
3747 /************************************************************************
3748 * ixgbe_enable_queue
3749 ************************************************************************/
/*
 * Unmask the EIMS bit for one MSI-X vector.  A 64-bit queue bitmap is
 * used because post-82598 MACs split queue causes across EIMS_EX(0)/(1).
 */
3751 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3753 struct ixgbe_hw *hw = &sc->hw;
3754 u64 queue = 1ULL << vector;
3757 if (hw->mac.type == ixgbe_mac_82598EB) {
3758 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3759 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3761 mask = (queue & 0xFFFFFFFF);
3763 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3764 mask = (queue >> 32);
3766 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3768 } /* ixgbe_enable_queue */
3770 /************************************************************************
3771 * ixgbe_disable_queue
3772 ************************************************************************/
/*
 * Mask the EIMC bit for one MSI-X vector — exact mirror of
 * ixgbe_enable_queue() but writing the interrupt-mask-clear registers.
 */
3774 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
3776 struct ixgbe_hw *hw = &sc->hw;
3777 u64 queue = 1ULL << vector;
3780 if (hw->mac.type == ixgbe_mac_82598EB) {
3781 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3782 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3784 mask = (queue & 0xFFFFFFFF);
3786 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3787 mask = (queue >> 32);
3789 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3791 } /* ixgbe_disable_queue */
3793 /************************************************************************
3794 * ixgbe_intr - Legacy Interrupt Service Routine
3795 ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt filter: decode EICR, flag admin work
 * (link change, SFP module/MSF, external PHY) for the admin task, and
 * tell iflib to schedule the thread-context handler.
 */
3797 ixgbe_intr(void *arg)
3799 struct ixgbe_softc *sc = arg;
3800 struct ix_rx_queue *que = sc->rx_queues;
3801 struct ixgbe_hw *hw = &sc->hw;
3802 if_ctx_t ctx = sc->ctx;
3803 u32 eicr, eicr_mask;
/* Reading EICR also clears it on these parts. */
3805 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious interrupt (no cause bits): re-enable and claim it handled. */
3809 ixgbe_if_enable_intr(ctx);
3810 return (FILTER_HANDLED);
3813 /* Check for fan failure */
3814 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3815 (eicr & IXGBE_EICR_GPI_SDP1)) {
3816 device_printf(sc->dev,
3817 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3818 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3821 /* Link status change */
3822 if (eicr & IXGBE_EICR_LSC) {
3823 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3824 iflib_admin_intr_deferred(ctx);
3827 if (ixgbe_is_sfp(hw)) {
3828 /* Pluggable optics-related interrupt */
3829 if (hw->mac.type >= ixgbe_mac_X540)
3830 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3832 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3834 if (eicr & eicr_mask) {
3835 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3836 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
3839 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3840 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3841 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3842 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3843 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3847 /* External PHY interrupt */
3848 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3849 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3850 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
3852 return (FILTER_SCHEDULE_THREAD);
3855 /************************************************************************
3856 * ixgbe_free_pci_resources
3857 ************************************************************************/
/*
 * Release the admin IRQ, every per-queue IRQ, and the PCI memory BAR.
 */
3859 ixgbe_free_pci_resources(if_ctx_t ctx)
3861 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3862 struct ix_rx_queue *que = sc->rx_queues;
3863 device_t dev = iflib_get_dev(ctx);
3865 /* Release all MSI-X queue resources */
3866 if (sc->intr_type == IFLIB_INTR_MSIX)
3867 iflib_irq_free(ctx, &sc->irq);
3870 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
3871 iflib_irq_free(ctx, &que->que_irq);
3875 if (sc->pci_mem != NULL)
3876 bus_release_resource(dev, SYS_RES_MEMORY,
3877 rman_get_rid(sc->pci_mem), sc->pci_mem);
3878 } /* ixgbe_free_pci_resources */
3880 /************************************************************************
3881 * ixgbe_sysctl_flowcntl
3883 * SYSCTL wrapper around setting Flow Control
3884 ************************************************************************/
/*
 * sysctl handler: read/write the flow-control mode; applies a new value
 * via ixgbe_set_flowcntl() only when it differs from the current mode.
 */
3886 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3888 struct ixgbe_softc *sc;
3891 sc = (struct ixgbe_softc *)arg1;
3892 fc = sc->hw.fc.current_mode;
/* newptr == NULL means this was a read-only query; nothing to apply. */
3894 error = sysctl_handle_int(oidp, &fc, 0, req);
3895 if ((error) || (req->newptr == NULL))
3898 /* Don't bother if it's not changed */
3899 if (fc == sc->hw.fc.current_mode)
3902 return ixgbe_set_flowcntl(sc, fc);
3903 } /* ixgbe_sysctl_flowcntl */
3905 /************************************************************************
3906 * ixgbe_set_flowcntl - Set flow control
3908 * Flow control values:
3913 ************************************************************************/
/*
 * Apply a flow-control mode.  When FC is active with multiple RX queues,
 * per-queue drop is disabled (pause handles backpressure); when FC is
 * off, drop is enabled so one full queue cannot stall the others.
 */
3915 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
3918 case ixgbe_fc_rx_pause:
3919 case ixgbe_fc_tx_pause:
3921 sc->hw.fc.requested_mode = fc;
3922 if (sc->num_rx_queues > 1)
3923 ixgbe_disable_rx_drop(sc);
3926 sc->hw.fc.requested_mode = ixgbe_fc_none;
3927 if (sc->num_rx_queues > 1)
3928 ixgbe_enable_rx_drop(sc);
3934 /* Don't autoneg if forcing a value */
3935 sc->hw.fc.disable_fc_autoneg = true;
3936 ixgbe_fc_enable(&sc->hw);
3939 } /* ixgbe_set_flowcntl */
3941 /************************************************************************
3942 * ixgbe_enable_rx_drop
3944 * Enable the hardware to drop packets when the buffer is
3945 * full. This is useful with multiqueue, so that no single
3946 * queue being full stalls the entire RX engine. We only
3947 * enable this when Multiqueue is enabled AND Flow Control
3949 ************************************************************************/
/*
 * Set SRRCTL.DROP_EN on every RX ring (and QDE for each VF) so a full
 * queue drops packets instead of stalling the whole RX engine.
 */
3951 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
3953 struct ixgbe_hw *hw = &sc->hw;
3954 struct rx_ring *rxr;
3957 for (int i = 0; i < sc->num_rx_queues; i++) {
3958 rxr = &sc->rx_queues[i].rxr;
3959 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3960 srrctl |= IXGBE_SRRCTL_DROP_EN;
3961 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3964 /* enable drop for each vf */
3965 for (int i = 0; i < sc->num_vfs; i++) {
3966 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3967 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3970 } /* ixgbe_enable_rx_drop */
3972 /************************************************************************
3973 * ixgbe_disable_rx_drop
3974 ************************************************************************/
/*
 * Clear SRRCTL.DROP_EN on every RX ring (and QDE per VF); used when flow
 * control is active, so backpressure pauses the link instead of dropping.
 */
3976 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
3978 struct ixgbe_hw *hw = &sc->hw;
3979 struct rx_ring *rxr;
3982 for (int i = 0; i < sc->num_rx_queues; i++) {
3983 rxr = &sc->rx_queues[i].rxr;
3984 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3985 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3986 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3989 /* disable drop for each vf */
3990 for (int i = 0; i < sc->num_vfs; i++) {
3991 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3992 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3994 } /* ixgbe_disable_rx_drop */
3996 /************************************************************************
3997 * ixgbe_sysctl_advertise
3999 * SYSCTL wrapper around setting advertised speed
4000 ************************************************************************/
/*
 * sysctl handler: read/write the advertised-speed bitmask; delegates
 * validation and hardware programming to ixgbe_set_advertise().
 */
4002 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4004 struct ixgbe_softc *sc;
4005 int error, advertise;
4007 sc = (struct ixgbe_softc *)arg1;
4008 advertise = sc->advertise;
/* newptr == NULL means read-only query; return without applying. */
4010 error = sysctl_handle_int(oidp, &advertise, 0, req);
4011 if ((error) || (req->newptr == NULL))
4014 return ixgbe_set_advertise(sc, advertise);
4015 } /* ixgbe_sysctl_advertise */
4017 /************************************************************************
4018 * ixgbe_set_advertise - Control advertised link speed
4021 * 0x1 - advertise 100 Mb
4022 * 0x2 - advertise 1G
4023 * 0x4 - advertise 10G
4024 * 0x8 - advertise 10 Mb (yes, Mb)
4025 ************************************************************************/
/*
 * Validate and apply an advertised-speed bitmask
 * (0x1=100Mb, 0x2=1G, 0x4=10G, 0x8=10Mb), rejecting bits the link
 * hardware cannot advertise, then restart link setup.
 */
4027 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4029 device_t dev = iflib_get_dev(sc->ctx);
4030 struct ixgbe_hw *hw;
4031 ixgbe_link_speed speed = 0;
4032 ixgbe_link_speed link_caps = 0;
4033 s32 err = IXGBE_NOT_IMPLEMENTED;
4034 bool negotiate = false;
4036 /* Checks to validate new value */
4037 if (sc->advertise == advertise) /* no change */
4042 /* No speed changes for backplane media */
4043 if (hw->phy.media_type == ixgbe_media_type_backplane)
4046 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4047 (hw->phy.multispeed_fiber))) {
4048 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n")
4052 if (advertise < 0x1 || advertise > 0xF) {
4053 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4057 if (hw->mac.ops.get_link_capabilities) {
4058 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4060 if (err != IXGBE_SUCCESS) {
4061 device_printf(dev, "Unable to determine supported advertise speeds\n");
4066 /* Set new value and report new advertised mode */
/* Each requested bit must also be present in the hardware's link_caps. */
4067 if (advertise & 0x1) {
4068 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4069 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4072 speed |= IXGBE_LINK_SPEED_100_FULL;
4074 if (advertise & 0x2) {
4075 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4076 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4079 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4081 if (advertise & 0x4) {
4082 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4083 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4086 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4088 if (advertise & 0x8) {
4089 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4090 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4093 speed |= IXGBE_LINK_SPEED_10_FULL;
4096 hw->mac.autotry_restart = true;
4097 hw->mac.ops.setup_link(hw, speed, true);
4098 sc->advertise = advertise;
4101 } /* ixgbe_set_advertise */
4103 /************************************************************************
4104 * ixgbe_get_advertise - Get current advertised speed settings
4106 * Formatted for sysctl usage.
4108 * 0x1 - advertise 100 Mb
4109 * 0x2 - advertise 1G
4110 * 0x4 - advertise 10G
4111 * 0x8 - advertise 10 Mb (yes, Mb)
4112 ************************************************************************/
/*
 * Build the sysctl-style advertised-speed bitmask (0x1=100Mb, 0x2=1G,
 * 0x4=10G, 0x8=10Mb) from the MAC's reported link capabilities.
 * Returns early (elided lines) for media that is neither copper nor
 * multispeed fiber, or when the capability query fails.
 *
 * NOTE(review): the declaration of 'err' and the return statements are
 * elided in this excerpt.
 */
4114 ixgbe_get_advertise(struct ixgbe_softc *sc)
4116 struct ixgbe_hw *hw = &sc->hw;
4118 ixgbe_link_speed link_caps = 0;
4120 bool negotiate = false;
4123 * Advertised speed means nothing unless it's copper or
4126 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4127 !(hw->phy.multispeed_fiber))
4130 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4131 if (err != IXGBE_SUCCESS)
/* Translate the hardware capability bits into the sysctl bitmask. */
4135 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4136 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4137 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4138 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4141 } /* ixgbe_get_advertise */
4143 /************************************************************************
4144 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4147 * 0/1 - off / on (use default value of 1000)
4149 * Legal timer values are:
4150 * 50,100,250,500,1000,2000,5000,10000
4152 * Turning off interrupt moderation will also turn this off.
4153 ************************************************************************/
/*
 * Sysctl handler for DMA Coalescing: reads a 16-bit value from the
 * request, validates it (legal timer values per the header comment
 * above: 50,100,250,500,1000,2000,5000,10000 — the switch arms are
 * elided in this excerpt), and re-initializes the interface if it is
 * already running so the new setting takes effect.
 */
4155 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4157 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4158 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
/* Read the proposed value; bail out on error or a read-only query. */
4163 error = sysctl_handle_16(oidp, &newval, 0, req);
4164 if ((error) || (req->newptr == NULL))
4173 /* Enable and use default */
4184 /* Legal values - allow */
4188 /* Do nothing, illegal value */
4192 /* Re-initialize hardware if it's already running */
4193 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4197 } /* ixgbe_sysctl_dmac */
4200 /************************************************************************
4201 * ixgbe_sysctl_power_state
4203 * Sysctl to test power states
4205 * 0 - set device to D0
4206 * 3 - set device to D3
4207 * (none) - get current device power state
4208 ************************************************************************/
/*
 * Sysctl to test PCI power states: writing 3 suspends the device
 * (D0 -> D3), writing 0 resumes it (D3 -> D0); reading reports the
 * current state.  Only the 0<->3 transitions are acted on.
 */
4210 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4212 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4213 device_t dev = sc->dev;
4214 int curr_ps, new_ps, error = 0;
4216 curr_ps = new_ps = pci_get_powerstate(dev);
/* Read the requested state; return early on error or read-only query. */
4218 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4219 if ((error) || (req->newptr == NULL))
4222 if (new_ps == curr_ps)
/* Use the suspend/resume device methods to perform the transition. */
4225 if (new_ps == 3 && curr_ps == 0)
4226 error = DEVICE_SUSPEND(dev);
4227 else if (new_ps == 0 && curr_ps == 3)
4228 error = DEVICE_RESUME(dev);
4232 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4235 } /* ixgbe_sysctl_power_state */
4238 /************************************************************************
4239 * ixgbe_sysctl_wol_enable
4241 * Sysctl to enable/disable the WoL capability,
4242 * if supported by the adapter.
4247 ************************************************************************/
/*
 * Sysctl to enable/disable Wake-on-LAN.  Normalizes the input to 0/1,
 * rejects enabling when the adapter lacks WoL support (sc->wol_support),
 * and caches the result in hw->wol_enabled.
 */
4249 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4251 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4252 struct ixgbe_hw *hw = &sc->hw;
4253 int new_wol_enabled;
4256 new_wol_enabled = hw->wol_enabled;
4257 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4258 if ((error) || (req->newptr == NULL))
/* Collapse any non-zero input to 1 before comparing/storing. */
4260 new_wol_enabled = !!(new_wol_enabled);
4261 if (new_wol_enabled == hw->wol_enabled)
4264 if (new_wol_enabled > 0 && !sc->wol_support)
4267 hw->wol_enabled = new_wol_enabled;
4270 } /* ixgbe_sysctl_wol_enable */
4272 /************************************************************************
4273 * ixgbe_sysctl_wufc - Wake Up Filter Control
4275 * Sysctl to enable/disable the types of packets that the
4276 * adapter will wake up on upon receipt.
4278 * 0x1 - Link Status Change
4279 * 0x2 - Magic Packet
4280 * 0x4 - Direct Exact
4281 * 0x8 - Directed Multicast
4283 * 0x20 - ARP/IPv4 Request Packet
4284 * 0x40 - Direct IPv4 Packet
4285 * 0x80 - Direct IPv6 Packet
4287 * Settings not listed above will cause the sysctl to return an error.
4288 ************************************************************************/
/*
 * Sysctl for the Wake Up Filter Control bitmask (bit meanings listed in
 * the header comment above).  Only the low byte is caller-settable;
 * any bits in 0xffffff00 are rejected and the upper bits of the cached
 * value are preserved when storing.
 */
4290 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4292 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4296 new_wufc = sc->wufc;
4298 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4299 if ((error) || (req->newptr == NULL))
4301 if (new_wufc == sc->wufc)
/* Settings outside the documented low-byte flags are an error. */
4304 if (new_wufc & 0xffffff00)
/* Keep existing upper bits, merge in the new low-byte flags. */
4308 new_wufc |= (0xffffff & sc->wufc);
4309 sc->wufc = new_wufc;
4312 } /* ixgbe_sysctl_wufc */
4315 /************************************************************************
4316 * ixgbe_sysctl_print_rss_config
4317 ************************************************************************/
/*
 * Debug sysctl (compiled under IXGBE_DEBUG, per the #endif below) that
 * dumps the RSS redirection table (RETA, and extended ERETA entries for
 * indices >= 32) into an sbuf returned to the caller.  reta_size is
 * selected by MAC type (X550 family arms visible below; the assignments
 * themselves are elided in this excerpt).
 */
4319 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4321 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4322 struct ixgbe_hw *hw = &sc->hw;
4323 device_t dev = sc->dev;
4325 int error = 0, reta_size;
4328 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4330 device_printf(dev, "Could not allocate sbuf for output.\n");
4334 // TODO: use sbufs to make a string to print out
4335 /* Set multiplier for RETA setup and table size based on MAC */
4336 switch (sc->hw.mac.type) {
4337 case ixgbe_mac_X550:
4338 case ixgbe_mac_X550EM_x:
4339 case ixgbe_mac_X550EM_a:
4347 /* Print out the redirection table */
4348 sbuf_cat(buf, "\n");
4349 for (int i = 0; i < reta_size; i++) {
4351 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4352 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
/* Entries past 31 live in the extended ERETA register bank. */
4354 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4355 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4359 // TODO: print more config
4361 error = sbuf_finish(buf);
4363 device_printf(dev, "Error finishing sbuf: %d\n", error);
4368 } /* ixgbe_sysctl_print_rss_config */
4369 #endif /* IXGBE_DEBUG */
4371 /************************************************************************
4372 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4374 * For X552/X557-AT devices using an external PHY
4375 ************************************************************************/
/*
 * Sysctl reporting the external PHY temperature, supported only on
 * X552/X557-AT (device id X550EM_X_10G_T).  Reads the PHY's current-
 * temperature register over MDIO and returns the (shifted) value as a
 * read-only 16-bit sysctl.
 *
 * Fix: the second argument to read_reg had been mangled by an encoding
 * error ('®' — an HTML-entity corruption of '&reg'); restored to &reg.
 */
4377 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4379 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4380 struct ixgbe_hw *hw = &sc->hw;
/* Only the X552/X557-AT external PHY exposes this sensor. */
4383 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4384 device_printf(iflib_get_dev(sc->ctx),
4385 "Device has no supported external thermal sensor.\n");
4389 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4390 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4391 device_printf(iflib_get_dev(sc->ctx),
4392 "Error reading from PHY's current temperature register\n");
4396 /* Shift temp for output */
4399 return (sysctl_handle_16(oidp, NULL, reg, req));
4400 } /* ixgbe_sysctl_phy_temp */
4402 /************************************************************************
4403 * ixgbe_sysctl_phy_overtemp_occurred
4405 * Reports (directly from the PHY) whether the current PHY
4406 * temperature is over the overtemp threshold.
4407 ************************************************************************/
/*
 * Sysctl reporting whether the external PHY (X552/X557-AT only) is
 * currently over its overtemp threshold: reads the PHY overtemp status
 * register over MDIO and returns the occurrence bit (bit 14) as 0/1.
 *
 * Fix: the second argument to read_reg had been mangled by an encoding
 * error ('®' — an HTML-entity corruption of '&reg'); restored to &reg.
 */
4409 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4411 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4412 struct ixgbe_hw *hw = &sc->hw;
/* Only the X552/X557-AT external PHY exposes this sensor. */
4415 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4416 device_printf(iflib_get_dev(sc->ctx),
4417 "Device has no supported external thermal sensor.\n");
4421 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4422 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4423 device_printf(iflib_get_dev(sc->ctx),
4424 "Error reading from PHY's temperature status register\n");
4428 /* Get occurrence bit */
4429 reg = !!(reg & 0x4000);
4431 return (sysctl_handle_16(oidp, 0, reg, req));
4432 } /* ixgbe_sysctl_phy_overtemp_occurred */
4434 /************************************************************************
4435 * ixgbe_sysctl_eee_state
4437 * Sysctl to set EEE power saving feature
4441 * (none) - get current device EEE state
4442 ************************************************************************/
/*
 * Sysctl to toggle Energy Efficient Ethernet.  Validates that EEE is in
 * the capability mask and the input is 0/1, calls ixgbe_setup_eee() on
 * the hardware, then updates the enabled-feature bit to match.
 *
 * NOTE(review): the declaration of 'retval' and several control-flow
 * lines are elided in this excerpt.
 */
4444 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4446 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4447 device_t dev = sc->dev;
4448 struct ifnet *ifp = iflib_get_ifp(sc->ctx);
4449 int curr_eee, new_eee, error = 0;
/* Current state is derived from the enabled-features bitmask. */
4452 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4454 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4455 if ((error) || (req->newptr == NULL))
4459 if (new_eee == curr_eee)
4463 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4466 /* Bounds checking */
4467 if ((new_eee < 0) || (new_eee > 1))
4470 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4472 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4476 /* Restart auto-neg */
4479 device_printf(dev, "New EEE state: %d\n", new_eee);
4481 /* Cache new value */
4483 sc->feat_en |= IXGBE_FEATURE_EEE;
4485 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4488 } /* ixgbe_sysctl_eee_state */
4490 /************************************************************************
4491 * ixgbe_init_device_features
4492 ************************************************************************/
/*
 * Populate sc->feat_cap (what the hardware supports) and sc->feat_en
 * (what is actually enabled) based on MAC type and device id, then
 * apply the global tunables (fdir/msix/rss) and disable features whose
 * dependencies are unmet (RSS and SR-IOV both require MSI-X).
 *
 * NOTE(review): 'break' statements between switch arms are elided in
 * this excerpt; per-MAC comments below assume the usual one-arm-per-MAC
 * structure — confirm against the full source.
 */
4494 ixgbe_init_device_features(struct ixgbe_softc *sc)
/* Baseline capabilities common to every supported MAC. */
4496 sc->feat_cap = IXGBE_FEATURE_NETMAP
4499 | IXGBE_FEATURE_MSIX
4500 | IXGBE_FEATURE_LEGACY_IRQ;
4502 /* Set capabilities first... */
4503 switch (sc->hw.mac.type) {
4504 case ixgbe_mac_82598EB:
4505 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4506 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4508 case ixgbe_mac_X540:
4509 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4510 sc->feat_cap |= IXGBE_FEATURE_FDIR;
/* Bypass adapters expose the feature on function 0 only. */
4511 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4512 (sc->hw.bus.func == 0))
4513 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4515 case ixgbe_mac_X550:
4516 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4517 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4518 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4520 case ixgbe_mac_X550EM_x:
4521 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4522 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4524 case ixgbe_mac_X550EM_a:
4525 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4526 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4527 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4528 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4529 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4530 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4531 sc->feat_cap |= IXGBE_FEATURE_EEE;
4534 case ixgbe_mac_82599EB:
4535 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4536 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4537 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4538 (sc->hw.bus.func == 0))
4539 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4540 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4541 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4547 /* Enabled by default... */
4548 /* Fan failure detection */
4549 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4550 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4552 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4553 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4555 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4556 sc->feat_en |= IXGBE_FEATURE_EEE;
4557 /* Thermal Sensor */
4558 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4559 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4561 /* Enabled via global sysctl... */
4563 if (ixgbe_enable_fdir) {
4564 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4565 sc->feat_en |= IXGBE_FEATURE_FDIR;
4567 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
4570 * Message Signal Interrupts - Extended (MSI-X)
4571 * Normal MSI is only enabled if MSI-X calls fail.
4573 if (!ixgbe_enable_msix)
4574 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4575 /* Receive-Side Scaling (RSS) */
4576 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4577 sc->feat_en |= IXGBE_FEATURE_RSS;
4579 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors. */
4581 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4582 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4583 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4584 sc->feat_en &= ~IXGBE_FEATURE_RSS;
4585 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4587 } /* ixgbe_init_device_features */
4589 /************************************************************************
4590 * ixgbe_check_fan_failure
4591 ************************************************************************/
/*
 * Test the fan-failure indication in 'reg' and log a critical message
 * if set.  The bit to test depends on context: in interrupt context the
 * EICR GPI SDP1 bit (per-MAC) is used; the non-interrupt mask line is
 * elided in this excerpt.
 */
4593 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
4597 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
4601 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4602 } /* ixgbe_check_fan_failure */
4604 /************************************************************************
4605 * ixgbe_sbuf_fw_version
4606 ************************************************************************/
/*
 * Append firmware/NVM version information to 'buf': OEM NVM version,
 * Option ROM version, eTrack build id, and PHY firmware version — each
 * only if reported valid by the corresponding query.  'space' separates
 * successive fields (set between prints on lines elided here).
 */
4608 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4610 struct ixgbe_nvm_version nvm_ver = {0};
4613 const char *space = "";
4615 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4616 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4617 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4618 status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4620 if (nvm_ver.oem_valid) {
4621 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4622 nvm_ver.oem_minor, nvm_ver.oem_release);
4626 if (nvm_ver.or_valid) {
4627 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4628 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
/* Skip the eTrack id when it reads back as the all-invalid pattern. */
4632 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4634 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
4638 if (phyfw != 0 && status == IXGBE_SUCCESS)
4639 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4640 } /* ixgbe_sbuf_fw_version */
4642 /************************************************************************
4643 * ixgbe_print_fw_version
4644 ************************************************************************/
/*
 * Format the firmware version into an auto-sized sbuf and print it to
 * the console via device_printf (only if non-empty).  Used at attach
 * time; the sysctl variant below serves runtime queries.
 */
4646 ixgbe_print_fw_version(if_ctx_t ctx)
4648 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4649 struct ixgbe_hw *hw = &sc->hw;
4650 device_t dev = sc->dev;
4654 buf = sbuf_new_auto();
4656 device_printf(dev, "Could not allocate sbuf for output.\n");
4660 ixgbe_sbuf_fw_version(hw, buf);
4662 error = sbuf_finish(buf);
4664 device_printf(dev, "Error finishing sbuf: %d\n", error);
4665 else if (sbuf_len(buf))
4666 device_printf(dev, "%s\n", sbuf_data(buf));
4669 } /* ixgbe_print_fw_version */
4671 /************************************************************************
4672 * ixgbe_sysctl_print_fw_version
4673 ************************************************************************/
/*
 * Sysctl wrapper around ixgbe_sbuf_fw_version(): formats the firmware
 * version into a request-bound sbuf so the text is returned to the
 * sysctl caller rather than printed to the console.
 */
4675 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4677 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4678 struct ixgbe_hw *hw = &sc->hw;
4679 device_t dev = sc->dev;
4683 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4685 device_printf(dev, "Could not allocate sbuf for output.\n");
4689 ixgbe_sbuf_fw_version(hw, buf);
4691 error = sbuf_finish(buf);
4693 device_printf(dev, "Error finishing sbuf: %d\n", error);
4698 } /* ixgbe_sysctl_print_fw_version */