1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
41 #include "ixgbe_sriov.h"
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
47 /************************************************************************
49  ************************************************************************/
/* Driver version string, exported to iflib via isc_driver_version below. */
50 char ixgbe_driver_version[] = "4.0.1-k";
53 /************************************************************************
56  *   Used by probe to select devices to load on
57  *   Last field stores an index into ixgbe_strings
58  *   Last entry must be all 0s
60  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  ************************************************************************/
/* All entries share one human-readable description string; PVID() fills
 * the PCI vendor/device match fields for iflib_device_probe(). */
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108  /* required last entry */
 /* NOTE(review): the PVID_END terminator and closing brace are elided in
  * this excerpt -- confirm against the full source. */
112 static void *ixgbe_register(device_t dev);
113 static int ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int ixgbe_if_attach_post(if_ctx_t ctx);
115 static int ixgbe_if_detach(if_ctx_t ctx);
116 static int ixgbe_if_shutdown(if_ctx_t ctx);
117 static int ixgbe_if_suspend(if_ctx_t ctx);
118 static int ixgbe_if_resume(if_ctx_t ctx);
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
124 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
125 static int ixgbe_if_media_change(if_ctx_t ctx);
126 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
127 static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
128 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
129 static void ixgbe_if_multi_set(if_ctx_t ctx);
130 static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
131 static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
132 uint64_t *paddrs, int nrxqs, int nrxqsets);
133 static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
134 uint64_t *paddrs, int nrxqs, int nrxqsets);
135 static void ixgbe_if_queues_free(if_ctx_t ctx);
136 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
137 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
138 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
139 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
140 static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
141 int ixgbe_intr(void *arg);
143 /************************************************************************
144 * Function prototypes
145 ************************************************************************/
146 #if __FreeBSD_version >= 1100036
147 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
150 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
153 static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
154 static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 static void ixgbe_config_dmac(struct adapter *adapter);
157 static void ixgbe_configure_ivars(struct adapter *adapter);
158 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
161 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 static int ixgbe_msix_link(void *arg);
166 static int ixgbe_msix_que(void *arg);
167 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
168 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
169 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 static int ixgbe_setup_interface(if_ctx_t ctx);
172 static void ixgbe_init_device_features(struct adapter *adapter);
173 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
174 static void ixgbe_add_media_types(if_ctx_t ctx);
175 static void ixgbe_update_stats_counters(struct adapter *adapter);
176 static void ixgbe_config_link(struct adapter *adapter);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static void ixgbe_check_wol_support(struct adapter *adapter);
179 static void ixgbe_enable_rx_drop(struct adapter *);
180 static void ixgbe_disable_rx_drop(struct adapter *);
182 static void ixgbe_add_hw_stats(struct adapter *adapter);
183 static int ixgbe_set_flowcntl(struct adapter *, int);
184 static int ixgbe_set_advertise(struct adapter *, int);
185 static int ixgbe_get_advertise(struct adapter *);
186 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
187 static void ixgbe_config_gpie(struct adapter *adapter);
188 static void ixgbe_config_delay_values(struct adapter *adapter);
190 /* Sysctl handlers */
191 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
206 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
207 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 /* Deferred interrupt tasklets */
210 static void ixgbe_handle_msf(void *);
211 static void ixgbe_handle_mod(void *);
212 static void ixgbe_handle_phy(void *);
214 /************************************************************************
215 * FreeBSD Device Interface Entry Points
216 ************************************************************************/
/* newbus device methods, all delegated to the iflib framework; iflib in
 * turn calls back into the ifdi_* methods in ixgbe_if_methods below. */
217 static device_method_t ix_methods[] = {
218 	/* Device interface */
219 	DEVMETHOD(device_register, ixgbe_register),
220 	DEVMETHOD(device_probe, iflib_device_probe),
221 	DEVMETHOD(device_attach, iflib_device_attach),
222 	DEVMETHOD(device_detach, iflib_device_detach),
223 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
224 	DEVMETHOD(device_suspend, iflib_device_suspend),
225 	DEVMETHOD(device_resume, iflib_device_resume),
	/* SR-IOV (PCI IOV) entry points, also handled by iflib */
227 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
228 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
229 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/* newbus driver glue: attaches the "ix" driver to the pci bus and declares
 * its module dependencies (pci, ether, iflib). */
234 static driver_t ix_driver = {
235 	"ix", ix_methods, sizeof(struct adapter),
238 devclass_t ix_devclass;
239 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
240 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
241 MODULE_DEPEND(ix, pci, 1, 1, 1);
242 MODULE_DEPEND(ix, ether, 1, 1, 1);
243 MODULE_DEPEND(ix, iflib, 1, 1, 1);
/* iflib ifdi_* method table: maps iflib driver-independent callbacks onto
 * this driver's ixgbe_if_* implementations. */
245 static device_method_t ixgbe_if_methods[] = {
246 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
247 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
248 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
249 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
250 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
251 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
252 	DEVMETHOD(ifdi_init, ixgbe_if_init),
253 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
254 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
255 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
256 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	/* NOTE(review): ifdi_tx_queue_intr_enable is wired to the RX enable
	 * routine; this matches upstream if_ix.c but is worth confirming. */
257 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
259 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
260 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
261 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
262 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
263 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
264 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
265 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
266 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
267 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
268 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
269 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
270 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
271 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
272 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
273 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	/* SR-IOV virtual function management */
275 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
283  * TUNEABLE PARAMETERS:
/* All tunables below are CTLFLAG_RDTUN: settable from loader.conf at boot,
 * read-only thereafter. They provide the defaults for every adapter. */
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
287 static driver_t ixgbe_if_driver = {
288   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
/* Cap on per-queue interrupt rate; used by the interrupt-rate sysctl. */
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306  * Smart speed setting, default to on.
307  * This only works as a compile option
308  * right now as it is applied during attach; set
309  * this to 'ixgbe_smart_speed_off' to
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315  * MSI-X should be the default for best performance,
316  * but this allows it to be forced off for testing.
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320     "Enable MSI-X interrupts");
323  * Defining this on will allow the use
324  * of unsupported SFP+ modules; note that
325  * doing so you are on your own :)
327 static int allow_unsupported_sfp = FALSE;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329     &allow_unsupported_sfp, 0,
330     "Allow unsupported SFP modules...use at your own risk");
333  * Not sure if Flow Director is fully baked,
334  * so we'll default to turning it off.
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338     "Enable Flow Director");
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343     "Enable Receive-Side Scaling (RSS)");
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353  * For Flow Director: this is the number of TX packets we sample
354  * for the filter pool; this means every 20th packet will be probed.
356  * This feature can be disabled by setting this to 0.
358 static int atr_sample_rate = 20;
/* TX/RX fast-path method table, defined in ix_txrx.c. */
360 extern struct if_txrx ixgbe_txrx;
/* Shared-context template handed to iflib: DMA alignment/size limits,
 * descriptor ring bounds, and the driver/vendor tables declared above. */
362 static struct if_shared_ctx ixgbe_sctx_init = {
363 	.isc_magic = IFLIB_MAGIC,
364 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
365 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
366 	.isc_tx_maxsegsize = PAGE_SIZE,
367 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tso_maxsegsize = PAGE_SIZE,
369 	.isc_rx_maxsize = PAGE_SIZE*4,
370 	.isc_rx_nsegments = 1,
371 	.isc_rx_maxsegsize = PAGE_SIZE*4,
376 	.isc_admin_intrcnt = 1,
377 	.isc_vendor_info = ixgbe_vendor_info_array,
378 	.isc_driver_version = ixgbe_driver_version,
379 	.isc_driver = &ixgbe_if_driver,
	/* single-element arrays: one TX and one RX descriptor-ring class */
381 	.isc_nrxd_min = {MIN_RXD},
382 	.isc_ntxd_min = {MIN_TXD},
383 	.isc_nrxd_max = {MAX_RXD},
384 	.isc_ntxd_max = {MAX_TXD},
385 	.isc_nrxd_default = {DEFAULT_RXD},
386 	.isc_ntxd_default = {DEFAULT_TXD},
389 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
391 /************************************************************************
392  * ixgbe_if_tx_queues_alloc
 *
 *   iflib callback: allocate per-queue TX software state for ntxqsets
 *   queues. vaddrs/paddrs hold the ring memory iflib already allocated.
 *   On malloc failure, frees partial state via ixgbe_if_queues_free().
393  ************************************************************************/
395 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
396     int ntxqs, int ntxqsets)
398 	struct adapter *adapter = iflib_get_softc(ctx);
399 	if_softc_ctx_t scctx = adapter->shared;
400 	struct ix_tx_queue *que;
403 	MPASS(adapter->num_tx_queues > 0);
404 	MPASS(adapter->num_tx_queues == ntxqsets);
407 	/* Allocate queue structure memory */
409 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
410 	    M_IXGBE, M_NOWAIT | M_ZERO);
411 	if (!adapter->tx_queues) {
412 		device_printf(iflib_get_dev(ctx),
413 		    "Unable to allocate TX ring memory\n");
417 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
418 		struct tx_ring *txr = &que->txr;
420 		/* In case SR-IOV is enabled, align the index properly */
421 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
424 		txr->adapter = que->adapter = adapter;
		/* mark this hardware queue as in use */
425 		adapter->active_queues |= (u64)1 << txr->me;
427 		/* Allocate report status array */
428 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
429 		if (txr->tx_rsq == NULL) {
433 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
434 			txr->tx_rsq[j] = QIDX_INVALID;
435 		/* get the virtual and physical address of the hardware queues */
436 		txr->tail = IXGBE_TDT(txr->me);
437 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
438 		txr->tx_paddr = paddrs[i];
441 		txr->total_packets = 0;
443 		/* Set the rate at which we sample packets */
444 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
445 			txr->atr_sample = atr_sample_rate;
	/* Register deferred tasks for SFP module/MSF/PHY events */
449 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
451 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
453 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
455 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
456 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
457 		    ixgbe_handle_mbx, "mbx_task");
458 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
459 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
460 		    ixgbe_reinit_fdir, "fdir_task");
462 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
463 	    adapter->num_tx_queues);
	/* error unwind: release any queue state allocated so far */
468 	ixgbe_if_queues_free(ctx);
471 } /* ixgbe_if_tx_queues_alloc */
473 /************************************************************************
474  * ixgbe_if_rx_queues_alloc
 *
 *   iflib callback: allocate per-queue RX software state for nrxqsets
 *   queues. vaddrs/paddrs hold the ring memory iflib already allocated.
475  ************************************************************************/
477 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478     int nrxqs, int nrxqsets)
480 	struct adapter *adapter = iflib_get_softc(ctx);
481 	struct ix_rx_queue *que;
484 	MPASS(adapter->num_rx_queues > 0);
485 	MPASS(adapter->num_rx_queues == nrxqsets);
488 	/* Allocate queue structure memory */
490 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
491 	    M_IXGBE, M_NOWAIT | M_ZERO);
492 	if (!adapter->rx_queues) {
493 		device_printf(iflib_get_dev(ctx),
		/* bug fix: this is the RX path -- message previously said "TX" */
494 		    "Unable to allocate RX ring memory\n");
498 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
499 		struct rx_ring *rxr = &que->rxr;
501 		/* In case SR-IOV is enabled, align the index properly */
502 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
505 		rxr->adapter = que->adapter = adapter;
507 		/* get the virtual and physical address of the hw queues */
508 		rxr->tail = IXGBE_RDT(rxr->me);
509 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
510 		rxr->rx_paddr = paddrs[i];
515 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
516 	    adapter->num_rx_queues);
519 } /* ixgbe_if_rx_queues_alloc */
521 /************************************************************************
522  * ixgbe_if_queues_free
 *
 *   iflib callback: release all per-queue software state allocated by
 *   the tx/rx queues_alloc routines. Safe to call after a partial
 *   allocation failure (NULL checks throughout).
523  ************************************************************************/
525 ixgbe_if_queues_free(if_ctx_t ctx)
527 	struct adapter *adapter = iflib_get_softc(ctx);
528 	struct ix_tx_queue *tx_que = adapter->tx_queues;
529 	struct ix_rx_queue *rx_que = adapter->rx_queues;
532 	if (tx_que != NULL) {
533 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
534 			struct tx_ring *txr = &tx_que->txr;
			/* skip queues whose report-status array failed to allocate */
535 			if (txr->tx_rsq == NULL)
538 			free(txr->tx_rsq, M_IXGBE);
542 		free(adapter->tx_queues, M_IXGBE);
		/* NULL out so a second call is a no-op */
543 		adapter->tx_queues = NULL;
545 	if (rx_que != NULL) {
546 		free(adapter->rx_queues, M_IXGBE);
547 		adapter->rx_queues = NULL;
549 } /* ixgbe_if_queues_free */
551 /************************************************************************
552  * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), hash key (RSSRK),
 *   and hash-type selection (MRQC). When the kernel RSS option is
 *   enabled (IXGBE_FEATURE_RSS), the kernel's key and bucket mapping
 *   are used; otherwise a random key and round-robin mapping apply.
553  ************************************************************************/
555 ixgbe_initialize_rss_mapping(struct adapter *adapter)
557 	struct ixgbe_hw *hw = &adapter->hw;
558 	u32             reta = 0, mrqc, rss_key[10];
559 	int             queue_id, table_size, index_mult;
563 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
564 		/* Fetch the configured RSS key */
565 		rss_getkey((uint8_t *)&rss_key);
		/* NOTE(review): the else-branch brace appears elided in this
		 * excerpt; arc4rand() should only run when kernel RSS is off. */
567 		/* set up random bits */
568 		arc4rand(&rss_key, sizeof(rss_key), 0);
571 	/* Set multiplier for RETA setup and table size based on MAC */
574 	switch (adapter->hw.mac.type) {
575 	case ixgbe_mac_82598EB:
579 	case ixgbe_mac_X550EM_x:
580 	case ixgbe_mac_X550EM_a:
587 	/* Set up the redirection table */
588 	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* wrap the round-robin queue index */
589 		if (j == adapter->num_rx_queues)
592 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
594 			 * Fetch the RSS bucket id for the given indirection
595 			 * entry. Cap it at the number of configured buckets
596 			 * (which is num_rx_queues.)
598 			queue_id = rss_get_indirection_to_bucket(i);
599 			queue_id = queue_id % adapter->num_rx_queues;
601 			queue_id = (j * index_mult);
604 		 * The low 8 bits are for hash value (n+0);
605 		 * The next 8 bits are for hash value (n+1), etc.
608 		reta = reta | (((uint32_t)queue_id) << 24);
		/* every 4th entry, flush the accumulated 32-bit RETA word */
611 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
613 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
619 	/* Now fill our hash function seeds */
620 	for (i = 0; i < 10; i++)
621 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
623 	/* Perform hash on these packet types */
624 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
625 		rss_hash_config = rss_gethashconfig();
628 		 * Disable UDP - IP fragments aren't currently being handled
629 		 * and so we end up with a mix of 2-tuple and 4-tuple
632 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
633 		                | RSS_HASHTYPE_RSS_TCP_IPV4
634 		                | RSS_HASHTYPE_RSS_IPV6
635 		                | RSS_HASHTYPE_RSS_TCP_IPV6
636 		                | RSS_HASHTYPE_RSS_IPV6_EX
637 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	/* Translate the chosen hash types into MRQC field-enable bits */
640 	mrqc = IXGBE_MRQC_RSSEN;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
651 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
652 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
653 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
654 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
655 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
656 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
657 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
658 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
659 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
660 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
661 } /* ixgbe_initialize_rss_mapping */
663 /************************************************************************
664  * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs per-queue RX descriptor ring registers (base/len/head/tail,
 *   SRRCTL), frame-control/jumbo settings, PSRTYPE, RSS mapping, and
 *   RX checksum offload configuration.
665  ************************************************************************/
/* Round a buffer size up to the SRRCTL BSIZEPKT granularity. */
666 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
669 ixgbe_initialize_receive_units(if_ctx_t ctx)
671 	struct adapter     *adapter = iflib_get_softc(ctx);
672 	if_softc_ctx_t     scctx = adapter->shared;
673 	struct ixgbe_hw    *hw = &adapter->hw;
674 	struct ifnet       *ifp = iflib_get_ifp(ctx);
675 	struct ix_rx_queue *que;
677 	u32                bufsz, fctrl, srrctl, rxcsum;
681 	 * Make sure receives are disabled while
682 	 * setting up the descriptor ring
684 	ixgbe_disable_rx(hw);
686 	/* Enable broadcasts */
687 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
688 	fctrl |= IXGBE_FCTRL_BAM;
689 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
690 		fctrl |= IXGBE_FCTRL_DPF;
691 		fctrl |= IXGBE_FCTRL_PMCF;
693 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
695 	/* Set for Jumbo Frames? */
696 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
697 	if (ifp->if_mtu > ETHERMTU)
698 		hlreg |= IXGBE_HLREG0_JUMBOEN;
700 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
	/* convert mbuf size to SRRCTL's 1KB-granule packet-buffer units */
703 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
704 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
706 	/* Setup the Base and Length of the Rx Descriptor Ring */
707 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
708 		struct rx_ring *rxr = &que->rxr;
709 		u64            rdba = rxr->rx_paddr;
713 		/* Setup the Base and Length of the Rx Descriptor Ring */
714 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
715 		    (rdba & 0x00000000ffffffffULL));
716 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
717 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
718 		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
720 		/* Set up the SRRCTL register */
721 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
722 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
723 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
725 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
728 		 * Set DROP_EN iff we have no flow control and >1 queue.
729 		 * Note that srrctl was cleared shortly before during reset,
730 		 * so we do not need to clear the bit, but do it just in case
731 		 * this code is moved elsewhere.
733 		if (adapter->num_rx_queues > 1 &&
734 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
735 			srrctl |= IXGBE_SRRCTL_DROP_EN;
737 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
740 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
742 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
743 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
744 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
746 		/* Set the driver rx tail address */
747 		rxr->tail =  IXGBE_RDT(rxr->me);
	/* packet-split receive types: not applicable to 82598 */
750 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
751 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
752 		            | IXGBE_PSRTYPE_UDPHDR
753 		            | IXGBE_PSRTYPE_IPV4HDR
754 		            | IXGBE_PSRTYPE_IPV6HDR;
755 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
758 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
760 	ixgbe_initialize_rss_mapping(adapter);
762 	if (adapter->num_rx_queues > 1) {
763 		/* RSS and RX IPP Checksum are mutually exclusive */
764 		rxcsum |= IXGBE_RXCSUM_PCSD;
767 	if (ifp->if_capenable & IFCAP_RXCSUM)
768 		rxcsum |= IXGBE_RXCSUM_PCSD;
770 	/* This is useful for calculating UDP/IP fragment checksums */
771 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
772 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
774 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
776 } /* ixgbe_initialize_receive_units */
778 /************************************************************************
779  * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs per-queue TX descriptor ring registers (base/len/head/tail),
 *   disables head writeback, resets the driver's ring bookkeeping, and
 *   enables DMA TX (DMATXCTL/MTQC) on non-82598 parts.
780  ************************************************************************/
782 ixgbe_initialize_transmit_units(if_ctx_t ctx)
784 	struct adapter     *adapter = iflib_get_softc(ctx);
785 	struct ixgbe_hw    *hw = &adapter->hw;
786 	if_softc_ctx_t     scctx = adapter->shared;
787 	struct ix_tx_queue *que;
790 	/* Setup the Base and Length of the Tx Descriptor Ring */
791 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
793 		struct tx_ring	   *txr = &que->txr;
794 		u64 tdba = txr->tx_paddr;
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
799 		    (tdba & 0x00000000ffffffffULL));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
801 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
802 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
804 		/* Setup the HW Tx Head and Tail descriptor pointers */
805 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
806 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
808 		/* Cache the tail address */
		/* reset report-status tracking for a fresh ring */
809 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
810 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
811 			txr->tx_rsq[k] = QIDX_INVALID;
813 		/* Disable Head Writeback */
815 		 * Note: for X550 series devices, these registers are actually
816 		 * prefixed with TPH_ instead of DCA_, but the addresses and
817 		 * fields remain the same.
		/* 82598 and 82599+ use different TXCTRL register layouts */
819 		switch (hw->mac.type) {
820 		case ixgbe_mac_82598EB:
821 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
824 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
827 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
828 		switch (hw->mac.type) {
829 		case ixgbe_mac_82598EB:
830 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
833 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
839 	if (hw->mac.type != ixgbe_mac_82598EB) {
840 		u32 dmatxctl, rttdcs;
842 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
843 		dmatxctl |= IXGBE_DMATXCTL_TE;
844 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
845 		/* Disable arbiter to set MTQC */
846 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
847 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
848 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
849 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
850 		    ixgbe_get_mtqc(adapter->iov_mode));
		/* re-enable the arbiter once MTQC is programmed */
851 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
852 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
855 } /* ixgbe_initialize_transmit_units */
857 /************************************************************************
859 ************************************************************************/
861 ixgbe_register(device_t dev)
	/* NOTE(review): function body elided in this excerpt — presumably hands
	 * the iflib driver context back to the bus; confirm against full source. */
864 } /* ixgbe_register */
866 /************************************************************************
867 * ixgbe_if_attach_pre - Device initialization routine, part 1
869 * Called when the driver is being loaded.
870 * Identifies the type of hardware, initializes the hardware,
871 * and initializes iflib structures.
873 * return 0 on success, positive on failure
874 ************************************************************************/
876 ixgbe_if_attach_pre(if_ctx_t ctx)
878 struct adapter *adapter;
880 if_softc_ctx_t scctx;
885 INIT_DEBUGOUT("ixgbe_attach: begin");
887 /* Allocate, clear, and link in our adapter structure */
888 dev = iflib_get_dev(ctx);
889 adapter = iflib_get_softc(ctx);
890 adapter->hw.back = adapter;
893 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
894 adapter->media = iflib_get_media(ctx);
897 /* Determine hardware revision */
898 hw->vendor_id = pci_get_vendor(dev);
899 hw->device_id = pci_get_device(dev);
900 hw->revision_id = pci_get_revid(dev);
901 hw->subsystem_vendor_id = pci_get_subvendor(dev);
902 hw->subsystem_device_id = pci_get_subdevice(dev);
904 /* Do base PCI setup - map BAR0 */
905 if (ixgbe_allocate_pci_resources(ctx)) {
906 device_printf(dev, "Allocation of PCI resources failed\n");
910 /* let hardware know driver is loaded */
911 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
912 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
913 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
916 * Initialize the shared code
918 if (ixgbe_init_shared_code(hw) != 0) {
919 device_printf(dev, "Unable to initialize the shared code\n");
924 if (hw->mbx.ops.init_params)
925 hw->mbx.ops.init_params(hw);
927 hw->allow_unsupported_sfp = allow_unsupported_sfp;
	/* SmartSpeed only applies to non-82598 PHYs. */
929 if (hw->mac.type != ixgbe_mac_82598EB)
930 hw->phy.smart_speed = ixgbe_smart_speed;
932 ixgbe_init_device_features(adapter);
934 /* Enable WoL (if supported) */
935 ixgbe_check_wol_support(adapter);
937 /* Verify adapter fan is still functional (if applicable) */
938 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
939 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
940 ixgbe_check_fan_failure(adapter, esdp, FALSE);
943 /* Ensure SW/FW semaphore is free */
944 ixgbe_init_swfw_semaphore(hw);
946 /* Set an initial default flow control value */
947 hw->fc.requested_mode = ixgbe_flow_control;
	/* Reset even if the PHY reports overtemp, then restore normal policy. */
949 hw->phy.reset_if_overtemp = TRUE;
950 error = ixgbe_reset_hw(hw);
951 hw->phy.reset_if_overtemp = FALSE;
952 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
954 * No optics in this port, set up
955 * so the timer routine will probe
956 * for later insertion.
958 adapter->sfp_probe = TRUE;
960 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
961 device_printf(dev, "Unsupported SFP+ module detected!\n");
965 device_printf(dev, "Hardware initialization failed\n");
970 /* Make sure we have a good EEPROM before we read from it */
971 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
972 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
977 error = ixgbe_start_hw(hw);
979 case IXGBE_ERR_EEPROM_VERSION:
980 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
982 case IXGBE_ERR_SFP_NOT_SUPPORTED:
983 device_printf(dev, "Unsupported SFP+ Module\n");
986 case IXGBE_ERR_SFP_NOT_PRESENT:
987 device_printf(dev, "No SFP+ Module found\n");
993 /* Most of the iflib initialization... */
995 iflib_set_mac(ctx, hw->mac.addr);
	/* X550-class MACs have a larger RSS table and more queue sets. */
996 switch (adapter->hw.mac.type) {
998 case ixgbe_mac_X550EM_x:
999 case ixgbe_mac_X550EM_a:
1000 scctx->isc_rss_table_size = 512;
1001 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1004 scctx->isc_rss_table_size = 128;
1005 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1008 /* Allow legacy interrupts */
1009 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
	/*
	 * TX ring size adds sizeof(u32) of extra space per ring.
	 * NOTE(review): the ',' after DBA_ALIGN chains the two assignments
	 * with the comma operator — harmless, but easy to misread as a typo.
	 */
1011 scctx->isc_txqsizes[0] =
1012 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1013 sizeof(u32), DBA_ALIGN),
1014 scctx->isc_rxqsizes[0] =
1015 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1019 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1020 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	/* 82598 differs: fewer scatter segments, different MSI-X BAR, no SCTP csum. */
1021 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1022 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1023 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1025 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1026 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1027 scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1029 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1030 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1031 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1033 scctx->isc_txrx = &ixgbe_txrx;
1035 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
	/* Error unwind: drop the DRV_LOAD handshake bit and release BAR0. */
1040 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1041 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1042 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1043 ixgbe_free_pci_resources(ctx);
1046 } /* ixgbe_if_attach_pre */
1048 /*********************************************************************
1049 * ixgbe_if_attach_post - Device initialization routine, part 2
1051 * Called during driver load, but after interrupts and
1052 * resources have been allocated and configured.
1053 * Sets up some data structures not relevant to iflib.
1055 * return 0 on success, positive on failure
1056 *********************************************************************/
1058 ixgbe_if_attach_post(if_ctx_t ctx)
1061 struct adapter *adapter;
1062 struct ixgbe_hw *hw;
1065 dev = iflib_get_dev(ctx);
1066 adapter = iflib_get_softc(ctx);
	/* Reject attach if iflib fell back to a legacy IRQ the HW can't do. */
1070 if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1071 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1072 device_printf(dev, "Device does not support legacy interrupts");
1077 /* Allocate multicast array memory. */
	/* M_NOWAIT: may return NULL; checked immediately below. */
1078 adapter->mta = malloc(sizeof(*adapter->mta) *
1079 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1080 if (adapter->mta == NULL) {
1081 device_printf(dev, "Can not allocate multicast setup array\n");
1086 /* hw.ix defaults init */
1087 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1089 /* Enable the optics for 82599 SFP+ fiber */
1090 ixgbe_enable_tx_laser(hw);
1092 /* Enable power to the phy. */
1093 ixgbe_set_phy_power(hw, TRUE);
1095 ixgbe_initialize_iov(adapter);
1097 error = ixgbe_setup_interface(ctx);
1099 device_printf(dev, "Interface setup failed: %d\n", error);
1103 ixgbe_if_update_admin_status(ctx);
1105 /* Initialize statistics */
1106 ixgbe_update_stats_counters(adapter);
1107 ixgbe_add_hw_stats(adapter);
1109 /* Check PCIE slot type/speed/width */
1110 ixgbe_get_slot_info(adapter);
1113 * Do time init and sysctl init here, but
1114 * only on the first port of a bypass adapter.
1116 ixgbe_bypass_init(adapter);
1118 /* Set an initial dmac value */
1120 /* Set initial advertised speeds (if applicable) */
1121 adapter->advertise = ixgbe_get_advertise(adapter);
	/* SR-IOV: publish the IOV schemas only when the HW supports it. */
1123 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1124 ixgbe_define_iov_schemas(dev, &error);
1127 ixgbe_add_device_sysctls(ctx);
1132 } /* ixgbe_if_attach_post */
1134 /************************************************************************
1135 * ixgbe_check_wol_support
1137 * Checks whether the adapter's ports are capable of
1138 * Wake On LAN by reading the adapter's NVM.
1140 * Sets each port's hw->wol_enabled value depending
1141 * on the value read here.
1142 ************************************************************************/
1144 ixgbe_check_wol_support(struct adapter *adapter)
1146 struct ixgbe_hw *hw = &adapter->hw;
1149 /* Find out WoL support for port */
1150 adapter->wol_support = hw->wol_enabled = 0;
	/* NOTE(review): dev_caps declaration and the second half of the
	 * port-0 condition are not visible in this excerpt. */
1151 ixgbe_get_device_caps(hw, &dev_caps);
1152 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1153 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1155 adapter->wol_support = hw->wol_enabled = 1;
1157 /* Save initial wake up filter configuration */
1158 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1161 } /* ixgbe_check_wol_support */
1163 /************************************************************************
1164 * ixgbe_setup_interface
1166 * Setup networking device structure and register an interface.
1167 ************************************************************************/
1169 ixgbe_setup_interface(if_ctx_t ctx)
1171 struct ifnet *ifp = iflib_get_ifp(ctx);
1172 struct adapter *adapter = iflib_get_softc(ctx);
1174 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1176 if_setbaudrate(ifp, IF_Gbps(10));
	/* Max on-wire frame: current MTU plus Ethernet header and CRC. */
1178 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1180 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1182 ixgbe_add_media_types(ctx);
1184 /* Autoselect media by default */
1185 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1188 } /* ixgbe_setup_interface */
1190 /************************************************************************
1191 * ixgbe_if_get_counter
1192 ************************************************************************/
1194 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1196 struct adapter *adapter = iflib_get_softc(ctx);
1197 if_t ifp = iflib_get_ifp(ctx);
	/* Serve counters from the soft copies maintained by
	 * ixgbe_update_stats_counters(); anything else falls through
	 * to the iflib default below. */
1200 case IFCOUNTER_IPACKETS:
1201 return (adapter->ipackets);
1202 case IFCOUNTER_OPACKETS:
1203 return (adapter->opackets);
1204 case IFCOUNTER_IBYTES:
1205 return (adapter->ibytes);
1206 case IFCOUNTER_OBYTES:
1207 return (adapter->obytes);
1208 case IFCOUNTER_IMCASTS:
1209 return (adapter->imcasts);
1210 case IFCOUNTER_OMCASTS:
1211 return (adapter->omcasts);
1212 case IFCOUNTER_COLLISIONS:
1214 case IFCOUNTER_IQDROPS:
1215 return (adapter->iqdrops);
1216 case IFCOUNTER_OQDROPS:
1218 case IFCOUNTER_IERRORS:
1219 return (adapter->ierrors);
1221 return (if_get_counter_default(ifp, cnt));
1223 } /* ixgbe_if_get_counter */
1225 /************************************************************************
1227 ************************************************************************/
1229 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1231 struct adapter *adapter = iflib_get_softc(ctx);
1232 struct ixgbe_hw *hw = &adapter->hw;
	/* PHY has no i2c byte-read op — cannot service the request. */
1236 if (hw->phy.ops.read_i2c_byte == NULL)
	/* Read req->len bytes, one at a time, from dev_addr at offset. */
1238 for (i = 0; i < req->len; i++)
1239 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1240 req->dev_addr, &req->data[i]);
1242 } /* ixgbe_if_i2c_req */
1244 /************************************************************************
1245 * ixgbe_add_media_types
1246 ************************************************************************/
1248 ixgbe_add_media_types(if_ctx_t ctx)
1250 struct adapter *adapter = iflib_get_softc(ctx);
1251 struct ixgbe_hw *hw = &adapter->hw;
1252 device_t dev = iflib_get_dev(ctx);
1255 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1257 /* Media types with matching FreeBSD media defines */
1258 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1259 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1260 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1261 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1262 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1263 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1264 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1265 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1267 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1268 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1269 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
	/* Multispeed fiber modules also get the matching 1G media. */
1272 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1273 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1274 if (hw->phy.multispeed_fiber)
1275 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1278 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1279 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1280 if (hw->phy.multispeed_fiber)
1281 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1283 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1284 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1285 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1286 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	/* With IFM_ETH_XTYPE the KR/KX types exist; otherwise map them to
	 * the closest legacy media and tell the user. */
1288 #ifdef IFM_ETH_XTYPE
1289 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1290 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1291 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1292 ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1293 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1294 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1295 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1296 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1298 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1299 device_printf(dev, "Media supported: 10GbaseKR\n");
1300 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1301 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1303 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1304 device_printf(dev, "Media supported: 10GbaseKX4\n");
1305 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1306 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1308 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1309 device_printf(dev, "Media supported: 1000baseKX\n");
1310 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1311 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1313 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1314 device_printf(dev, "Media supported: 2500baseKX\n");
1315 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1316 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1319 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1320 device_printf(dev, "Media supported: 1000baseBX\n");
	/* 82598AT additionally advertises 1000baseT (full duplex). */
1322 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1323 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1325 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1328 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1329 } /* ixgbe_add_media_types */
1331 /************************************************************************
1333 ************************************************************************/
1335 ixgbe_is_sfp(struct ixgbe_hw *hw)
	/* Decide per MAC generation whether the port's media is SFP-based.
	 * NOTE(review): the return statements are elided in this excerpt. */
1337 switch (hw->mac.type) {
1338 case ixgbe_mac_82598EB:
1339 if (hw->phy.type == ixgbe_phy_nl)
1342 case ixgbe_mac_82599EB:
1343 switch (hw->mac.ops.get_media_type(hw)) {
1344 case ixgbe_media_type_fiber:
1345 case ixgbe_media_type_fiber_qsfp:
1350 case ixgbe_mac_X550EM_x:
1351 case ixgbe_mac_X550EM_a:
1352 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1358 } /* ixgbe_is_sfp */
1360 /************************************************************************
1362 ************************************************************************/
1364 ixgbe_config_link(struct adapter *adapter)
1366 struct ixgbe_hw *hw = &adapter->hw;
1367 u32 autoneg, err = 0;
1368 bool sfp, negotiate;
1370 sfp = ixgbe_is_sfp(hw);
	/* SFP ports: defer to the module-insertion task instead of
	 * configuring the link inline. */
1373 GROUPTASK_ENQUEUE(&adapter->mod_task);
1375 if (hw->mac.ops.check_link)
1376 err = ixgbe_check_link(hw, &adapter->link_speed,
1377 &adapter->link_up, FALSE);
	/* Nothing advertised yet: query HW for its link capabilities. */
1380 autoneg = hw->phy.autoneg_advertised;
1381 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1382 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1386 if (hw->mac.ops.setup_link)
1387 err = hw->mac.ops.setup_link(hw, autoneg,
1391 } /* ixgbe_config_link */
1393 /************************************************************************
1394 * ixgbe_update_stats_counters - Update board statistics counters.
1395 ************************************************************************/
1397 ixgbe_update_stats_counters(struct adapter *adapter)
1399 struct ixgbe_hw *hw = &adapter->hw;
1400 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1401 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1402 u64 total_missed_rx = 0;
	/* HW stat registers are clear-on-read; accumulate into soft copies. */
1404 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1405 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1406 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1407 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1408 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
	/* Per-queue counters, 16 register sets. */
1410 for (int i = 0; i < 16; i++) {
1411 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1412 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1413 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1415 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1416 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1417 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1419 /* Hardware workaround, gprc counts missed packets */
	/* NOTE(review): the code accumulating missed_rx is not visible in
	 * this excerpt; as shown, missed_rx is still 0 here — confirm. */
1420 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1421 stats->gprc -= missed_rx;
	/* 82599+ have full 64-bit (low/high) octet counters; 82598 only
	 * exposes the high register (handled in the else arm below). */
1423 if (hw->mac.type != ixgbe_mac_82598EB) {
1424 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1425 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1426 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1427 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1428 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1429 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1430 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1431 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1433 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1434 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1435 /* 82598 only has a counter in the high register */
1436 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1437 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1438 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1442 * Workaround: mprc hardware is incorrectly counting
1443 * broadcasts, so for now we subtract those.
1445 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1446 stats->bprc += bprc;
1447 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1448 if (hw->mac.type == ixgbe_mac_82598EB)
1449 stats->mprc -= bprc;
1451 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1452 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1453 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1454 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1455 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1456 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1458 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1459 stats->lxontxc += lxon;
1460 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1461 stats->lxofftxc += lxoff;
1462 total = lxon + lxoff;
	/* Flow-control frames are counted by the TX counters; back them out
	 * so gptc/mptc/ptc64/gotc reflect only data frames. */
1464 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1465 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1466 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1467 stats->gptc -= total;
1468 stats->mptc -= total;
1469 stats->ptc64 -= total;
1470 stats->gotc -= total * ETHER_MIN_LEN;
1472 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1473 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1474 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1475 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1476 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1477 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1478 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1479 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1480 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1481 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1482 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1483 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1484 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1485 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1486 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1487 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1488 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1489 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1490 /* Only read FCOE on 82599 */
1491 if (hw->mac.type != ixgbe_mac_82598EB) {
1492 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1493 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1494 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1495 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1496 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1499 /* Fill out the OS statistics structure */
1500 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1501 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1502 IXGBE_SET_IBYTES(adapter, stats->gorc);
1503 IXGBE_SET_OBYTES(adapter, stats->gotc);
1504 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1505 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1506 IXGBE_SET_COLLISIONS(adapter, 0);
1507 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1508 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1509 } /* ixgbe_update_stats_counters */
1511 /************************************************************************
1512 * ixgbe_add_hw_stats
1514 * Add sysctl variables, one per statistic, to the system.
1515 ************************************************************************/
1517 ixgbe_add_hw_stats(struct adapter *adapter)
1519 device_t dev = iflib_get_dev(adapter->ctx);
1520 struct ix_rx_queue *rx_que;
1521 struct ix_tx_queue *tx_que;
1522 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1523 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1524 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1525 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1526 struct sysctl_oid *stat_node, *queue_node;
1527 struct sysctl_oid_list *stat_list, *queue_list;
1530 #define QUEUE_NAME_LEN 32
1531 char namebuf[QUEUE_NAME_LEN];
1533 /* Driver Statistics */
1534 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1535 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1536 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1537 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1538 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1539 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
	/* Per-TX-queue nodes: descriptor head/tail plus soft counters. */
1541 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1542 struct tx_ring *txr = &tx_que->txr;
1543 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1544 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1545 CTLFLAG_RD, NULL, "Queue Name");
1546 queue_list = SYSCTL_CHILDREN(queue_node);
1548 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1549 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1550 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1551 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1552 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1553 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1554 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1555 CTLFLAG_RD, &txr->tso_tx, "TSO");
1556 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1557 CTLFLAG_RD, &txr->total_packets,
1558 "Queue Packets Transmitted");
	/* Per-RX-queue nodes: interrupt rate, IRQ count, head/tail, counters. */
1561 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1562 struct rx_ring *rxr = &rx_que->rxr;
1563 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1564 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1565 CTLFLAG_RD, NULL, "Queue Name");
1566 queue_list = SYSCTL_CHILDREN(queue_node);
1568 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1569 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1570 sizeof(&adapter->rx_queues[i]),
1571 ixgbe_sysctl_interrupt_rate_handler, "IU",
1573 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1574 CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1575 "irqs on this queue");
1576 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1577 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1578 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1579 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1580 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1581 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1582 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1583 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1584 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1585 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1586 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1587 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1588 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1589 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1592 /* MAC stats get their own sub node */
1594 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1595 CTLFLAG_RD, NULL, "MAC Statistics");
1596 stat_list = SYSCTL_CHILDREN(stat_node);
1598 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1599 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1601 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1603 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1604 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1605 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1607 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1609 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1611 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1613 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1615 /* Flow Control stats */
1616 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1617 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1619 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1621 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1623 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1625 /* Packet Reception Stats */
1626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1627 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1629 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1631 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1633 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1635 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1637 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1639 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1641 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1643 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1645 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1647 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1649 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1651 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1653 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1655 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1657 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1659 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	/* NOTE(review): "management_pkts_drpd" exposes mngptc (the TX count,
	 * also exported below as management_pkts_txd); stats->mngpdc looks
	 * like the intended counter — confirm against the datasheet. */
1660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1661 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1663 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1665 /* Packet Transmission Stats */
1666 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1667 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1669 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1671 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1672 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1673 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1675 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1676 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1677 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1678 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1679 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1681 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1683 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1685 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1687 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1689 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1690 } /* ixgbe_add_hw_stats */
1692 /************************************************************************
1693 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1695 * Retrieves the TDH value from the hardware
1696 ************************************************************************/
1698 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
	/* oid_arg1 is the tx_ring registered in ixgbe_add_hw_stats(). */
1700 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read the live TDH register for this ring and report it read-only;
	 * req->newptr non-NULL means a write attempt (returns elided here). */
1707 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1708 error = sysctl_handle_int(oidp, &val, 0, req);
1709 if (error || !req->newptr)
1713 } /* ixgbe_sysctl_tdh_handler */
1715 /************************************************************************
1716 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1718 * Retrieves the TDT value from the hardware
1719 ************************************************************************/
1721 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
	/* oid_arg1 is the tx_ring registered in ixgbe_add_hw_stats(). */
1723 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	/* Read the live TDT register for this ring and report it read-only. */
1730 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1731 error = sysctl_handle_int(oidp, &val, 0, req);
1732 if (error || !req->newptr)
1736 } /* ixgbe_sysctl_tdt_handler */
1738 /************************************************************************
1739 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1741 * Retrieves the RDH value from the hardware
1742 ************************************************************************/
1744 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
	/* oid_arg1 is the rx_ring registered in ixgbe_add_hw_stats(). */
1746 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	/* Read the live RDH register for this ring and report it read-only. */
1753 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1754 error = sysctl_handle_int(oidp, &val, 0, req);
1755 if (error || !req->newptr)
1759 } /* ixgbe_sysctl_rdh_handler */
1761 /************************************************************************
1762 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1764 * Retrieves the RDT value from the hardware
1765 ************************************************************************/
1767 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
	/* oid_arg1 is the rx_ring registered in ixgbe_add_hw_stats(). */
1769 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	/* Read the live RDT register for this ring and report it read-only. */
1776 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1777 error = sysctl_handle_int(oidp, &val, 0, req);
1778 if (error || !req->newptr)
1782 } /* ixgbe_sysctl_rdt_handler */
1784 /************************************************************************
1785 * ixgbe_if_vlan_register
1787 * Run via vlan config EVENT, it enables us to use the
1788 * HW Filter table since we can get the vlan id. This
1789 * just creates the entry in the soft version of the
1790 * VFTA, init will repopulate the real table.
1791 ************************************************************************/
1793 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1795 struct adapter *adapter = iflib_get_softc(ctx);
1798 index = (vtag >> 5) & 0x7F;
1800 adapter->shadow_vfta[index] |= (1 << bit);
1801 ++adapter->num_vlans;
1802 ixgbe_setup_vlan_hw_support(ctx);
1803 } /* ixgbe_if_vlan_register */
1805 /************************************************************************
1806 * ixgbe_if_vlan_unregister
1808 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1809 ************************************************************************/
1811 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1813 struct adapter *adapter = iflib_get_softc(ctx);
1816 index = (vtag >> 5) & 0x7F;
1818 adapter->shadow_vfta[index] &= ~(1 << bit);
1819 --adapter->num_vlans;
1820 /* Re-init to load the changes */
1821 ixgbe_setup_vlan_hw_support(ctx);
1822 } /* ixgbe_if_vlan_unregister */
1824 /************************************************************************
1825 * ixgbe_setup_vlan_hw_support
1826 ************************************************************************/
1828 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1830 struct ifnet *ifp = iflib_get_ifp(ctx);
1831 struct adapter *adapter = iflib_get_softc(ctx);
1832 struct ixgbe_hw *hw = &adapter->hw;
1833 struct rx_ring *rxr;
1839 * We get here thru init_locked, meaning
1840 * a soft reset, this has already cleared
1841 * the VFTA and other state, so if there
1842 * have been no vlan's registered do nothing.
1844 if (adapter->num_vlans == 0)
1847 /* Setup the queues for vlans */
1848 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1849 for (i = 0; i < adapter->num_rx_queues; i++) {
1850 rxr = &adapter->rx_queues[i].rxr;
1851 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1852 if (hw->mac.type != ixgbe_mac_82598EB) {
1853 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1854 ctrl |= IXGBE_RXDCTL_VME;
1855 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1857 rxr->vtag_strip = TRUE;
1861 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1864 * A soft reset zero's out the VFTA, so
1865 * we need to repopulate it now.
1867 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1868 if (adapter->shadow_vfta[i] != 0)
1869 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1870 adapter->shadow_vfta[i]);
1872 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1873 /* Enable the Filter Table if enabled */
1874 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1875 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1876 ctrl |= IXGBE_VLNCTRL_VFE;
1878 if (hw->mac.type == ixgbe_mac_82598EB)
1879 ctrl |= IXGBE_VLNCTRL_VME;
1880 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1881 } /* ixgbe_setup_vlan_hw_support */
1883 /************************************************************************
1884 * ixgbe_get_slot_info
1886 * Get the width and transaction speed of
1887 * the slot this adapter is plugged into.
1888 ************************************************************************/
1890 ixgbe_get_slot_info(struct adapter *adapter)
1892 device_t dev = iflib_get_dev(adapter->ctx);
1893 struct ixgbe_hw *hw = &adapter->hw;
1894 int bus_info_valid = TRUE;
1898 /* Some devices are behind an internal bridge */
1899 switch (hw->device_id) {
1900 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1901 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1902 goto get_parent_info;
1907 ixgbe_get_bus_info(hw);
1910 * Some devices don't use PCI-E, but there is no need
1911 * to display "Unknown" for bus speed and width.
1913 switch (hw->mac.type) {
1914 case ixgbe_mac_X550EM_x:
1915 case ixgbe_mac_X550EM_a:
1923 * For the Quad port adapter we need to parse back
1924 * up the PCI tree to find the speed of the expansion
1925 * slot into which this adapter is plugged. A bit more work.
1927 dev = device_get_parent(device_get_parent(dev));
1929 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1930 pci_get_slot(dev), pci_get_function(dev));
1932 dev = device_get_parent(device_get_parent(dev));
1934 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1935 pci_get_slot(dev), pci_get_function(dev));
1937 /* Now get the PCI Express Capabilities offset */
1938 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1940 * Hmm...can't get PCI-Express capabilities.
1941 * Falling back to default method.
1943 bus_info_valid = FALSE;
1944 ixgbe_get_bus_info(hw);
1947 /* ...and read the Link Status Register */
1948 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1949 ixgbe_set_pci_config_data_generic(hw, link);
1952 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1953 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1954 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1955 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1957 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1958 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1959 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1962 if (bus_info_valid) {
1963 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1964 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1965 (hw->bus.speed == ixgbe_bus_speed_2500))) {
1966 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1967 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1969 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1970 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1971 (hw->bus.speed < ixgbe_bus_speed_8000))) {
1972 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1973 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1976 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1979 } /* ixgbe_get_slot_info */
1981 /************************************************************************
1982 * ixgbe_if_msix_intr_assign
1984 * Setup MSI-X Interrupt resources and handlers
1985 ************************************************************************/
1987 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1989 struct adapter *adapter = iflib_get_softc(ctx);
1990 struct ix_rx_queue *rx_que = adapter->rx_queues;
1991 struct ix_tx_queue *tx_que;
1992 int error, rid, vector = 0;
1996 /* Admin Que is vector 0*/
1998 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2001 snprintf(buf, sizeof(buf), "rxq%d", i);
2002 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2003 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2006 device_printf(iflib_get_dev(ctx),
2007 "Failed to allocate que int %d err: %d", i, error);
2008 adapter->num_rx_queues = i + 1;
2012 rx_que->msix = vector;
2013 adapter->active_queues |= (u64)(1 << rx_que->msix);
2014 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2016 * The queue ID is used as the RSS layer bucket ID.
2017 * We look up the queue ID -> RSS CPU ID and select
2020 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2023 * Bind the msix vector, and thus the
2024 * rings to the corresponding cpu.
2026 * This just happens to match the default RSS
2027 * round-robin bucket -> queue -> CPU allocation.
2029 if (adapter->num_rx_queues > 1)
2034 for (int i = 0; i < adapter->num_tx_queues; i++) {
2035 snprintf(buf, sizeof(buf), "txq%d", i);
2036 tx_que = &adapter->tx_queues[i];
2037 tx_que->msix = i % adapter->num_rx_queues;
2038 iflib_softirq_alloc_generic(ctx,
2039 &adapter->rx_queues[tx_que->msix].que_irq,
2040 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2043 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2044 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2046 device_printf(iflib_get_dev(ctx),
2047 "Failed to register admin handler");
2051 adapter->vector = vector;
2055 iflib_irq_free(ctx, &adapter->irq);
2056 rx_que = adapter->rx_queues;
2057 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2058 iflib_irq_free(ctx, &rx_que->que_irq);
2061 } /* ixgbe_if_msix_intr_assign */
2063 /*********************************************************************
2064 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2065 **********************************************************************/
2067 ixgbe_msix_que(void *arg)
2069 struct ix_rx_queue *que = arg;
2070 struct adapter *adapter = que->adapter;
2071 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
2073 /* Protect against spurious interrupts */
2074 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2077 ixgbe_disable_queue(adapter, que->msix);
2080 return (FILTER_SCHEDULE_THREAD);
2081 } /* ixgbe_msix_que */
2083 /************************************************************************
2084 * ixgbe_media_status - Media Ioctl callback
2086 * Called whenever the user queries the status of
2087 * the interface using ifconfig.
2088 ************************************************************************/
2090 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2092 struct adapter *adapter = iflib_get_softc(ctx);
2093 struct ixgbe_hw *hw = &adapter->hw;
2096 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2098 iflib_admin_intr_deferred(ctx);
2100 ifmr->ifm_status = IFM_AVALID;
2101 ifmr->ifm_active = IFM_ETHER;
2103 if (!adapter->link_active)
2106 ifmr->ifm_status |= IFM_ACTIVE;
2107 layer = adapter->phy_layer;
2109 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2110 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2111 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2112 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2113 switch (adapter->link_speed) {
2114 case IXGBE_LINK_SPEED_10GB_FULL:
2115 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2117 case IXGBE_LINK_SPEED_1GB_FULL:
2118 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2120 case IXGBE_LINK_SPEED_100_FULL:
2121 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2123 case IXGBE_LINK_SPEED_10_FULL:
2124 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2127 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2128 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2129 switch (adapter->link_speed) {
2130 case IXGBE_LINK_SPEED_10GB_FULL:
2131 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2134 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2135 switch (adapter->link_speed) {
2136 case IXGBE_LINK_SPEED_10GB_FULL:
2137 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2139 case IXGBE_LINK_SPEED_1GB_FULL:
2140 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2143 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2144 switch (adapter->link_speed) {
2145 case IXGBE_LINK_SPEED_10GB_FULL:
2146 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2148 case IXGBE_LINK_SPEED_1GB_FULL:
2149 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2152 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2153 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2154 switch (adapter->link_speed) {
2155 case IXGBE_LINK_SPEED_10GB_FULL:
2156 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2158 case IXGBE_LINK_SPEED_1GB_FULL:
2159 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2162 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2163 switch (adapter->link_speed) {
2164 case IXGBE_LINK_SPEED_10GB_FULL:
2165 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2169 * XXX: These need to use the proper media types once
2172 #ifndef IFM_ETH_XTYPE
2173 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2174 switch (adapter->link_speed) {
2175 case IXGBE_LINK_SPEED_10GB_FULL:
2176 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2178 case IXGBE_LINK_SPEED_2_5GB_FULL:
2179 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2181 case IXGBE_LINK_SPEED_1GB_FULL:
2182 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2185 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2186 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2187 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2188 switch (adapter->link_speed) {
2189 case IXGBE_LINK_SPEED_10GB_FULL:
2190 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2192 case IXGBE_LINK_SPEED_2_5GB_FULL:
2193 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2195 case IXGBE_LINK_SPEED_1GB_FULL:
2196 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2200 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2201 switch (adapter->link_speed) {
2202 case IXGBE_LINK_SPEED_10GB_FULL:
2203 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2205 case IXGBE_LINK_SPEED_2_5GB_FULL:
2206 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2208 case IXGBE_LINK_SPEED_1GB_FULL:
2209 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2212 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2213 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2214 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2215 switch (adapter->link_speed) {
2216 case IXGBE_LINK_SPEED_10GB_FULL:
2217 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2219 case IXGBE_LINK_SPEED_2_5GB_FULL:
2220 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2222 case IXGBE_LINK_SPEED_1GB_FULL:
2223 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2228 /* If nothing is recognized... */
2229 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2230 ifmr->ifm_active |= IFM_UNKNOWN;
2232 /* Display current flow control setting used on link */
2233 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2234 hw->fc.current_mode == ixgbe_fc_full)
2235 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2236 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2237 hw->fc.current_mode == ixgbe_fc_full)
2238 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2239 } /* ixgbe_media_status */
2241 /************************************************************************
2242 * ixgbe_media_change - Media Ioctl callback
2244 * Called when the user changes speed/duplex using
2245 * media/mediopt option with ifconfig.
2246 ************************************************************************/
2248 ixgbe_if_media_change(if_ctx_t ctx)
2250 struct adapter *adapter = iflib_get_softc(ctx);
2251 struct ifmedia *ifm = iflib_get_media(ctx);
2252 struct ixgbe_hw *hw = &adapter->hw;
2253 ixgbe_link_speed speed = 0;
2255 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2257 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2260 if (hw->phy.media_type == ixgbe_media_type_backplane)
2264 * We don't actually need to check against the supported
2265 * media types of the adapter; ifmedia will take care of
2268 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2271 speed |= IXGBE_LINK_SPEED_100_FULL;
2272 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2273 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2277 #ifndef IFM_ETH_XTYPE
2278 case IFM_10G_SR: /* KR, too */
2279 case IFM_10G_CX4: /* KX4 */
2284 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2287 #ifndef IFM_ETH_XTYPE
2288 case IFM_1000_CX: /* KX */
2294 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2297 speed |= IXGBE_LINK_SPEED_100_FULL;
2298 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2300 case IFM_10G_TWINAX:
2301 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2304 speed |= IXGBE_LINK_SPEED_100_FULL;
2307 speed |= IXGBE_LINK_SPEED_10_FULL;
2313 hw->mac.autotry_restart = TRUE;
2314 hw->mac.ops.setup_link(hw, speed, TRUE);
2315 adapter->advertise =
2316 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2317 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2318 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2319 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2324 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2327 } /* ixgbe_if_media_change */
2329 /************************************************************************
2331 ************************************************************************/
2333 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2335 struct adapter *adapter = iflib_get_softc(ctx);
2336 struct ifnet *ifp = iflib_get_ifp(ctx);
2340 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2341 rctl &= (~IXGBE_FCTRL_UPE);
2342 if (ifp->if_flags & IFF_ALLMULTI)
2343 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2345 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2347 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2348 rctl &= (~IXGBE_FCTRL_MPE);
2349 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2351 if (ifp->if_flags & IFF_PROMISC) {
2352 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2353 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2354 } else if (ifp->if_flags & IFF_ALLMULTI) {
2355 rctl |= IXGBE_FCTRL_MPE;
2356 rctl &= ~IXGBE_FCTRL_UPE;
2357 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2360 } /* ixgbe_if_promisc_set */
2362 /************************************************************************
2363 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2364 ************************************************************************/
2366 ixgbe_msix_link(void *arg)
2368 struct adapter *adapter = arg;
2369 struct ixgbe_hw *hw = &adapter->hw;
2370 u32 eicr, eicr_mask;
2373 ++adapter->link_irq;
2375 /* Pause other interrupts */
2376 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2378 /* First get the cause */
2379 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2380 /* Be sure the queue bits are not cleared */
2381 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2382 /* Clear interrupt with write */
2383 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2385 /* Link status change */
2386 if (eicr & IXGBE_EICR_LSC) {
2387 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2388 iflib_admin_intr_deferred(adapter->ctx);
2391 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2392 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2393 (eicr & IXGBE_EICR_FLOW_DIR)) {
2394 /* This is probably overkill :) */
2395 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2396 return (FILTER_HANDLED);
2397 /* Disable the interrupt */
2398 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2399 GROUPTASK_ENQUEUE(&adapter->fdir_task);
2401 if (eicr & IXGBE_EICR_ECC) {
2402 device_printf(iflib_get_dev(adapter->ctx),
2403 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2404 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2407 /* Check for over temp condition */
2408 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2409 switch (adapter->hw.mac.type) {
2410 case ixgbe_mac_X550EM_a:
2411 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2413 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2414 IXGBE_EICR_GPI_SDP0_X550EM_a);
2415 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2416 IXGBE_EICR_GPI_SDP0_X550EM_a);
2417 retval = hw->phy.ops.check_overtemp(hw);
2418 if (retval != IXGBE_ERR_OVERTEMP)
2420 device_printf(iflib_get_dev(adapter->ctx),
2421 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2422 device_printf(iflib_get_dev(adapter->ctx),
2423 "System shutdown required!\n");
2426 if (!(eicr & IXGBE_EICR_TS))
2428 retval = hw->phy.ops.check_overtemp(hw);
2429 if (retval != IXGBE_ERR_OVERTEMP)
2431 device_printf(iflib_get_dev(adapter->ctx),
2432 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2433 device_printf(iflib_get_dev(adapter->ctx),
2434 "System shutdown required!\n");
2435 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2440 /* Check for VF message */
2441 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2442 (eicr & IXGBE_EICR_MAILBOX))
2443 GROUPTASK_ENQUEUE(&adapter->mbx_task);
2446 if (ixgbe_is_sfp(hw)) {
2447 /* Pluggable optics-related interrupt */
2448 if (hw->mac.type >= ixgbe_mac_X540)
2449 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2451 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2453 if (eicr & eicr_mask) {
2454 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2455 if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2456 GROUPTASK_ENQUEUE(&adapter->mod_task);
2459 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2460 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2461 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2462 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2463 if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2464 GROUPTASK_ENQUEUE(&adapter->msf_task);
2468 /* Check for fan failure */
2469 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2470 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2471 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2474 /* External PHY interrupt */
2475 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2476 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2477 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2478 GROUPTASK_ENQUEUE(&adapter->phy_task);
2481 /* Re-enable other interrupts */
2482 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2484 return (FILTER_HANDLED);
2485 } /* ixgbe_msix_link */
2487 /************************************************************************
2488 * ixgbe_sysctl_interrupt_rate_handler
2489 ************************************************************************/
2491 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2493 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2495 unsigned int reg, usec, rate;
2497 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2498 usec = ((reg & 0x0FF8) >> 3);
2500 rate = 500000 / usec;
2503 error = sysctl_handle_int(oidp, &rate, 0, req);
2504 if (error || !req->newptr)
2506 reg &= ~0xfff; /* default, no limitation */
2507 ixgbe_max_interrupt_rate = 0;
2508 if (rate > 0 && rate < 500000) {
2511 ixgbe_max_interrupt_rate = rate;
2512 reg |= ((4000000/rate) & 0xff8);
2514 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2517 } /* ixgbe_sysctl_interrupt_rate_handler */
2519 /************************************************************************
2520 * ixgbe_add_device_sysctls
2521 ************************************************************************/
2523 ixgbe_add_device_sysctls(if_ctx_t ctx)
2525 struct adapter *adapter = iflib_get_softc(ctx);
2526 device_t dev = iflib_get_dev(ctx);
2527 struct ixgbe_hw *hw = &adapter->hw;
2528 struct sysctl_oid_list *child;
2529 struct sysctl_ctx_list *ctx_list;
2531 ctx_list = device_get_sysctl_ctx(dev);
2532 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2534 /* Sysctls for all devices */
2535 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2536 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2537 IXGBE_SYSCTL_DESC_SET_FC);
2539 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2540 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2541 IXGBE_SYSCTL_DESC_ADV_SPEED);
2544 /* testing sysctls (for all devices) */
2545 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2546 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2547 "I", "PCI Power State");
2549 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2550 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2551 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2553 /* for X550 series devices */
2554 if (hw->mac.type >= ixgbe_mac_X550)
2555 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2556 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2557 "I", "DMA Coalesce");
2559 /* for WoL-capable devices */
2560 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2561 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2562 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2563 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2565 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2566 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2567 "I", "Enable/Disable Wake Up Filters");
2570 /* for X552/X557-AT devices */
2571 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2572 struct sysctl_oid *phy_node;
2573 struct sysctl_oid_list *phy_list;
2575 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2576 CTLFLAG_RD, NULL, "External PHY sysctls");
2577 phy_list = SYSCTL_CHILDREN(phy_node);
2579 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2580 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2581 "I", "Current External PHY Temperature (Celsius)");
2583 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2584 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2585 ixgbe_sysctl_phy_overtemp_occurred, "I",
2586 "External PHY High Temperature Event Occurred");
2589 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2590 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2591 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2592 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2594 } /* ixgbe_add_device_sysctls */
2596 /************************************************************************
2597 * ixgbe_allocate_pci_resources
2598 ************************************************************************/
2600 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2602 struct adapter *adapter = iflib_get_softc(ctx);
2603 device_t dev = iflib_get_dev(ctx);
2607 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2610 if (!(adapter->pci_mem)) {
2611 device_printf(dev, "Unable to allocate bus resource: memory\n");
2615 /* Save bus_space values for READ/WRITE_REG macros */
2616 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2617 adapter->osdep.mem_bus_space_handle =
2618 rman_get_bushandle(adapter->pci_mem);
2619 /* Set hw values for shared code */
2620 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2623 } /* ixgbe_allocate_pci_resources */
2625 /************************************************************************
2626 * ixgbe_detach - Device removal routine
2628 * Called when the driver is being removed.
2629 * Stops the adapter and deallocates all the resources
2630 * that were allocated for driver operation.
2632 * return 0 on success, positive on failure
2633 ************************************************************************/
2635 ixgbe_if_detach(if_ctx_t ctx)
2637 struct adapter *adapter = iflib_get_softc(ctx);
2638 device_t dev = iflib_get_dev(ctx);
2641 INIT_DEBUGOUT("ixgbe_detach: begin");
2643 if (ixgbe_pci_iov_detach(dev) != 0) {
2644 device_printf(dev, "SR-IOV in use; detach first.\n");
2648 iflib_config_gtask_deinit(&adapter->mod_task);
2649 iflib_config_gtask_deinit(&adapter->msf_task);
2650 iflib_config_gtask_deinit(&adapter->phy_task);
2651 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2652 iflib_config_gtask_deinit(&adapter->mbx_task);
2654 ixgbe_setup_low_power_mode(ctx);
2656 /* let hardware know driver is unloading */
2657 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2658 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2659 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2661 ixgbe_free_pci_resources(ctx);
2662 free(adapter->mta, M_IXGBE);
2665 } /* ixgbe_if_detach */
2667 /************************************************************************
2668 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2670 * Prepare the adapter/port for LPLU and/or WoL
2671 ************************************************************************/
2673 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2675 struct adapter *adapter = iflib_get_softc(ctx);
2676 struct ixgbe_hw *hw = &adapter->hw;
2677 device_t dev = iflib_get_dev(ctx);
2680 if (!hw->wol_enabled)
2681 ixgbe_set_phy_power(hw, FALSE);
2683 /* Limit power management flow to X550EM baseT */
2684 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2685 hw->phy.ops.enter_lplu) {
2686 /* Turn off support for APM wakeup. (Using ACPI instead) */
2687 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2688 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2691 * Clear Wake Up Status register to prevent any previous wakeup
2692 * events from waking us up immediately after we suspend.
2694 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2697 * Program the Wakeup Filter Control register with user filter
2700 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2702 /* Enable wakeups and power management in Wakeup Control */
2703 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2704 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2706 /* X550EM baseT adapters need a special LPLU flow */
2707 hw->phy.reset_disable = TRUE;
2709 error = hw->phy.ops.enter_lplu(hw);
2711 device_printf(dev, "Error entering LPLU: %d\n", error);
2712 hw->phy.reset_disable = FALSE;
2714 /* Just stop for other adapters */
2719 } /* ixgbe_setup_low_power_mode */
2721 /************************************************************************
2722 * ixgbe_shutdown - Shutdown entry point
2723 ************************************************************************/
2725 ixgbe_if_shutdown(if_ctx_t ctx)
2729 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2731 error = ixgbe_setup_low_power_mode(ctx);
2734 } /* ixgbe_if_shutdown */
2736 /************************************************************************
2740 ************************************************************************/
2742 ixgbe_if_suspend(if_ctx_t ctx)
2746 INIT_DEBUGOUT("ixgbe_suspend: begin");
2748 error = ixgbe_setup_low_power_mode(ctx);
2751 } /* ixgbe_if_suspend */
2753 /************************************************************************
2757 ************************************************************************/
2759 ixgbe_if_resume(if_ctx_t ctx)
2761 struct adapter *adapter = iflib_get_softc(ctx);
2762 device_t dev = iflib_get_dev(ctx);
2763 struct ifnet *ifp = iflib_get_ifp(ctx);
2764 struct ixgbe_hw *hw = &adapter->hw;
2767 INIT_DEBUGOUT("ixgbe_resume: begin");
2769 /* Read & clear WUS register */
2770 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2772 device_printf(dev, "Woken up by (WUS): %#010x\n",
2773 IXGBE_READ_REG(hw, IXGBE_WUS));
2774 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2775 /* And clear WUFC until next low-power transition */
2776 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2779 * Required after D3->D0 transition;
2780 * will re-advertise all previous advertised speeds
2782 if (ifp->if_flags & IFF_UP)
2786 } /* ixgbe_if_resume */
2788 /************************************************************************
2789 * ixgbe_if_mtu_set - Ioctl mtu entry point
2791 * Return 0 on success, EINVAL on failure
2792 ************************************************************************/
2794 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2796 struct adapter *adapter = iflib_get_softc(ctx);
2799 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2801 if (mtu > IXGBE_MAX_MTU) {
2804 adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2808 } /* ixgbe_if_mtu_set */
2810 /************************************************************************
2811 * ixgbe_if_crcstrip_set
2812 ************************************************************************/
2814 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2816 struct adapter *sc = iflib_get_softc(ctx);
2817 struct ixgbe_hw *hw = &sc->hw;
2818 /* crc stripping is set in two places:
2819 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2820 * IXGBE_RDRXCTL (set by the original driver in
2821 * ixgbe_setup_hw_rsc() called in init_locked.
2822 * We disable the setting when netmap is compiled in).
2823 * We update the values here, but also in ixgbe.c because
2824 * init_locked sometimes is called outside our control.
2828 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2829 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2832 D("%s read HLREG 0x%x rxc 0x%x",
2833 onoff ? "enter" : "exit", hl, rxc);
2835 /* hw requirements ... */
2836 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2837 rxc |= IXGBE_RDRXCTL_RSCACKC;
2838 if (onoff && !crcstrip) {
2839 /* keep the crc. Fast rx */
2840 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2841 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2843 /* reset default mode */
2844 hl |= IXGBE_HLREG0_RXCRCSTRP;
2845 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2849 D("%s write HLREG 0x%x rxc 0x%x",
2850 onoff ? "enter" : "exit", hl, rxc);
2852 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2853 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2854 } /* ixgbe_if_crcstrip_set */
2856 /*********************************************************************
2857 * ixgbe_if_init - Init entry point
2859 * Used in two ways: It is used by the stack as an init
2860 * entry point in network interface structure. It is also
2861 * used by the driver as a hw/sw initialization routine to
2862 * get to a consistent state.
2864 * Return 0 on success, positive on failure
2865 **********************************************************************/
/*
 * ixgbe_if_init - bring the hardware to a consistent running state.
 * Programs MAC address, TX/RX units, multicast filters, interrupt routing
 * (MSI-X IVARs or legacy), SFP identification, link, flow control, VLAN
 * support, DMA coalescing, and finally enables interrupts.
 * NOTE(review): local variable declarations (i, j, mhadd, txdctl, rxdctl,
 * rxctrl, ctrl_ext, err) were dropped by this extraction.
 */
2867 ixgbe_if_init(if_ctx_t ctx)
2869 struct adapter *adapter = iflib_get_softc(ctx);
2870 struct ifnet *ifp = iflib_get_ifp(ctx);
2871 device_t dev = iflib_get_dev(ctx);
2872 struct ixgbe_hw *hw = &adapter->hw;
2873 struct ix_rx_queue *rx_que;
2874 struct ix_tx_queue *tx_que;
2881 INIT_DEBUGOUT("ixgbe_if_init: begin");
2883 /* Queue indices may change with IOV mode */
2884 ixgbe_align_all_queue_indices(adapter);
2886 /* reprogram the RAR[0] in case user changed it. */
2887 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2889 /* Get the latest mac address, User can use a LAA */
2890 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2891 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2892 hw->addr_ctrl.rar_used_count = 1;
2896 ixgbe_initialize_iov(adapter);
/* Program the transmit descriptor rings. */
2898 ixgbe_initialize_transmit_units(ctx);
2900 /* Setup Multicast table */
2901 ixgbe_if_multi_set(ctx);
2903 /* Determine the correct mbuf pool, based on frame size */
2904 if (adapter->max_frame_size <= MCLBYTES)
2905 adapter->rx_mbuf_sz = MCLBYTES;
2907 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2909 /* Configure RX settings */
2910 ixgbe_initialize_receive_units(ctx);
2912 /* Enable SDP & MSI-X interrupts based on adapter */
2913 ixgbe_config_gpie(adapter);
/* Jumbo frames: program the max frame size register (MHADD/MAXFRS). */
2916 if (ifp->if_mtu > ETHERMTU) {
2917 /* aka IXGBE_MAXFRS on 82599 and newer */
2918 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2919 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2920 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2921 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2924 /* Now enable all the queues */
2925 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2926 struct tx_ring *txr = &tx_que->txr;
2928 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2929 txdctl |= IXGBE_TXDCTL_ENABLE;
2930 /* Set WTHRESH to 8, burst writeback */
2931 txdctl |= (8 << 16);
2933 * When the internal queue falls below PTHRESH (32),
2934 * start prefetching as long as there are at least
2935 * HTHRESH (1) buffers ready. The values are taken
2936 * from the Intel linux driver 3.8.21.
2937 * Prefetching enables tx line rate even with 1 queue.
2939 txdctl |= (32 << 0) | (1 << 8);
2940 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX ring and poll briefly until the hardware reports it up. */
2943 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2944 struct rx_ring *rxr = &rx_que->rxr;
2946 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2947 if (hw->mac.type == ixgbe_mac_82598EB) {
2953 rxdctl &= ~0x3FFFFF;
2956 rxdctl |= IXGBE_RXDCTL_ENABLE;
2957 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2958 for (j = 0; j < 10; j++) {
2959 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2960 IXGBE_RXDCTL_ENABLE)
2968 /* Enable Receive engine */
2969 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2970 if (hw->mac.type == ixgbe_mac_82598EB)
2971 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2972 rxctrl |= IXGBE_RXCTRL_RXEN;
2973 ixgbe_enable_rx_dma(hw, rxctrl);
2975 /* Set up MSI/MSI-X routing */
2976 if (ixgbe_enable_msix) {
2977 ixgbe_configure_ivars(adapter);
2978 /* Set up auto-mask */
2979 if (hw->mac.type == ixgbe_mac_82598EB)
2980 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2982 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2983 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2985 } else { /* Simple settings for Legacy/MSI */
2986 ixgbe_set_ivar(adapter, 0, 0, 0);
2987 ixgbe_set_ivar(adapter, 0, 0, 1);
2988 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
/* Flow Director initialization (no-op unless the feature is enabled). */
2991 ixgbe_init_fdir(adapter);
2994 * Check on any SFP devices that
2995 * need to be kick-started
2997 if (hw->phy.type == ixgbe_phy_none) {
2998 err = hw->phy.ops.identify(hw);
2999 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3001 "Unsupported SFP+ module type was detected.\n");
3006 /* Set moderation on the Link interrupt */
3007 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3009 /* Enable power to the phy. */
3010 ixgbe_set_phy_power(hw, TRUE);
3012 /* Config/Enable Link */
3013 ixgbe_config_link(adapter);
3015 /* Hardware Packet Buffer & Flow Control setup */
3016 ixgbe_config_delay_values(adapter);
3018 /* Initialize the FC settings */
3021 /* Set up VLAN support and filter */
3022 ixgbe_setup_vlan_hw_support(ctx);
3024 /* Setup DMA Coalescing */
3025 ixgbe_config_dmac(adapter);
3027 /* And now turn on interrupts */
3028 ixgbe_if_enable_intr(ctx);
3030 /* Enable the use of the MBX by the VF's */
3031 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3032 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3033 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3034 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3037 } /* ixgbe_init_locked */
3039 /************************************************************************
 * ixgbe_set_ivar
3042 * Setup the correct IVAR register for a particular MSI-X interrupt
3043 * (yes this is all very magic and confusing :)
3044 * - entry is the register array entry
3045 * - vector is the MSI-X vector for this queue
3046 * - type is RX/TX/MISC
3047 ************************************************************************/
/*
 * ixgbe_set_ivar - route one MSI-X cause to a vector via the IVAR registers.
 *   entry  - register array entry (queue index, or misc-cause index)
 *   vector - MSI-X vector to assign
 *   type   - 0 = RX, 1 = TX, -1 = misc/other causes
 * The register layout differs between 82598 and the 82599/X540/X550 family.
 */
3049 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3051 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the "allocation valid" bit set in the vector field. */
3054 vector |= IXGBE_IVAR_ALLOC_VAL;
3056 switch (hw->mac.type) {
3057 case ixgbe_mac_82598EB:
3059 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3061 entry += (type * 64);
/* 82598: four 8-bit fields per 32-bit IVAR register. */
3062 index = (entry >> 2) & 0x1F;
3063 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3064 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3065 ivar |= (vector << (8 * (entry & 0x3)));
3066 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3068 case ixgbe_mac_82599EB:
3069 case ixgbe_mac_X540:
3070 case ixgbe_mac_X550:
3071 case ixgbe_mac_X550EM_x:
3072 case ixgbe_mac_X550EM_a:
3073 if (type == -1) { /* MISC IVAR */
3074 index = (entry & 1) * 8;
3075 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3076 ivar &= ~(0xFF << index);
3077 ivar |= (vector << index);
3078 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3079 } else { /* RX/TX IVARS */
3080 index = (16 * (entry & 1)) + (8 * type);
3081 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3082 ivar &= ~(0xFF << index);
3083 ivar |= (vector << index);
3084 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3089 } /* ixgbe_set_ivar */
3091 /************************************************************************
3092 * ixgbe_configure_ivars
3093 ************************************************************************/
/*
 * ixgbe_configure_ivars - program IVAR routing for every RX and TX queue
 * plus the link (admin) interrupt, and seed an initial EITR moderation
 * value derived from the ixgbe_max_interrupt_rate tunable.
 * NOTE(review): the declaration/initialization of "newitr" was dropped by
 * this extraction -- verify the fallback value in the full source.
 */
3095 ixgbe_configure_ivars(struct adapter *adapter)
3097 struct ix_rx_queue *rx_que = adapter->rx_queues;
3098 struct ix_tx_queue *tx_que = adapter->tx_queues;
3101 if (ixgbe_max_interrupt_rate > 0)
/* EITR counts in 2us units on a 4-bit-shifted field; mask keeps alignment. */
3102 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3105 * Disable DMA coalescing if interrupt moderation is
3112 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3113 struct rx_ring *rxr = &rx_que->rxr;
3115 /* First the RX queue entry */
3116 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3118 /* Set an Initial EITR value */
3119 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3121 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3122 struct tx_ring *txr = &tx_que->txr;
3124 /* ... and the TX */
3125 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3127 /* For the Link interrupt */
3128 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3129 } /* ixgbe_configure_ivars */
3131 /************************************************************************
 * ixgbe_config_gpie
3133 ************************************************************************/
/*
 * ixgbe_config_gpie - configure the General Purpose Interrupt Enable
 * register: enhanced MSI-X mode when running MSI-X, plus the SDP pins
 * used for fan-failure, thermal-sensor, and SFP link-detection events
 * on the MAC types that wire them up.
 */
3135 ixgbe_config_gpie(struct adapter *adapter)
3137 struct ixgbe_hw *hw = &adapter->hw;
3140 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3142 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3143 /* Enable Enhanced MSI-X mode */
3144 gpie |= IXGBE_GPIE_MSIX_MODE
3146 | IXGBE_GPIE_PBA_SUPPORT
3150 /* Fan Failure Interrupt */
3151 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3152 gpie |= IXGBE_SDP1_GPIEN;
3154 /* Thermal Sensor Interrupt */
3155 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3156 gpie |= IXGBE_SDP0_GPIEN_X540;
3158 /* Link detection */
3159 switch (hw->mac.type) {
3160 case ixgbe_mac_82599EB:
3161 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3163 case ixgbe_mac_X550EM_x:
3164 case ixgbe_mac_X550EM_a:
3165 gpie |= IXGBE_SDP0_GPIEN_X540;
3171 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3173 } /* ixgbe_config_gpie */
3175 /************************************************************************
3176 * ixgbe_config_delay_values
3178 * Requires adapter->max_frame_size to be set.
3179 ************************************************************************/
/*
 * ixgbe_config_delay_values - compute flow-control high/low water marks.
 * Uses the delay-value (DV) formulas appropriate to the MAC generation,
 * converts bit-times to KB, and sizes against packet buffer 0 (RXPBSIZE).
 * Requires adapter->max_frame_size to be set.
 */
3181 ixgbe_config_delay_values(struct adapter *adapter)
3183 struct ixgbe_hw *hw = &adapter->hw;
3184 u32 rxpb, frame, size, tmp;
3186 frame = adapter->max_frame_size;
3188 /* Calculate High Water */
3189 switch (hw->mac.type) {
3190 case ixgbe_mac_X540:
3191 case ixgbe_mac_X550:
3192 case ixgbe_mac_X550EM_x:
3193 case ixgbe_mac_X550EM_a:
3194 tmp = IXGBE_DV_X540(frame, frame);
3197 tmp = IXGBE_DV(frame, frame);
/* Convert bit-times to kilobytes and subtract from the RX packet buffer. */
3200 size = IXGBE_BT2KB(tmp);
3201 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3202 hw->fc.high_water[0] = rxpb - size;
3204 /* Now calculate Low Water */
3205 switch (hw->mac.type) {
3206 case ixgbe_mac_X540:
3207 case ixgbe_mac_X550:
3208 case ixgbe_mac_X550EM_x:
3209 case ixgbe_mac_X550EM_a:
3210 tmp = IXGBE_LOW_DV_X540(frame);
3213 tmp = IXGBE_LOW_DV(frame);
3216 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3218 hw->fc.pause_time = IXGBE_FC_PAUSE;
3219 hw->fc.send_xon = TRUE;
3220 } /* ixgbe_config_delay_values */
3222 /************************************************************************
3223 * ixgbe_set_multi - Multicast Update
3225 * Called whenever multicast address list is updated.
3226 ************************************************************************/
/*
 * ixgbe_mc_filter_apply - per-address callback used by if_multi_apply().
 * Copies one link-layer multicast address into the adapter's MTA array
 * at slot "count", tagging it with the adapter's VMDq pool.  Skips
 * non-AF_LINK entries and stops adding at MAX_NUM_MULTICAST_ADDRESSES.
 * NOTE(review): return statements were dropped by this extraction.
 */
3228 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3230 struct adapter *adapter = arg;
3231 struct ixgbe_mc_addr *mta = adapter->mta;
3233 if (ifma->ifma_addr->sa_family != AF_LINK)
3235 if (count == MAX_NUM_MULTICAST_ADDRESSES)
3237 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3238 mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3239 mta[count].vmdq = adapter->pool;
3242 } /* ixgbe_mc_filter_apply */
/*
 * ixgbe_if_multi_set - rebuild the hardware multicast filter table.
 * Collects the interface's multicast list into the MTA array, sets the
 * promiscuous/all-multi bits in FCTRL according to interface flags and
 * list overflow, and pushes the address list to the shared code.
 */
3245 ixgbe_if_multi_set(if_ctx_t ctx)
3247 struct adapter *adapter = iflib_get_softc(ctx);
3248 struct ixgbe_mc_addr *mta;
3249 struct ifnet *ifp = iflib_get_ifp(ctx);
3254 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
/* Clear the staging table before re-collecting addresses. */
3257 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3259 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3261 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3262 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3263 if (ifp->if_flags & IFF_PROMISC)
3264 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3265 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3266 ifp->if_flags & IFF_ALLMULTI) {
3267 fctrl |= IXGBE_FCTRL_MPE;
3268 fctrl &= ~IXGBE_FCTRL_UPE;
3270 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3272 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program exact filters when the list fits in the hardware table. */
3274 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3275 update_ptr = (u8 *)mta;
3276 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3277 ixgbe_mc_array_itr, TRUE);
3280 } /* ixgbe_if_multi_set */
3282 /************************************************************************
3283 * ixgbe_mc_array_itr
3285 * An iterator function needed by the multicast shared code.
3286 * It feeds the shared code routine the addresses in the
3287 * array of ixgbe_set_multi() one by one.
3288 ************************************************************************/
/*
 * ixgbe_mc_array_itr - iterator handed to ixgbe_update_mc_addr_list().
 * Yields one ixgbe_mc_addr entry from the MTA array per call, advancing
 * *update_ptr to the next entry.
 * NOTE(review): the lines returning the address and storing *vmdq were
 * dropped by this extraction.
 */
3290 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3292 struct ixgbe_mc_addr *mta;
3294 mta = (struct ixgbe_mc_addr *)*update_ptr;
3297 *update_ptr = (u8*)(mta + 1);
3300 } /* ixgbe_mc_array_itr */
3302 /************************************************************************
3303 * ixgbe_local_timer - Timer routine
3305 * Checks for link status, updates statistics,
3306 * and runs the watchdog check.
3307 ************************************************************************/
/*
 * ixgbe_if_timer - periodic iflib timer callback.
 * Probes for newly-inserted SFP optics, refreshes cached link state from
 * the hardware, and defers the rest of the work to the admin task.
 */
3309 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3311 struct adapter *adapter = iflib_get_softc(ctx);
3316 /* Check for pluggable optics */
3317 if (adapter->sfp_probe)
3318 if (!ixgbe_sfp_probe(ctx))
3319 return; /* Nothing to do */
3321 ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3322 &adapter->link_up, 0);
3324 /* Fire off the adminq task */
3325 iflib_admin_intr_deferred(ctx);
3327 } /* ixgbe_if_timer */
3329 /************************************************************************
 * ixgbe_sfp_probe
3332 * Determine if a port had optics inserted.
3333 ************************************************************************/
/*
 * ixgbe_sfp_probe - check whether optics have been inserted into a port
 * that previously had none (only meaningful for the NL PHY type).
 * Resets the PHY on detection and reports unsupported modules.
 * Returns TRUE once supported optics are present, FALSE otherwise.
 * NOTE(review): the success-path "result = TRUE" and final return were
 * dropped by this extraction.
 */
3335 ixgbe_sfp_probe(if_ctx_t ctx)
3337 struct adapter *adapter = iflib_get_softc(ctx);
3338 struct ixgbe_hw *hw = &adapter->hw;
3339 device_t dev = iflib_get_dev(ctx);
3340 bool result = FALSE;
3342 if ((hw->phy.type == ixgbe_phy_nl) &&
3343 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3344 s32 ret = hw->phy.ops.identify_sfp(hw);
3347 ret = hw->phy.ops.reset(hw);
3348 adapter->sfp_probe = FALSE;
3349 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3350 device_printf(dev, "Unsupported SFP+ module detected!");
3352 "Reload driver with supported module.\n");
3355 device_printf(dev, "SFP+ module detected!\n");
3356 /* We now have supported optics */
3362 } /* ixgbe_sfp_probe */
3364 /************************************************************************
3365 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3366 ************************************************************************/
/*
 * ixgbe_handle_mod - deferred task for SFP module insert/remove interrupts.
 * Applies the crosstalk-fix cage-presence check where needed, identifies
 * the new module, performs the MAC-specific SFP setup, and on success
 * enqueues the multispeed-fiber (msf) task to renegotiate the link.
 * sfp_reinit is set for the duration and cleared on the error exit path.
 */
3368 ixgbe_handle_mod(void *context)
3370 if_ctx_t ctx = context;
3371 struct adapter *adapter = iflib_get_softc(ctx);
3372 struct ixgbe_hw *hw = &adapter->hw;
3373 device_t dev = iflib_get_dev(ctx);
3374 u32 err, cage_full = 0;
3376 adapter->sfp_reinit = 1;
/* Crosstalk errata: confirm a module is actually in the cage via ESDP. */
3377 if (adapter->hw.need_crosstalk_fix) {
3378 switch (hw->mac.type) {
3379 case ixgbe_mac_82599EB:
3380 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3383 case ixgbe_mac_X550EM_x:
3384 case ixgbe_mac_X550EM_a:
3385 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3393 goto handle_mod_out;
3396 err = hw->phy.ops.identify_sfp(hw);
3397 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3399 "Unsupported SFP+ module type was detected.\n");
3400 goto handle_mod_out;
/* 82598 resets the PHY directly; later MACs run the SFP setup routine. */
3403 if (hw->mac.type == ixgbe_mac_82598EB)
3404 err = hw->phy.ops.reset(hw);
3406 err = hw->mac.ops.setup_sfp(hw);
3408 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3410 "Setup failure - unsupported SFP+ module type.\n");
3411 goto handle_mod_out;
3413 GROUPTASK_ENQUEUE(&adapter->msf_task);
3417 adapter->sfp_reinit = 0;
3418 } /* ixgbe_handle_mod */
3421 /************************************************************************
3422 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3423 ************************************************************************/
/*
 * ixgbe_handle_msf - deferred task for multispeed-fiber interrupts.
 * Runs only while a module re-init is in flight (sfp_reinit == 1):
 * re-reads supported PHY layers, renegotiates/sets up the link, and
 * rebuilds the ifmedia list to match the new module's capabilities.
 * NOTE(review): declarations of "autoneg"/"negotiate" were dropped by
 * this extraction.
 */
3425 ixgbe_handle_msf(void *context)
3427 if_ctx_t ctx = context;
3428 struct adapter *adapter = iflib_get_softc(ctx);
3429 struct ixgbe_hw *hw = &adapter->hw;
3433 if (adapter->sfp_reinit != 1)
3436 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3437 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3439 autoneg = hw->phy.autoneg_advertised;
3440 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3441 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3442 if (hw->mac.ops.setup_link)
3443 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3445 /* Adjust media types shown in ifconfig */
3446 ifmedia_removeall(adapter->media);
3447 ixgbe_add_media_types(adapter->ctx);
3448 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3450 adapter->sfp_reinit = 0;
3451 } /* ixgbe_handle_msf */
3453 /************************************************************************
3454 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3455 ************************************************************************/
/*
 * ixgbe_handle_phy - deferred task for external-PHY (LASI) interrupts.
 * Delegates to the shared-code LASI handler and logs over-temperature
 * or other handler failures.
 * NOTE(review): the non-overtemp error check ("else if (error)") line
 * appears to be missing from this extraction.
 */
3457 ixgbe_handle_phy(void *context)
3459 if_ctx_t ctx = context;
3460 struct adapter *adapter = iflib_get_softc(ctx);
3461 struct ixgbe_hw *hw = &adapter->hw;
3464 error = hw->phy.ops.handle_lasi(hw);
3465 if (error == IXGBE_ERR_OVERTEMP)
3466 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3468 device_printf(adapter->dev,
3469 "Error handling LASI interrupt: %d\n", error);
3470 } /* ixgbe_handle_phy */
3472 /************************************************************************
3473 * ixgbe_if_stop - Stop the hardware
3475 * Disables all traffic on the adapter by issuing a
3476 * global reset on the MAC and deallocates TX/RX buffers.
3477 ************************************************************************/
/*
 * ixgbe_if_stop - disable all traffic on the adapter.
 * Issues a MAC stop/reset, turns off the TX laser, marks link down for
 * the stack, and restores the default RAR[0] entry.
 * Note: adapter_stopped is cleared first so ixgbe_stop_adapter() runs.
 */
3479 ixgbe_if_stop(if_ctx_t ctx)
3481 struct adapter *adapter = iflib_get_softc(ctx);
3482 struct ixgbe_hw *hw = &adapter->hw;
3484 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3487 hw->adapter_stopped = FALSE;
3488 ixgbe_stop_adapter(hw);
3489 if (hw->mac.type == ixgbe_mac_82599EB)
3490 ixgbe_stop_mac_link_on_d3_82599(hw);
3491 /* Turn off the laser - noop with no optics */
3492 ixgbe_disable_tx_laser(hw);
3494 /* Update the stack */
3495 adapter->link_up = FALSE;
3496 ixgbe_if_update_admin_status(ctx);
3498 /* reprogram the RAR[0] in case user changed it. */
3499 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3502 } /* ixgbe_if_stop */
3504 /************************************************************************
3505 * ixgbe_update_link_status - Update OS on link state
3507 * Note: Only updates the OS on the cached link state.
3508 * The real check of the hardware only happens with
3510 ************************************************************************/
/*
 * ixgbe_if_update_admin_status - push cached link state to the OS.
 * On a down->up transition: logs the speed, re-enables flow control and
 * DMA coalescing, and notifies iflib; on up->down: logs and notifies.
 * SR-IOV VFs are pinged on either transition.  Also refreshes statistics
 * and re-arms the link-status-change interrupt.
 */
3512 ixgbe_if_update_admin_status(if_ctx_t ctx)
3514 struct adapter *adapter = iflib_get_softc(ctx);
3515 device_t dev = iflib_get_dev(ctx);
3517 if (adapter->link_up) {
3518 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the shared-code encoding for 10G full duplex. */
3520 device_printf(dev, "Link is up %d Gbps %s \n",
3521 ((adapter->link_speed == 128) ? 10 : 1),
3523 adapter->link_active = TRUE;
3524 /* Update any Flow Control changes */
3525 ixgbe_fc_enable(&adapter->hw);
3526 /* Update DMA coalescing config */
3527 ixgbe_config_dmac(adapter);
3528 /* should actually be negotiated value */
3529 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3531 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3532 ixgbe_ping_all_vfs(adapter);
3534 } else { /* Link down */
3535 if (adapter->link_active == TRUE) {
3537 device_printf(dev, "Link is Down\n");
3538 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3539 adapter->link_active = FALSE;
3540 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3541 ixgbe_ping_all_vfs(adapter);
3545 ixgbe_update_stats_counters(adapter);
3547 /* Re-enable link interrupts */
3548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3549 } /* ixgbe_if_update_admin_status */
3551 /************************************************************************
3552 * ixgbe_config_dmac - Configure DMA Coalescing
3553 ************************************************************************/
/*
 * ixgbe_config_dmac - configure DMA Coalescing (X550 and newer only).
 * Reprograms the hardware only when the watchdog timer or link speed
 * has changed since the last configuration.
 */
3555 ixgbe_config_dmac(struct adapter *adapter)
3557 struct ixgbe_hw *hw = &adapter->hw;
3558 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMAC exists only on X550-class MACs with a dmac_config op. */
3560 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3563 if (dcfg->watchdog_timer ^ adapter->dmac ||
3564 dcfg->link_speed ^ adapter->link_speed) {
3565 dcfg->watchdog_timer = adapter->dmac;
3566 dcfg->fcoe_en = FALSE;
3567 dcfg->link_speed = adapter->link_speed;
3570 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3571 dcfg->watchdog_timer, dcfg->link_speed);
3573 hw->mac.ops.dmac_config(hw);
3575 } /* ixgbe_config_dmac */
3577 /************************************************************************
3578 * ixgbe_if_enable_intr
3579 ************************************************************************/
/*
 * ixgbe_if_enable_intr - enable device interrupts.
 * Builds the "other causes" EIMS mask per MAC type (ECC, thermal sensor,
 * SFP SDP pins, fan failure, mailbox, Flow Director), programs auto-clear
 * for MSI-X (excluding link/other causes), then enables each RX queue
 * vector individually via ixgbe_enable_queue().
 */
3581 ixgbe_if_enable_intr(if_ctx_t ctx)
3583 struct adapter *adapter = iflib_get_softc(ctx);
3584 struct ixgbe_hw *hw = &adapter->hw;
3585 struct ix_rx_queue *que = adapter->rx_queues;
3588 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3590 switch (adapter->hw.mac.type) {
3591 case ixgbe_mac_82599EB:
3592 mask |= IXGBE_EIMS_ECC;
3593 /* Temperature sensor on some adapters */
3594 mask |= IXGBE_EIMS_GPI_SDP0;
3595 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3596 mask |= IXGBE_EIMS_GPI_SDP1;
3597 mask |= IXGBE_EIMS_GPI_SDP2;
3599 case ixgbe_mac_X540:
3600 /* Detect if Thermal Sensor is enabled */
3601 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3602 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3603 mask |= IXGBE_EIMS_TS;
3604 mask |= IXGBE_EIMS_ECC;
3606 case ixgbe_mac_X550:
3607 /* MAC thermal sensor is automatically enabled */
3608 mask |= IXGBE_EIMS_TS;
3609 mask |= IXGBE_EIMS_ECC;
3611 case ixgbe_mac_X550EM_x:
3612 case ixgbe_mac_X550EM_a:
3613 /* Some devices use SDP0 for important information */
3614 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3615 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3616 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3617 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3618 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3619 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3620 mask |= IXGBE_EICR_GPI_SDP0_X540;
3621 mask |= IXGBE_EIMS_ECC;
3627 /* Enable Fan Failure detection */
3628 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3629 mask |= IXGBE_EIMS_GPI_SDP1;
/* SR-IOV mailbox interrupt for VF-to-PF messages. */
3631 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3632 mask |= IXGBE_EIMS_MAILBOX;
3633 /* Enable Flow Director */
3634 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3635 mask |= IXGBE_EIMS_FLOW_DIR;
3637 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3639 /* With MSI-X we use auto clear */
3640 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3641 mask = IXGBE_EIMS_ENABLE_MASK;
3642 /* Don't autoclear Link */
3643 mask &= ~IXGBE_EIMS_OTHER;
3644 mask &= ~IXGBE_EIMS_LSC;
3645 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3646 mask &= ~IXGBE_EIMS_MAILBOX;
3647 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3651 * Now enable all queues, this is done separately to
3652 * allow for handling the extended (beyond 32) MSI-X
3653 * vectors that can be used by 82599
3655 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3656 ixgbe_enable_queue(adapter, que->msix);
3658 IXGBE_WRITE_FLUSH(hw);
3660 } /* ixgbe_if_enable_intr */
3662 /************************************************************************
3663 * ixgbe_disable_intr
3664 ************************************************************************/
/*
 * ixgbe_if_disable_intr - mask all device interrupts.
 * Clears MSI-X auto-clear, then writes the interrupt-mask-clear
 * registers; 82598 uses a single EIMC while later MACs need EIMC plus
 * both extended EIMC_EX banks.
 */
3666 ixgbe_if_disable_intr(if_ctx_t ctx)
3668 struct adapter *adapter = iflib_get_softc(ctx);
3670 if (adapter->intr_type == IFLIB_INTR_MSIX)
3671 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3672 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3675 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3676 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3677 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3679 IXGBE_WRITE_FLUSH(&adapter->hw);
3681 } /* ixgbe_if_disable_intr */
3683 /************************************************************************
3684 * ixgbe_if_rx_queue_intr_enable
3685 ************************************************************************/
/*
 * ixgbe_if_rx_queue_intr_enable - iflib per-queue interrupt re-enable.
 * Re-arms the interrupt for the given RX queue.
 * NOTE(review): this passes the ring index (rxr.me) to
 * ixgbe_enable_queue(), which elsewhere is called with the MSI-X vector
 * (que->msix) -- confirm against the full source which is intended.
 */
3687 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3689 struct adapter *adapter = iflib_get_softc(ctx);
3690 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3692 ixgbe_enable_queue(adapter, que->rxr.me);
3695 } /* ixgbe_if_rx_queue_intr_enable */
3697 /************************************************************************
3698 * ixgbe_enable_queue
3699 ************************************************************************/
/*
 * ixgbe_enable_queue - unmask the interrupt for one queue vector.
 * 82598 uses the single EIMS register; later MACs split the 64-bit
 * queue bitmap across EIMS_EX(0) (low 32) and EIMS_EX(1) (high 32).
 * NOTE(review): "(u64)(1 << vector)" shifts a 32-bit int before
 * widening, so vectors >= 32 would be lost -- verify against the full
 * source (upstream uses 1ULL here).
 */
3701 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3703 struct ixgbe_hw *hw = &adapter->hw;
3704 u64 queue = (u64)(1 << vector);
3707 if (hw->mac.type == ixgbe_mac_82598EB) {
3708 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3709 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3711 mask = (queue & 0xFFFFFFFF);
3713 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3714 mask = (queue >> 32);
3716 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3718 } /* ixgbe_enable_queue */
3720 /************************************************************************
3721 * ixgbe_disable_queue
3722 ************************************************************************/
/*
 * ixgbe_disable_queue - mask the interrupt for one queue vector.
 * Mirror of ixgbe_enable_queue() using the interrupt-mask-clear
 * registers (EIMC / EIMC_EX) instead of EIMS.
 * NOTE(review): same 32-bit shift hazard as ixgbe_enable_queue() --
 * "(u64)(1 << vector)" loses vectors >= 32; verify against full source.
 */
3724 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3726 struct ixgbe_hw *hw = &adapter->hw;
3727 u64 queue = (u64)(1 << vector);
3730 if (hw->mac.type == ixgbe_mac_82598EB) {
3731 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3732 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3734 mask = (queue & 0xFFFFFFFF);
3736 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3737 mask = (queue >> 32);
3739 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3741 } /* ixgbe_disable_queue */
3743 /************************************************************************
3744 * ixgbe_intr - Legacy Interrupt Service Routine
3745 ************************************************************************/
/*
 * ixgbe_intr - legacy (INTx/MSI) interrupt service routine.
 * Reads and dispatches EICR causes: fan failure, link status change
 * (deferred to the admin task), SFP module insertion (mod_task),
 * multispeed-fiber events (msf_task, guarded by an atomic cmpset on
 * sfp_reinit), and external-PHY LASI (phy_task).  Returns
 * FILTER_SCHEDULE_THREAD so iflib services the RX queue.
 */
3747 ixgbe_intr(void *arg)
3749 struct adapter *adapter = arg;
3750 struct ix_rx_queue *que = adapter->rx_queues;
3751 struct ixgbe_hw *hw = &adapter->hw;
3752 if_ctx_t ctx = adapter->ctx;
3753 u32 eicr, eicr_mask;
/* Reading EICR clears it; a zero read means the interrupt wasn't ours. */
3755 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3759 ixgbe_if_enable_intr(ctx);
3760 return (FILTER_HANDLED);
3763 /* Check for fan failure */
3764 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3765 (eicr & IXGBE_EICR_GPI_SDP1)) {
3766 device_printf(adapter->dev,
3767 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3768 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3771 /* Link status change */
3772 if (eicr & IXGBE_EICR_LSC) {
3773 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3774 iflib_admin_intr_deferred(ctx);
3777 if (ixgbe_is_sfp(hw)) {
3778 /* Pluggable optics-related interrupt */
3779 if (hw->mac.type >= ixgbe_mac_X540)
3780 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3782 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3784 if (eicr & eicr_mask) {
3785 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3786 GROUPTASK_ENQUEUE(&adapter->mod_task);
3789 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3790 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3791 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3792 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
/* Only one msf task at a time: atomically claim sfp_reinit 0 -> 1. */
3793 if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3794 GROUPTASK_ENQUEUE(&adapter->msf_task);
3798 /* External PHY interrupt */
3799 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3800 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3801 GROUPTASK_ENQUEUE(&adapter->phy_task);
3803 return (FILTER_SCHEDULE_THREAD);
3806 /************************************************************************
3807 * ixgbe_free_pci_resources
3808 ************************************************************************/
/*
 * ixgbe_free_pci_resources - release interrupt and BAR resources.
 * Frees the admin IRQ (MSI-X only), each per-queue IRQ, and the
 * memory-mapped register BAR.
 */
3810 ixgbe_free_pci_resources(if_ctx_t ctx)
3812 struct adapter *adapter = iflib_get_softc(ctx);
3813 struct ix_rx_queue *que = adapter->rx_queues;
3814 device_t dev = iflib_get_dev(ctx);
3816 /* Release all msix queue resources */
3817 if (adapter->intr_type == IFLIB_INTR_MSIX)
3818 iflib_irq_free(ctx, &adapter->irq);
3821 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3822 iflib_irq_free(ctx, &que->que_irq);
3827 * Free link/admin interrupt
3829 if (adapter->pci_mem != NULL)
3830 bus_release_resource(dev, SYS_RES_MEMORY,
3831 PCIR_BAR(0), adapter->pci_mem);
3833 } /* ixgbe_free_pci_resources */
3835 /************************************************************************
3836 * ixgbe_sysctl_flowcntl
3838 * SYSCTL wrapper around setting Flow Control
3839 ************************************************************************/
/*
 * ixgbe_sysctl_flowcntl - sysctl handler wrapping ixgbe_set_flowcntl().
 * Reports the current flow-control mode; on write, applies the new mode
 * only if it differs from the current one.
 */
3841 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3843 struct adapter *adapter;
3846 adapter = (struct adapter *)arg1;
3847 fc = adapter->hw.fc.current_mode;
3849 error = sysctl_handle_int(oidp, &fc, 0, req);
/* Read-only access or handler error: nothing further to apply. */
3850 if ((error) || (req->newptr == NULL))
3853 /* Don't bother if it's not changed */
3854 if (fc == adapter->hw.fc.current_mode)
3857 return ixgbe_set_flowcntl(adapter, fc);
3858 } /* ixgbe_sysctl_flowcntl */
3860 /************************************************************************
3861 * ixgbe_set_flowcntl - Set flow control
3863 * Flow control values:
3868 ************************************************************************/
/*
 * ixgbe_set_flowcntl - apply a flow-control mode.
 * Pause modes disable per-queue RX drop (multiqueue only); "none"
 * re-enables it.  Autonegotiation of FC is disabled since the user is
 * forcing a value, then the setting is pushed to hardware.
 * NOTE(review): the "switch (fc)" line and full-FC case were dropped by
 * this extraction.
 */
3870 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3873 case ixgbe_fc_rx_pause:
3874 case ixgbe_fc_tx_pause:
3876 adapter->hw.fc.requested_mode = fc;
3877 if (adapter->num_rx_queues > 1)
3878 ixgbe_disable_rx_drop(adapter);
3881 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3882 if (adapter->num_rx_queues > 1)
3883 ixgbe_enable_rx_drop(adapter);
3889 /* Don't autoneg if forcing a value */
3890 adapter->hw.fc.disable_fc_autoneg = TRUE;
3891 ixgbe_fc_enable(&adapter->hw);
3894 } /* ixgbe_set_flowcntl */
3896 /************************************************************************
3897 * ixgbe_enable_rx_drop
3899 * Enable the hardware to drop packets when the buffer is
3900 * full. This is useful with multiqueue, so that no single
3901 * queue being full stalls the entire RX engine. We only
3902 * enable this when Multiqueue is enabled AND Flow Control
3904 ************************************************************************/
/*
 * ixgbe_enable_rx_drop - let hardware drop packets on full RX buffers.
 * Sets SRRCTL_DROP_EN on every RX ring so one full queue cannot stall
 * the whole RX engine, and sets drop-enable (QDE) for each VF queue.
 */
3906 ixgbe_enable_rx_drop(struct adapter *adapter)
3908 struct ixgbe_hw *hw = &adapter->hw;
3909 struct rx_ring *rxr;
3912 for (int i = 0; i < adapter->num_rx_queues; i++) {
3913 rxr = &adapter->rx_queues[i].rxr;
3914 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3915 srrctl |= IXGBE_SRRCTL_DROP_EN;
3916 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3919 /* enable drop for each vf */
3920 for (int i = 0; i < adapter->num_vfs; i++) {
3921 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3922 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3925 } /* ixgbe_enable_rx_drop */
3927 /************************************************************************
3928 * ixgbe_disable_rx_drop
3929 ************************************************************************/
/*
 * ixgbe_disable_rx_drop - mirror of ixgbe_enable_rx_drop().
 * Clears SRRCTL_DROP_EN on every RX ring and clears the QDE drop-enable
 * bit for each VF (needed when flow control is in use, so pause frames
 * rather than drops provide backpressure).
 */
3931 ixgbe_disable_rx_drop(struct adapter *adapter)
3933 struct ixgbe_hw *hw = &adapter->hw;
3934 struct rx_ring *rxr;
3937 for (int i = 0; i < adapter->num_rx_queues; i++) {
3938 rxr = &adapter->rx_queues[i].rxr;
3939 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3940 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3941 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3944 /* disable drop for each vf */
3945 for (int i = 0; i < adapter->num_vfs; i++) {
3946 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3947 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3949 } /* ixgbe_disable_rx_drop */
3951 /************************************************************************
3952 * ixgbe_sysctl_advertise
3954 * SYSCTL wrapper around setting advertised speed
3955 ************************************************************************/
/*
 * ixgbe_sysctl_advertise - sysctl handler wrapping ixgbe_set_advertise().
 * Reports the current advertised-speed bitmask; on write, delegates
 * validation and application to ixgbe_set_advertise().
 */
3957 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3959 struct adapter *adapter;
3960 int error, advertise;
3962 adapter = (struct adapter *)arg1;
3963 advertise = adapter->advertise;
3965 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* Read-only access or handler error: nothing further to apply. */
3966 if ((error) || (req->newptr == NULL))
3969 return ixgbe_set_advertise(adapter, advertise);
3970 } /* ixgbe_sysctl_advertise */
3972 /************************************************************************
3973 * ixgbe_set_advertise - Control advertised link speed
3976 * 0x1 - advertise 100 Mb
3977 * 0x2 - advertise 1G
3978 * 0x4 - advertise 10G
3979 * 0x8 - advertise 10 Mb (yes, Mb)
3980 ************************************************************************/
/*
 * ixgbe_set_advertise - set the advertised link-speed bitmask.
 *   0x1 = 100 Mb, 0x2 = 1G, 0x4 = 10G, 0x8 = 10 Mb.
 * Validates the requested mask against media type (copper or multispeed
 * fiber only; backplane rejected) and the link capabilities reported by
 * the shared code, then restarts link setup with the new speed set.
 * NOTE(review): the "hw = &adapter->hw" assignment and several
 * return/error lines were dropped by this extraction.
 */
3982 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3984 device_t dev = iflib_get_dev(adapter->ctx);
3985 struct ixgbe_hw *hw;
3986 ixgbe_link_speed speed = 0;
3987 ixgbe_link_speed link_caps = 0;
3988 s32 err = IXGBE_NOT_IMPLEMENTED;
3989 bool negotiate = FALSE;
3991 /* Checks to validate new value */
3992 if (adapter->advertise == advertise) /* no change */
3997 /* No speed changes for backplane media */
3998 if (hw->phy.media_type == ixgbe_media_type_backplane)
4001 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4002 (hw->phy.multispeed_fiber))) {
4003 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4007 if (advertise < 0x1 || advertise > 0xF) {
4008 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4012 if (hw->mac.ops.get_link_capabilities) {
4013 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4015 if (err != IXGBE_SUCCESS) {
4016 device_printf(dev, "Unable to determine supported advertise speeds\n");
4021 /* Set new value and report new advertised mode */
4022 if (advertise & 0x1) {
4023 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4024 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4027 speed |= IXGBE_LINK_SPEED_100_FULL;
4029 if (advertise & 0x2) {
4030 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4031 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4034 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4036 if (advertise & 0x4) {
4037 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4038 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4041 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4043 if (advertise & 0x8) {
4044 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4045 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4048 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Apply: restart autonegotiation with the validated speed set. */
4051 hw->mac.autotry_restart = TRUE;
4052 hw->mac.ops.setup_link(hw, speed, TRUE);
4053 adapter->advertise = advertise;
4056 } /* ixgbe_set_advertise */
4058 /************************************************************************
4059 * ixgbe_get_advertise - Get current advertised speed settings
4061 * Formatted for sysctl usage.
4063 * 0x1 - advertise 100 Mb
4064 * 0x2 - advertise 1G
4065 * 0x4 - advertise 10G
4066 * 0x8 - advertise 10 Mb (yes, Mb)
4067 ************************************************************************/
/*
 * Build the advertised-speed bitmask from the MAC's reported link
 * capabilities; used to seed the sysctl when no explicit value is set.
 */
4069 ixgbe_get_advertise(struct adapter *adapter)
4071 struct ixgbe_hw *hw = &adapter->hw;
4073 ixgbe_link_speed link_caps = 0;
4075 bool negotiate = FALSE;
4078 * Advertised speed means nothing unless it's copper or
/* Only copper / multispeed fiber carry a meaningful advertise mask. */
4081 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4082 !(hw->phy.multispeed_fiber))
4085 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4086 if (err != IXGBE_SUCCESS)
/* Translate capability flags into the sysctl bit encoding above. */
4090 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4091 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4092 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4093 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4096 } /* ixgbe_get_advertise */
4098 /************************************************************************
4099 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4102 * 0/1 - off / on (use default value of 1000)
4104 * Legal timer values are:
4105 * 50,100,250,500,1000,2000,5000,10000
4107 * Turning off interrupt moderation will also turn this off.
4108 ************************************************************************/
/*
 * Sysctl handler for the DMA-coalescing timer (adapter->dmac).
 * Accepts 0/1 (off / on-with-default-1000) or one of the legal timer
 * values listed above; anything else is rejected.  Applies by
 * re-initializing the interface if it is currently running.
 */
4110 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4112 struct adapter *adapter = (struct adapter *)arg1;
4113 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4117 newval = adapter->dmac;
/* 16-bit handler: adapter->dmac is a u16-sized value. */
4118 error = sysctl_handle_16(oidp, &newval, 0, req);
4119 if ((error) || (req->newptr == NULL))
/* NOTE(review): the switch(newval) dispatch lines are not visible in
 * this extract; only selected case bodies appear below. */
4128 /* Enable and use default */
4129 adapter->dmac = 1000;
4139 /* Legal values - allow */
4140 adapter->dmac = newval;
4143 /* Do nothing, illegal value */
4147 /* Re-initialize hardware if it's already running */
4148 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4152 } /* ixgbe_sysctl_dmac */
4155 /************************************************************************
4156 * ixgbe_sysctl_power_state
4158 * Sysctl to test power states
4160 * 0 - set device to D0
4161 * 3 - set device to D3
4162 * (none) - get current device power state
4163 ************************************************************************/
/*
 * Debug sysctl: read returns the current PCI power state; writing 0 or 3
 * triggers DEVICE_RESUME / DEVICE_SUSPEND respectively to exercise the
 * D0 <-> D3 transitions.
 */
4165 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4167 struct adapter *adapter = (struct adapter *)arg1;
4168 device_t dev = adapter->dev;
4169 int curr_ps, new_ps, error = 0;
/* Seed both with the live PCI power state so a read reports it as-is. */
4171 curr_ps = new_ps = pci_get_powerstate(dev);
4173 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4174 if ((error) || (req->newptr == NULL))
/* No-op write. */
4177 if (new_ps == curr_ps)
/* Only the two supported transitions are acted on. */
4180 if (new_ps == 3 && curr_ps == 0)
4181 error = DEVICE_SUSPEND(dev);
4182 else if (new_ps == 0 && curr_ps == 3)
4183 error = DEVICE_RESUME(dev);
/* Report the state the device actually landed in. */
4187 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4190 } /* ixgbe_sysctl_power_state */
4193 /************************************************************************
4194 * ixgbe_sysctl_wol_enable
4196 * Sysctl to enable/disable the WoL capability,
4197 * if supported by the adapter.
4202 ************************************************************************/
/*
 * Toggle hw->wol_enabled.  Writes are normalized to 0/1 and rejected
 * when the adapter lacks WoL support (adapter->wol_support).
 */
4204 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4206 struct adapter *adapter = (struct adapter *)arg1;
4207 struct ixgbe_hw *hw = &adapter->hw;
4208 int new_wol_enabled;
4211 new_wol_enabled = hw->wol_enabled;
4212 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4213 if ((error) || (req->newptr == NULL))
/* Collapse any nonzero write to 1. */
4215 new_wol_enabled = !!(new_wol_enabled);
4216 if (new_wol_enabled == hw->wol_enabled)
/* Can't enable WoL on hardware that doesn't support it. */
4219 if (new_wol_enabled > 0 && !adapter->wol_support)
4222 hw->wol_enabled = new_wol_enabled;
4225 } /* ixgbe_sysctl_wol_enable */
4227 /************************************************************************
4228 * ixgbe_sysctl_wufc - Wake Up Filter Control
4230 * Sysctl to enable/disable the types of packets that the
4231 * adapter will wake up on upon receipt.
4233 * 0x1 - Link Status Change
4234 * 0x2 - Magic Packet
4235 * 0x4 - Direct Exact
4236 * 0x8 - Directed Multicast
4238 * 0x20 - ARP/IPv4 Request Packet
4239 * 0x40 - Direct IPv4 Packet
4240 * 0x80 - Direct IPv6 Packet
4242 * Settings not listed above will cause the sysctl to return an error.
4243 ************************************************************************/
/*
 * Sysctl handler for adapter->wufc (wake-up filter bitmask).  Only the
 * low byte is settable; any bit above 0xff in the written value is
 * rejected.
 */
4245 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4247 struct adapter *adapter = (struct adapter *)arg1;
4251 new_wufc = adapter->wufc;
/* 32-bit handler to match the WUFC register width. */
4253 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4254 if ((error) || (req->newptr == NULL))
4256 if (new_wufc == adapter->wufc)
/* Reject any bits outside the documented low-byte filter set. */
4259 if (new_wufc & 0xffffff00)
/* Preserve the existing upper bits of the cached value. */
4263 new_wufc |= (0xffffff & adapter->wufc);
4264 adapter->wufc = new_wufc;
4267 } /* ixgbe_sysctl_wufc */
4270 /************************************************************************
4271 * ixgbe_sysctl_print_rss_config
4272 ************************************************************************/
/*
 * Debug sysctl (compiled under IXGBE_DEBUG): dumps the RSS redirection
 * table (RETA, plus ERETA on X550-class parts) into an sbuf returned to
 * the sysctl caller.
 */
4274 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4276 struct adapter *adapter = (struct adapter *)arg1;
4277 struct ixgbe_hw *hw = &adapter->hw;
4278 device_t dev = adapter->dev;
4280 int error = 0, reta_size;
/* sbuf sized at 128 bytes initially; it grows as needed for the table. */
4283 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4285 device_printf(dev, "Could not allocate sbuf for output.\n");
4289 // TODO: use sbufs to make a string to print out
4290 /* Set multiplier for RETA setup and table size based on MAC */
/* NOTE(review): the reta_size assignments per MAC type are on lines not
 * visible in this extract — X550-family parts use the larger table. */
4291 switch (adapter->hw.mac.type) {
4292 case ixgbe_mac_X550:
4293 case ixgbe_mac_X550EM_x:
4294 case ixgbe_mac_X550EM_a:
4302 /* Print out the redirection table */
4303 sbuf_cat(buf, "\n");
4304 for (int i = 0; i < reta_size; i++) {
/* First 32 entries live in RETA; the remainder (X550) in ERETA. */
4306 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4307 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4309 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4310 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4314 // TODO: print more config
4316 error = sbuf_finish(buf);
4318 device_printf(dev, "Error finishing sbuf: %d\n", error);
4323 } /* ixgbe_sysctl_print_rss_config */
4324 #endif /* IXGBE_DEBUG */
4326 /************************************************************************
4327 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4329 * For X552/X557-AT devices using an external PHY
4330 ************************************************************************/
/*
 * Read the external PHY's current-temperature register over MDIO and
 * return it through sysctl_handle_16().  Only supported on
 * X550EM_X_10G_T (X552/X557-AT) devices; all other parts get an error.
 *
 * FIX(review): the read_reg() out-parameter was mis-encoded as the
 * single character '®'; restored to '&reg' (address of the local
 * register value consumed by sysctl_handle_16 below).
 */
4332 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4334 struct adapter *adapter = (struct adapter *)arg1;
4335 struct ixgbe_hw *hw = &adapter->hw;
/* Only the X552/X557-AT external PHY exposes this sensor. */
4338 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4339 device_printf(iflib_get_dev(adapter->ctx),
4340 "Device has no supported external thermal sensor.\n");
/* MDIO read of the vendor-specific current-temperature register. */
4344 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4345 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4346 device_printf(iflib_get_dev(adapter->ctx),
4347 "Error reading from PHY's current temperature register\n");
4351 /* Shift temp for output */
4354 return (sysctl_handle_16(oidp, NULL, reg, req));
4355 } /* ixgbe_sysctl_phy_temp */
4357 /************************************************************************
4358 * ixgbe_sysctl_phy_overtemp_occurred
4360 * Reports (directly from the PHY) whether the current PHY
4361 * temperature is over the overtemp threshold.
4362 ************************************************************************/
/*
 * Read the external PHY's overtemp-status register over MDIO, reduce it
 * to the single occurrence bit (0x4000), and report 0/1 via
 * sysctl_handle_16().  X550EM_X_10G_T only, like the temp sysctl above.
 *
 * FIX(review): the read_reg() out-parameter was mis-encoded as the
 * single character '®'; restored to '&reg' (address of the local
 * register value masked at the bottom of the function).
 */
4364 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4366 struct adapter *adapter = (struct adapter *)arg1;
4367 struct ixgbe_hw *hw = &adapter->hw;
/* Only the X552/X557-AT external PHY exposes this sensor. */
4370 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4371 device_printf(iflib_get_dev(adapter->ctx),
4372 "Device has no supported external thermal sensor.\n");
/* MDIO read of the vendor-specific overtemp status register. */
4376 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4377 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4378 device_printf(iflib_get_dev(adapter->ctx),
4379 "Error reading from PHY's temperature status register\n");
4383 /* Get occurrence bit */
4384 reg = !!(reg & 0x4000);
4386 return (sysctl_handle_16(oidp, 0, reg, req));
4387 } /* ixgbe_sysctl_phy_overtemp_occurred */
4389 /************************************************************************
4390 * ixgbe_sysctl_eee_state
4392 * Sysctl to set EEE power saving feature
4396 * (none) - get current device EEE state
4397 ************************************************************************/
/*
 * Toggle Energy-Efficient Ethernet.  Read reports whether
 * IXGBE_FEATURE_EEE is enabled; a write of 0/1 calls the MAC's
 * setup_eee() hook and caches the result in adapter->feat_en.
 */
4399 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4401 struct adapter *adapter = (struct adapter *)arg1;
4402 device_t dev = adapter->dev;
4403 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4404 int curr_eee, new_eee, error = 0;
/* Current state comes from the enabled-features bitmap, not hardware. */
4407 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4409 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4410 if ((error) || (req->newptr == NULL))
/* Nothing to do */
4414 if (new_eee == curr_eee)
/* Not supported */
4418 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4421 /* Bounds checking */
4422 if ((new_eee < 0) || (new_eee > 1))
/* Ask the MAC layer to (de)activate EEE; report hardware errors. */
4425 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4427 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4431 /* Restart auto-neg */
4434 device_printf(dev, "New EEE state: %d\n", new_eee);
4436 /* Cache new value */
4438 adapter->feat_en |= IXGBE_FEATURE_EEE;
4440 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4443 } /* ixgbe_sysctl_eee_state */
4445 /************************************************************************
4446 * ixgbe_init_device_features
4447 ************************************************************************/
/*
 * Populate adapter->feat_cap (what this MAC/device can do) and
 * adapter->feat_en (what is actually turned on), in three passes:
 * baseline capabilities per MAC type, defaults enabled from capability,
 * then global-sysctl overrides and dependency pruning.
 *
 * NOTE(review): the per-case 'break' statements of the switch are on
 * lines not visible in this extract; do not assume fallthrough.
 */
4449 ixgbe_init_device_features(struct adapter *adapter)
/* Every part gets netmap, MSI-X and legacy-IRQ capability to start. */
4451 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4454 | IXGBE_FEATURE_MSIX
4455 | IXGBE_FEATURE_LEGACY_IRQ;
4457 /* Set capabilities first... */
4458 switch (adapter->hw.mac.type) {
4459 case ixgbe_mac_82598EB:
/* Fan-failure detection exists only on the 82598AT variant. */
4460 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4461 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4463 case ixgbe_mac_X540:
4464 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4465 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* Bypass is only exposed on function 0 of the bypass SKU. */
4466 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4467 (adapter->hw.bus.func == 0))
4468 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4470 case ixgbe_mac_X550:
4471 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4472 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4473 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4475 case ixgbe_mac_X550EM_x:
4476 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4477 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* EEE on X550EM_x is limited to the KR device. */
4478 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4479 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4481 case ixgbe_mac_X550EM_a:
4482 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a has no legacy interrupt support. */
4484 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4485 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4486 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4487 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4488 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4491 case ixgbe_mac_82599EB:
4492 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4493 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4494 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4495 (adapter->hw.bus.func == 0))
4496 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
/* The QSFP SKU cannot use legacy interrupts. */
4497 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4498 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4504 /* Enabled by default... */
4505 /* Fan failure detection */
4506 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4507 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4509 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4510 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
/* EEE (Energy Efficient Ethernet) */
4512 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4513 adapter->feat_en |= IXGBE_FEATURE_EEE;
4514 /* Thermal Sensor */
4515 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4516 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4518 /* Enabled via global sysctl... */
/* Flow Director */
4520 if (ixgbe_enable_fdir) {
4521 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4522 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4524 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4527 * Message Signal Interrupts - Extended (MSI-X)
4528 * Normal MSI is only enabled if MSI-X calls fail.
4530 if (!ixgbe_enable_msix)
4531 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4532 /* Receive-Side Scaling (RSS) */
4533 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4534 adapter->feat_en |= IXGBE_FEATURE_RSS;
4536 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors. */
4538 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4539 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4540 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4541 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4542 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4544 } /* ixgbe_init_device_features */
4546 /************************************************************************
4547 * ixgbe_check_fan_failure
4548 ************************************************************************/
/*
 * Check an EICR/ESDP-style register snapshot for the fan-failure GPI bit
 * and log a critical console message when it is set.  The mask differs
 * depending on whether 'reg' was captured in interrupt context
 * (EICR GPI SDP1, per-MAC) or not.
 */
4550 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
/* Select the GPI SDP1 mask appropriate to the register source; the
 * non-interrupt alternative is on a line not visible in this extract. */
4554 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4558 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4559 } /* ixgbe_check_fan_failure */