1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
41 #include "ixgbe_sriov.h"
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
/************************************************************************
 * Driver version string
 ************************************************************************/
/* Handed to iflib through ixgbe_sctx_init.isc_driver_version below. */
char ixgbe_driver_version[] = "4.0.1-k";
/************************************************************************
 * PCI Device ID Table
 *
 * Used by probe to select devices to load on
 * Last field stores an index into ixgbe_strings
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
/*
 * iflib PNP/probe table: one PVID() entry per supported Intel device ID
 * (82598, 82599, X540, X550 families).  Referenced by isc_vendor_info
 * and IFLIB_PNP_INFO() below.
 */
static pci_vendor_info_t ixgbe_vendor_info_array[] =
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
/* required last entry */
/*
 * iflib device-interface (ifdi_*) callback prototypes.  These are the
 * entry points wired into ixgbe_if_methods[] below.
 */
static void *ixgbe_register(device_t dev);
static int ixgbe_if_attach_pre(if_ctx_t ctx);
static int ixgbe_if_attach_post(if_ctx_t ctx);
static int ixgbe_if_detach(if_ctx_t ctx);
static int ixgbe_if_shutdown(if_ctx_t ctx);
static int ixgbe_if_suspend(if_ctx_t ctx);
static int ixgbe_if_resume(if_ctx_t ctx);
static void ixgbe_if_stop(if_ctx_t ctx);
/* non-static: also used by the SR-IOV support code */
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static void ixgbe_link_intr_enable(if_ctx_t ctx);
static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int ixgbe_if_media_change(if_ctx_t ctx);
static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
/* legacy (INTx/MSI) interrupt handler */
int ixgbe_intr(void *arg);
/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
/* NOTE(review): matching #endif for the guard above is elided from this view */

/* MSI-X queue interrupt mask helpers */
static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
/* NOTE(review): trailing parameter of ixgbe_set_ivar is elided here */
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);
static void ixgbe_free_pci_resources(if_ctx_t ctx);
/* MSI-X interrupt filter routines */
static int ixgbe_msix_link(void *arg);
static int ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
static int ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(if_ctx_t ctx);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);
static void ixgbe_add_hw_stats(struct adapter *adapter);
static int ixgbe_set_flowcntl(struct adapter *, int);
static int ixgbe_set_advertise(struct adapter *, int);
static int ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
/* debug handlers exposing ring head/tail registers */
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
/*
 * Bus-level device methods.  Everything except device_register is
 * delegated to the generic iflib implementations.
 */
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
    /* SR-IOV (PF-side) entry points, also handled by iflib */
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),

/* driver_t: device name "ix", methods above, softc is struct adapter */
static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct adapter),

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
/* iflib interface methods: map each ifdi_* operation to this driver */
static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    /* TX reuses the RX queue intr-enable callback */
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
    DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
    /* SR-IOV (ifdi) entry points */
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");

/* iflib driver glue: binds the ifdi method table to the softc size */
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)

/* Interrupt-throttle ceiling, tunable via hw.ix.max_interrupt_rate */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

/* TX/RX fast-path operations, defined in the txrx companion file */
extern struct if_txrx ixgbe_txrx;
/*
 * Shared-context template handed to iflib by ixgbe_register(): DMA
 * alignment and size limits, the PNP table, and descriptor-count
 * min/max/defaults for the rings.
 */
static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    /* one admin (link/mailbox) interrupt in addition to the queues */
    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,
    /* descriptor-count limits and defaults */
    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
/************************************************************************
 * ixgbe_if_tx_queues_alloc
 *
 * iflib callback: allocate software state for 'ntxqsets' TX queues.
 * vaddrs/paddrs carry the iflib-allocated descriptor-ring addresses.
 * On failure, partially-allocated state is released via
 * ixgbe_if_queues_free() (error path below).
 ************************************************************************/
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
struct ix_tx_queue *que;

/* iflib must hand us exactly the queue count negotiated earlier */
MPASS(adapter->num_tx_queues > 0);
MPASS(adapter->num_tx_queues == ntxqsets);

/* Allocate queue structure memory */
    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
    M_IXGBE, M_NOWAIT | M_ZERO);
if (!adapter->tx_queues) {
    device_printf(iflib_get_dev(ctx),
        "Unable to allocate TX ring memory\n");

for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
    struct tx_ring *txr = &que->txr;

    /* In case SR-IOV is enabled, align the index properly */
    txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,

    txr->adapter = que->adapter = adapter;

    /* Allocate report status array */
    txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
    if (txr->tx_rsq == NULL) {

    /* mark every report-status slot empty */
    for (j = 0; j < scctx->isc_ntxd[0]; j++)
        txr->tx_rsq[j] = QIDX_INVALID;
    /* get the virtual and physical address of the hardware queues */
    txr->tail = IXGBE_TDT(txr->me);
    txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
    txr->tx_paddr = paddrs[i];

    txr->total_packets = 0;

    /* Set the rate at which we sample packets */
    if (adapter->feat_en & IXGBE_FEATURE_FDIR)
        txr->atr_sample = atr_sample_rate;

device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
    adapter->num_tx_queues);

/* error path: release any partially-allocated queue state */
    ixgbe_if_queues_free(ctx);

} /* ixgbe_if_tx_queues_alloc */
465 /************************************************************************
466 * ixgbe_if_rx_queues_alloc
467 ************************************************************************/
469 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
470 int nrxqs, int nrxqsets)
472 struct adapter *adapter = iflib_get_softc(ctx);
473 struct ix_rx_queue *que;
476 MPASS(adapter->num_rx_queues > 0);
477 MPASS(adapter->num_rx_queues == nrxqsets);
480 /* Allocate queue structure memory */
482 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
483 M_IXGBE, M_NOWAIT | M_ZERO);
484 if (!adapter->rx_queues) {
485 device_printf(iflib_get_dev(ctx),
486 "Unable to allocate TX ring memory\n");
490 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
491 struct rx_ring *rxr = &que->rxr;
493 /* In case SR-IOV is enabled, align the index properly */
494 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
497 rxr->adapter = que->adapter = adapter;
499 /* get the virtual and physical address of the hw queues */
500 rxr->tail = IXGBE_RDT(rxr->me);
501 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
502 rxr->rx_paddr = paddrs[i];
507 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
508 adapter->num_rx_queues);
511 } /* ixgbe_if_rx_queues_alloc */
/************************************************************************
 * ixgbe_if_queues_free
 *
 * iflib callback: release the queue state allocated by the
 * *_queues_alloc callbacks (per-ring report-status arrays and the
 * TX/RX queue arrays).  Tolerates partially-completed allocations.
 ************************************************************************/
ixgbe_if_queues_free(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ix_tx_queue *tx_que = adapter->tx_queues;
struct ix_rx_queue *rx_que = adapter->rx_queues;

if (tx_que != NULL) {
    for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;
        /* skip rings whose report-status array was never allocated */
        if (txr->tx_rsq == NULL)

        free(txr->tx_rsq, M_IXGBE);

    free(adapter->tx_queues, M_IXGBE);
    /* NULL out so a repeat call is harmless */
    adapter->tx_queues = NULL;

if (rx_que != NULL) {
    free(adapter->rx_queues, M_IXGBE);
    adapter->rx_queues = NULL;
} /* ixgbe_if_queues_free */
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 * Program the RSS redirection table (RETA/ERETA), the hash key
 * (RSSRK) and the enabled hash types (MRQC) for this adapter.
 ************************************************************************/
ixgbe_initialize_rss_mapping(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 reta = 0, mrqc, rss_key[10];
int queue_id, table_size, index_mult;

if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    /* Fetch the configured RSS key */
    rss_getkey((uint8_t *)&rss_key);
/* set up random bits */
arc4rand(&rss_key, sizeof(rss_key), 0);

/* Set multiplier for RETA setup and table size based on MAC */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:

/* Set up the redirection table */
for (i = 0, j = 0; i < table_size; i++, j++) {
    /* wrap back to the first RX queue once all have been used */
    if (j == adapter->num_rx_queues)

    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
        /*
         * Fetch the RSS bucket id for the given indirection
         * entry. Cap it at the number of configured buckets
         * (which is num_rx_queues.)
         */
        queue_id = rss_get_indirection_to_bucket(i);
        queue_id = queue_id % adapter->num_rx_queues;
        queue_id = (j * index_mult);

    /*
     * The low 8 bits are for hash value (n+0);
     * The next 8 bits are for hash value (n+1), etc.
     */
    reta = reta | (((uint32_t)queue_id) << 24);
    /* flush the accumulated 32-bit word every 4th entry */
    IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    /* entries past the first 128 go to the extended table */
    IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),

/* Now fill our hash function seeds */
for (i = 0; i < 10; i++)
    IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

/* Perform hash on these packet types */
if (adapter->feat_en & IXGBE_FEATURE_RSS)
    rss_hash_config = rss_gethashconfig();
/*
 * Disable UDP - IP fragments aren't currently being handled
 * and so we end up with a mix of 2-tuple and 4-tuple
 * traffic.
 */
rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    | RSS_HASHTYPE_RSS_TCP_IPV4
    | RSS_HASHTYPE_RSS_IPV6
    | RSS_HASHTYPE_RSS_TCP_IPV6
    | RSS_HASHTYPE_RSS_IPV6_EX
    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;

/* translate the hash-type bitmask into MRQC field enables */
mrqc = IXGBE_MRQC_RSSEN;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Rounding constant for converting buffer sizes to SRRCTL BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

ixgbe_initialize_receive_units(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = iflib_get_ifp(ctx);
struct ix_rx_queue *que;
u32 bufsz, fctrl, srrctl, rxcsum;

/*
 * Make sure receives are disabled while
 * setting up the descriptor ring
 */
ixgbe_disable_rx(hw);

/* Enable broadcasts */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_BAM;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    fctrl |= IXGBE_FCTRL_DPF;
    fctrl |= IXGBE_FCTRL_PMCF;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

/* Set for Jumbo Frames? */
hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
if (ifp->if_mtu > ETHERMTU)
    hlreg |= IXGBE_HLREG0_JUMBOEN;
hlreg &= ~IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

/* convert the mbuf buffer size into SRRCTL BSIZEPKT units */
bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

/* Setup the Base and Length of the Rx Descriptor Ring */
for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
    struct rx_ring *rxr = &que->rxr;
    u64 rdba = rxr->rx_paddr;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
        (rdba & 0x00000000ffffffffULL));
    IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
        scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

    /* Set up the SRRCTL register */
    srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

    /*
     * Set DROP_EN iff we have no flow control and >1 queue.
     * Note that srrctl was cleared shortly before during reset,
     * so we do not need to clear the bit, but do it just in case
     * this code is moved elsewhere.
     */
    if (adapter->num_rx_queues > 1 &&
        adapter->hw.fc.requested_mode == ixgbe_fc_none) {
        srrctl |= IXGBE_SRRCTL_DROP_EN;
    srrctl &= ~IXGBE_SRRCTL_DROP_EN;

    IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

    /* Setup the HW Rx Head and Tail Descriptor Pointers */
    IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

    /* Set the driver rx tail address */
    rxr->tail = IXGBE_RDT(rxr->me);

/* non-82598 MACs also take a packet-split type configuration */
if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    u32 psrtype = IXGBE_PSRTYPE_TCPHDR
        | IXGBE_PSRTYPE_UDPHDR
        | IXGBE_PSRTYPE_IPV4HDR
        | IXGBE_PSRTYPE_IPV6HDR;
    IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);

rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

ixgbe_initialize_rss_mapping(adapter);

if (adapter->num_rx_queues > 1) {
    /* RSS and RX IPP Checksum are mutually exclusive */
    rxcsum |= IXGBE_RXCSUM_PCSD;

if (ifp->if_capenable & IFCAP_RXCSUM)
    rxcsum |= IXGBE_RXCSUM_PCSD;

/* This is useful for calculating UDP/IP fragment checksums */
if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    rxcsum |= IXGBE_RXCSUM_IPPCSE;

IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 * Programs per-ring base/length/head/tail registers, resets the
 * driver-side report-status bookkeeping, disables head writeback,
 * and (on non-82598 MACs) enables the transmit DMA engine.
 ************************************************************************/
ixgbe_initialize_transmit_units(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
if_softc_ctx_t scctx = adapter->shared;
struct ix_tx_queue *que;

/* Setup the Base and Length of the Tx Descriptor Ring */
for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
    struct tx_ring *txr = &que->txr;
    u64 tdba = txr->tx_paddr;

    IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
        (tdba & 0x00000000ffffffffULL));
    IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
        scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

    /* Setup the HW Tx Head and Tail descriptor pointers */
    IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

    /* Cache the tail address */
    txr->tail = IXGBE_TDT(txr->me);

    /* reset report-status bookkeeping for a freshly-zeroed ring */
    txr->tx_rs_cidx = txr->tx_rs_pidx;
    txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
    for (int k = 0; k < scctx->isc_ntxd[0]; k++)
        txr->tx_rsq[k] = QIDX_INVALID;

    /* Disable Head Writeback */
    /*
     * Note: for X550 series devices, these registers are actually
     * prefixed with TPH_ instead of DCA_, but the addresses and
     * fields remain the same.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));

    txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);

/* 82599 and later: enable TX DMA and program MTQC for the IOV mode */
if (hw->mac.type != ixgbe_mac_82598EB) {
    u32 dmatxctl, rttdcs;

    dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    dmatxctl |= IXGBE_DMATXCTL_TE;
    IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    /* Disable arbiter to set MTQC */
    rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    rttdcs |= IXGBE_RTTDCS_ARBDIS;
    IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    IXGBE_WRITE_REG(hw, IXGBE_MTQC,
        ixgbe_get_mtqc(adapter->iov_mode));
    rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

} /* ixgbe_initialize_transmit_units */
/************************************************************************
 * ixgbe_register - device_register method
 *
 * Returns the iflib shared context for this driver to the bus layer.
 * NOTE(review): the body (return of the shared-ctx pointer) is elided
 * from this view — confirm against the full source.
 ************************************************************************/
ixgbe_register(device_t dev)
} /* ixgbe_register */
861 /************************************************************************
862 * ixgbe_if_attach_pre - Device initialization routine, part 1
864 * Called when the driver is being loaded.
865 * Identifies the type of hardware, initializes the hardware,
866 * and initializes iflib structures.
868 * return 0 on success, positive on failure
869 ************************************************************************/
/*
 * First-stage device attach: identify the part, map BAR0, initialize the
 * shared code, reset the MAC, validate the EEPROM, then fill in the iflib
 * softc-context fields (RSS table size, queue-set maxima, descriptor ring
 * sizes, checksum/TSO capabilities).  Per the header comment, returns 0 on
 * success, positive on failure.
 * NOTE(review): this listing elides a number of source lines (local
 * declarations such as 'dev'/'hw'/'error', goto error paths, closing
 * braces); it is a partial view and not compilable as-is.
 */
871 ixgbe_if_attach_pre(if_ctx_t ctx)
873 struct adapter *adapter;
875 if_softc_ctx_t scctx;
880 INIT_DEBUGOUT("ixgbe_attach: begin");
882 /* Allocate, clear, and link in our adapter structure */
883 dev = iflib_get_dev(ctx);
884 adapter = iflib_get_softc(ctx);
885 adapter->hw.back = adapter;
888 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
889 adapter->media = iflib_get_media(ctx);
892 /* Determine hardware revision */
893 hw->vendor_id = pci_get_vendor(dev);
894 hw->device_id = pci_get_device(dev);
895 hw->revision_id = pci_get_revid(dev);
896 hw->subsystem_vendor_id = pci_get_subvendor(dev);
897 hw->subsystem_device_id = pci_get_subdevice(dev);
899 /* Do base PCI setup - map BAR0 */
900 if (ixgbe_allocate_pci_resources(ctx)) {
901 device_printf(dev, "Allocation of PCI resources failed\n");
905 /* let hardware know driver is loaded */
906 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
907 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
908 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
911 * Initialize the shared code
913 if (ixgbe_init_shared_code(hw) != 0) {
914 device_printf(dev, "Unable to initialize the shared code\n");
919 if (hw->mbx.ops.init_params)
920 hw->mbx.ops.init_params(hw);
922 hw->allow_unsupported_sfp = allow_unsupported_sfp;
/* SmartSpeed is only configured on non-82598 MACs */
924 if (hw->mac.type != ixgbe_mac_82598EB)
925 hw->phy.smart_speed = ixgbe_smart_speed;
927 ixgbe_init_device_features(adapter);
929 /* Enable WoL (if supported) */
930 ixgbe_check_wol_support(adapter);
932 /* Verify adapter fan is still functional (if applicable) */
933 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
934 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
935 ixgbe_check_fan_failure(adapter, esdp, FALSE);
938 /* Ensure SW/FW semaphore is free */
939 ixgbe_init_swfw_semaphore(hw);
941 /* Set an initial default flow control value */
942 hw->fc.requested_mode = ixgbe_flow_control;
/* Reset the MAC; permit the PHY reset even while over-temp, then restore */
944 hw->phy.reset_if_overtemp = TRUE;
945 error = ixgbe_reset_hw(hw);
946 hw->phy.reset_if_overtemp = FALSE;
947 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
949 * No optics in this port, set up
950 * so the timer routine will probe
951 * for later insertion.
953 adapter->sfp_probe = TRUE;
955 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
956 device_printf(dev, "Unsupported SFP+ module detected!\n");
960 device_printf(dev, "Hardware initialization failed\n");
965 /* Make sure we have a good EEPROM before we read from it */
966 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
967 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
/* Start the MAC; the cases below report known informational conditions
 * (switch header elided in this listing) */
972 error = ixgbe_start_hw(hw);
974 case IXGBE_ERR_EEPROM_VERSION:
975 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
977 case IXGBE_ERR_SFP_NOT_SUPPORTED:
978 device_printf(dev, "Unsupported SFP+ Module\n");
981 case IXGBE_ERR_SFP_NOT_PRESENT:
982 device_printf(dev, "No SFP+ Module found\n");
988 /* Most of the iflib initialization... */
990 iflib_set_mac(ctx, hw->mac.addr);
/* Visible X550EM cases get a 512-entry RSS table and 64 queue sets;
 * remaining MACs (case labels elided here) get 128 / 16 */
991 switch (adapter->hw.mac.type) {
993 case ixgbe_mac_X550EM_x:
994 case ixgbe_mac_X550EM_a:
995 scctx->isc_rss_table_size = 512;
996 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
999 scctx->isc_rss_table_size = 128;
1000 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1003 /* Allow legacy interrupts */
1004 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
/* Descriptor ring byte sizes, rounded up for DMA alignment */
1006 scctx->isc_txqsizes[0] =
1007 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1008 sizeof(u32), DBA_ALIGN),
1009 scctx->isc_rxqsizes[0] =
1010 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1014 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1015 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
/* 82598 uses a smaller scatter limit; other MACs also get SCTP csum offload */
1016 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1017 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1019 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1020 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1023 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1025 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1026 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1027 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1029 scctx->isc_txrx = &ixgbe_txrx;
1031 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
/* Error unwind: clear the DRV_LOAD bit so firmware knows the driver is
 * gone, then release PCI resources (error labels elided in this listing) */
1036 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1037 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1039 ixgbe_free_pci_resources(ctx);
1042 } /* ixgbe_if_attach_pre */
1044 /*********************************************************************
1045 * ixgbe_if_attach_post - Device initialization routine, part 2
1047 * Called during driver load, but after interrupts and
1048 * resources have been allocated and configured.
1049 * Sets up some data structures not relevant to iflib.
1051 * return 0 on success, positive on failure
1052 *********************************************************************/
/*
 * Second-stage attach: runs after interrupts/resources are allocated.
 * Rejects unsupported legacy-IRQ configurations, allocates the multicast
 * table, powers up the optics/PHY, sets up the ifnet, and registers
 * statistics and sysctls.  Per the header comment, returns 0 on success,
 * positive on failure.
 * NOTE(review): several lines (declarations of 'dev'/'error', error
 * labels, closing braces) are elided in this listing.
 */
1054 ixgbe_if_attach_post(if_ctx_t ctx)
1057 struct adapter *adapter;
1058 struct ixgbe_hw *hw;
1061 dev = iflib_get_dev(ctx);
1062 adapter = iflib_get_softc(ctx);
/* Legacy (INTx) interrupts require the LEGACY_IRQ feature capability */
1066 if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1067 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1068 device_printf(dev, "Device does not support legacy interrupts");
1073 /* Allocate multicast array memory. */
1074 adapter->mta = malloc(sizeof(*adapter->mta) *
1075 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1076 if (adapter->mta == NULL) {
1077 device_printf(dev, "Can not allocate multicast setup array\n");
1082 /* hw.ix defaults init */
1083 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1085 /* Enable the optics for 82599 SFP+ fiber */
1086 ixgbe_enable_tx_laser(hw);
1088 /* Enable power to the phy. */
1089 ixgbe_set_phy_power(hw, TRUE);
1091 ixgbe_initialize_iov(adapter);
1093 error = ixgbe_setup_interface(ctx);
1095 device_printf(dev, "Interface setup failed: %d\n", error);
1099 ixgbe_if_update_admin_status(ctx);
1101 /* Initialize statistics */
1102 ixgbe_update_stats_counters(adapter);
1103 ixgbe_add_hw_stats(adapter);
1105 /* Check PCIE slot type/speed/width */
1106 ixgbe_get_slot_info(adapter);
1109 * Do time init and sysctl init here, but
1110 * only on the first port of a bypass adapter.
1112 ixgbe_bypass_init(adapter);
1114 /* Set an initial dmac value */
1116 /* Set initial advertised speeds (if applicable) */
1117 adapter->advertise = ixgbe_get_advertise(adapter);
1119 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1120 ixgbe_define_iov_schemas(dev, &error);
1123 ixgbe_add_device_sysctls(ctx);
1128 } /* ixgbe_if_attach_post */
1130 /************************************************************************
1131 * ixgbe_check_wol_support
1133 * Checks whether the adapter's ports are capable of
1134 * Wake On LAN by reading the adapter's NVM.
1136 * Sets each port's hw->wol_enabled value depending
1137 * on the value read here.
1138 ************************************************************************/
/* See header comment above: reads NVM device caps to decide per-port WoL
 * support, and snapshots the current wake-up filter (WUFC) register.
 * NOTE(review): the tail of the PORT0 condition (lines 1150, 1152) is
 * elided in this listing -- presumably a bus-function check; confirm. */
1140 ixgbe_check_wol_support(struct adapter *adapter)
1142 struct ixgbe_hw *hw = &adapter->hw;
1145 /* Find out WoL support for port */
1146 adapter->wol_support = hw->wol_enabled = 0;
1147 ixgbe_get_device_caps(hw, &dev_caps);
1148 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1149 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1151 adapter->wol_support = hw->wol_enabled = 1;
1153 /* Save initial wake up filter configuration */
1154 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1157 } /* ixgbe_check_wol_support */
1159 /************************************************************************
1160 * ixgbe_setup_interface
1162 * Setup networking device structure and register an interface.
1163 ************************************************************************/
/*
 * Populate the ifnet: baud rate, max frame size derived from the MTU,
 * supported PHY layers, media types; defaults media selection to AUTO.
 */
1165 ixgbe_setup_interface(if_ctx_t ctx)
1167 struct ifnet *ifp = iflib_get_ifp(ctx);
1168 struct adapter *adapter = iflib_get_softc(ctx);
1170 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1172 if_setbaudrate(ifp, IF_Gbps(10));
/* L2 header + CRC on top of the MTU gives the max wire frame size */
1174 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1176 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1178 ixgbe_add_media_types(ctx);
1180 /* Autoselect media by default */
1181 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1184 } /* ixgbe_setup_interface */
1186 /************************************************************************
1187 * ixgbe_if_get_counter
1188 ************************************************************************/
/*
 * iflib counter callback: map generic ifnet counters onto the adapter's
 * accumulated statistics; anything unhandled falls through to the
 * if_get_counter_default() stack implementation.
 * NOTE(review): the return lines for COLLISIONS and OQDROPS (1209, 1213)
 * and the switch header are elided in this listing.
 */
1190 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1192 struct adapter *adapter = iflib_get_softc(ctx);
1193 if_t ifp = iflib_get_ifp(ctx);
1196 case IFCOUNTER_IPACKETS:
1197 return (adapter->ipackets);
1198 case IFCOUNTER_OPACKETS:
1199 return (adapter->opackets);
1200 case IFCOUNTER_IBYTES:
1201 return (adapter->ibytes);
1202 case IFCOUNTER_OBYTES:
1203 return (adapter->obytes);
1204 case IFCOUNTER_IMCASTS:
1205 return (adapter->imcasts);
1206 case IFCOUNTER_OMCASTS:
1207 return (adapter->omcasts);
1208 case IFCOUNTER_COLLISIONS:
1210 case IFCOUNTER_IQDROPS:
1211 return (adapter->iqdrops);
1212 case IFCOUNTER_OQDROPS:
1214 case IFCOUNTER_IERRORS:
1215 return (adapter->ierrors);
1217 return (if_get_counter_default(ifp, cnt));
1219 } /* ixgbe_if_get_counter */
1221 /************************************************************************
1223 ************************************************************************/
/*
 * Service an SIOCGI2C-style request: reads req->len bytes one at a time
 * from the PHY's I2C interface starting at req->offset into req->data[].
 * Bails out early when the MAC has no read_i2c_byte op.
 * NOTE(review): the loop index declaration and return statements are
 * elided in this listing.
 */
1225 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1227 struct adapter *adapter = iflib_get_softc(ctx);
1228 struct ixgbe_hw *hw = &adapter->hw;
1232 if (hw->phy.ops.read_i2c_byte == NULL)
1234 for (i = 0; i < req->len; i++)
1235 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1236 req->dev_addr, &req->data[i]);
1238 } /* ixgbe_if_i2c_req */
1240 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1241 * @ctx: iflib context
1242 * @event: event code to check
1244 * Defaults to returning true for unknown events.
1246 * @returns true if iflib needs to reinit the interface
/* NOTE(review): only the VLAN-config case label is visible here; the
 * switch header, return statements and default case (which, per the
 * comment above, returns true for unknown events) are elided. */
1249 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1252 case IFLIB_RESTART_VLAN_CONFIG:
1259 /************************************************************************
1260 * ixgbe_add_media_types
1261 ************************************************************************/
/*
 * Translate the PHY-layer capability bits into ifmedia entries.  Backplane
 * (KR/KX) layers get native IFM_* types when IFM_ETH_XTYPE is available,
 * otherwise they are mapped onto the closest classic media type with a
 * console note.  Always finishes by adding IFM_AUTO.
 * NOTE(review): some closing braces and the #else/#endif directives of
 * the IFM_ETH_XTYPE conditional are elided in this listing.
 */
1263 ixgbe_add_media_types(if_ctx_t ctx)
1265 struct adapter *adapter = iflib_get_softc(ctx);
1266 struct ixgbe_hw *hw = &adapter->hw;
1267 device_t dev = iflib_get_dev(ctx);
1270 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1272 /* Media types with matching FreeBSD media defines */
1273 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1274 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1275 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1276 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1277 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1278 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1279 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1280 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
/* Direct-attach copper (passive or active) presents as 10G twinax */
1282 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1283 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1284 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
/* Multispeed fiber modules also advertise the 1G rate */
1287 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1288 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1289 if (hw->phy.multispeed_fiber)
1290 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1294 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1295 if (hw->phy.multispeed_fiber)
1296 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1298 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1299 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1300 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1301 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1303 #ifdef IFM_ETH_XTYPE
1304 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1305 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1306 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1307 ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1308 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1309 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1310 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1311 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
/* No IFM_ETH_XTYPE: fall back to legacy media types and say so
 * (the #else directive, line 1312, is elided in this listing) */
1313 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1314 device_printf(dev, "Media supported: 10GbaseKR\n");
1315 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1316 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1318 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1319 device_printf(dev, "Media supported: 10GbaseKX4\n");
1320 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1321 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1323 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1324 device_printf(dev, "Media supported: 1000baseKX\n");
1325 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1326 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1328 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1329 device_printf(dev, "Media supported: 2500baseKX\n");
1330 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1331 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
/* 1000baseBX is reported but no ifmedia entry is added for it */
1334 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1335 device_printf(dev, "Media supported: 1000baseBX\n");
1337 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1338 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1340 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1343 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1344 } /* ixgbe_add_media_types */
1346 /************************************************************************
1348 ************************************************************************/
/*
 * Return whether the port uses a pluggable (SFP/QSFP) module, decided per
 * MAC generation: 82598 via PHY type, 82599 via media type, X550EM via
 * fiber media.  NOTE(review): the TRUE/FALSE return lines, default cases
 * and some case labels are elided in this listing.
 */
1350 ixgbe_is_sfp(struct ixgbe_hw *hw)
1352 switch (hw->mac.type) {
1353 case ixgbe_mac_82598EB:
1354 if (hw->phy.type == ixgbe_phy_nl)
1357 case ixgbe_mac_82599EB:
1358 switch (hw->mac.ops.get_media_type(hw)) {
1359 case ixgbe_media_type_fiber:
1360 case ixgbe_media_type_fiber_qsfp:
1365 case ixgbe_mac_X550EM_x:
1366 case ixgbe_mac_X550EM_a:
1367 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1373 } /* ixgbe_is_sfp */
1375 /************************************************************************
1377 ************************************************************************/
/*
 * Configure link: on SFP ports, defer module setup to the admin task
 * (IXGBE_REQUEST_TASK_MOD); otherwise query current link state, resolve
 * the advertised/negotiated speed set, and program the MAC via setup_link.
 * NOTE(review): the if/else joining the SFP and non-SFP paths and the
 * trailing arguments of the elided call lines are missing from this
 * listing.
 */
1379 ixgbe_config_link(if_ctx_t ctx)
1381 struct adapter *adapter = iflib_get_softc(ctx);
1382 struct ixgbe_hw *hw = &adapter->hw;
1383 u32 autoneg, err = 0;
1384 bool sfp, negotiate;
1386 sfp = ixgbe_is_sfp(hw);
/* SFP present: hand module bring-up to the deferred admin task */
1389 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1390 iflib_admin_intr_deferred(ctx);
1392 if (hw->mac.ops.check_link)
1393 err = ixgbe_check_link(hw, &adapter->link_speed,
1394 &adapter->link_up, FALSE);
/* No advertised speeds configured yet: fall back to hw capabilities */
1397 autoneg = hw->phy.autoneg_advertised;
1398 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1399 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1403 if (hw->mac.ops.setup_link)
1404 err = hw->mac.ops.setup_link(hw, autoneg,
1407 } /* ixgbe_config_link */
1409 /************************************************************************
1410 * ixgbe_update_stats_counters - Update board statistics counters.
1411 ************************************************************************/
/*
 * Read the (mostly clear-on-read) hardware statistics registers,
 * accumulate them into adapter->stats.pf, then publish the aggregate
 * values into the iflib/ifnet counters via the IXGBE_SET_* macros.
 * 64-bit octet counters are assembled from low/high register pairs on
 * post-82598 MACs.  NOTE(review): several lines (loop headers, the
 * missed_rx accumulation, if/else joins) are elided in this listing.
 */
1413 ixgbe_update_stats_counters(struct adapter *adapter)
1415 struct ixgbe_hw *hw = &adapter->hw;
1416 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1417 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1419 u64 total_missed_rx = 0;
1421 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1422 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1423 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1424 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1425 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
/* Per-queue packet/drop counters, first 16 queues */
1427 for (int i = 0; i < 16; i++) {
1428 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1429 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1430 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1432 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1433 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1434 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1436 /* Hardware workaround, gprc counts missed packets */
1437 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1438 stats->gprc -= missed_rx;
/* Non-82598 MACs: 64-bit counters span a low/high register pair */
1440 if (hw->mac.type != ixgbe_mac_82598EB) {
1441 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1442 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1443 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1444 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1445 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1446 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1447 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1448 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1449 stats->lxoffrxc += lxoffrxc;
1451 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1452 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1453 stats->lxoffrxc += lxoffrxc;
1454 /* 82598 only has a counter in the high register */
1455 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1456 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1457 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1461 * For watchdog management we need to know if we have been paused
1462 * during the last interval, so capture that here.
1465 adapter->shared->isc_pause_frames = 1;
1468 * Workaround: mprc hardware is incorrectly counting
1469 * broadcasts, so for now we subtract those.
1471 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1472 stats->bprc += bprc;
1473 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1474 if (hw->mac.type == ixgbe_mac_82598EB)
1475 stats->mprc -= bprc;
1477 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1478 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1479 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1480 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1481 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1482 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1484 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1485 stats->lxontxc += lxon;
1486 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1487 stats->lxofftxc += lxoff;
1488 total = lxon + lxoff;
/* Pause frames are counted as "good" TX by hw; back them out */
1490 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1491 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1492 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1493 stats->gptc -= total;
1494 stats->mptc -= total;
1495 stats->ptc64 -= total;
1496 stats->gotc -= total * ETHER_MIN_LEN;
1498 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1499 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1500 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1501 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1502 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1503 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1504 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1505 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1506 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1507 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1508 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1509 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1510 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1511 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1512 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1513 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1514 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1515 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1516 /* Only read FCOE on 82599 */
1517 if (hw->mac.type != ixgbe_mac_82598EB) {
1518 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1519 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1520 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1521 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1522 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1525 /* Fill out the OS statistics structure */
1526 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1527 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1528 IXGBE_SET_IBYTES(adapter, stats->gorc);
1529 IXGBE_SET_OBYTES(adapter, stats->gotc);
1530 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1531 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1532 IXGBE_SET_COLLISIONS(adapter, 0);
1533 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1536 * Aggregate following types of errors as RX errors:
1537 * - CRC error count,
1538 * - illegal byte error count,
1539 * - checksum error count,
1540 * - missed packets count,
1541 * - length error count,
1542 * - undersized packets count,
1543 * - fragmented packets count,
1544 * - oversized packets count,
1547 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
1548 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1550 } /* ixgbe_update_stats_counters */
1552 /************************************************************************
1553 * ixgbe_add_hw_stats
1555 * Add sysctl variables, one per statistic, to the system.
1556 ************************************************************************/
/*
 * ixgbe_add_hw_stats - register one sysctl OID per statistic: driver
 * counters at the device node, per-queue TX/RX counters under queueN
 * sub-nodes, and MAC counters under "mac_stats".
 *
 * Fix: the "management_pkts_drpd" OID was bound to &stats->mngptc (the
 * management-packets-TRANSMITTED counter, already exported below as
 * "management_pkts_txd"); it is now bound to &stats->mngpdc, which
 * ixgbe_update_stats_counters() accumulates from IXGBE_MNGPDC but which
 * was previously never exposed.
 *
 * NOTE(review): this listing elides a few lines (loop variable 'i'
 * declaration, some closing braces, an OID description string); those
 * are reproduced here exactly as the listing shows them.
 */
1558 ixgbe_add_hw_stats(struct adapter *adapter)
1560 device_t dev = iflib_get_dev(adapter->ctx);
1561 struct ix_rx_queue *rx_que;
1562 struct ix_tx_queue *tx_que;
1563 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1564 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1565 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1566 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1567 struct sysctl_oid *stat_node, *queue_node;
1568 struct sysctl_oid_list *stat_list, *queue_list;
1571 #define QUEUE_NAME_LEN 32
1572 char namebuf[QUEUE_NAME_LEN];
1574 /* Driver Statistics */
1575 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1576 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1577 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1578 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1579 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1580 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-queue node: descriptor head/tail (live register reads) and counters */
1582 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1583 struct tx_ring *txr = &tx_que->txr;
1584 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1585 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1586 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1587 queue_list = SYSCTL_CHILDREN(queue_node);
1589 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1590 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1591 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1592 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1593 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1594 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1595 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1596 CTLFLAG_RD, &txr->tso_tx, "TSO");
1597 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1598 CTLFLAG_RD, &txr->total_packets,
1599 "Queue Packets Transmitted");
/* Per-RX-queue node: interrupt rate (read/write), irq and packet counters */
1602 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1603 struct rx_ring *rxr = &rx_que->rxr;
1604 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1605 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1606 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1607 queue_list = SYSCTL_CHILDREN(queue_node);
1609 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1610 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1611 &adapter->rx_queues[i], 0,
1612 ixgbe_sysctl_interrupt_rate_handler, "IU",
1614 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1615 CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1616 "irqs on this queue");
1617 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1618 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1619 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1620 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1621 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1622 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1623 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1624 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1625 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1626 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1627 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1628 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1629 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1630 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1633 /* MAC stats get their own sub node */
1635 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1636 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1637 stat_list = SYSCTL_CHILDREN(stat_node);
1639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1640 CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1642 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1644 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1646 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1648 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1650 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1652 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1654 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1656 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1658 /* Flow Control stats */
1659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1660 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1662 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1664 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1665 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1666 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1668 /* Packet Reception Stats */
1669 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1670 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1671 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1672 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1673 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1674 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1675 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1676 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1677 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1678 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1679 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1680 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1681 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1682 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1683 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1684 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1685 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1686 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1687 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1688 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1689 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1690 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1691 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1692 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1693 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1694 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1695 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1696 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1697 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1698 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1699 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1700 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1701 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1702 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
/* BUGFIX: export the dropped counter (mngpdc), not mngptc */
1703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1704 CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1705 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1706 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1708 /* Packet Transmission Stats */
1709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1710 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1712 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1713 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1714 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1716 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1718 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1719 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1720 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1721 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1722 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1724 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1725 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1726 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1728 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1730 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1731 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1732 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1733 } /* ixgbe_add_hw_stats */
1735 /************************************************************************
1736 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1738 * Retrieves the TDH value from the hardware
1739 ************************************************************************/
/* Read the live TDH register for this ring and hand it to sysctl.
 * NOTE(review): declarations of 'val'/'error', NULL checks and return
 * statements are elided in this listing. */
1741 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1743 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1750 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1751 error = sysctl_handle_int(oidp, &val, 0, req);
1752 if (error || !req->newptr)
1756 } /* ixgbe_sysctl_tdh_handler */
1758 /************************************************************************
1759 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1761 * Retrieves the TDT value from the hardware
1762 ************************************************************************/
/* Read the live TDT register for this ring and hand it to sysctl.
 * NOTE(review): declarations, NULL checks and returns are elided here. */
1764 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1766 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1773 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1774 error = sysctl_handle_int(oidp, &val, 0, req);
1775 if (error || !req->newptr)
1779 } /* ixgbe_sysctl_tdt_handler */
1781 /************************************************************************
1782 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1784 * Retrieves the RDH value from the hardware
1785 ************************************************************************/
1787 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
/* oid_arg1 carries the rx_ring this sysctl node was registered against */
1789 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live Receive Descriptor Head register for this queue (rxr->me) */
1796 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1797 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: done on error or when the request supplies no new value */
1798 if (error || !req->newptr)
1802 } /* ixgbe_sysctl_rdh_handler */
1804 /************************************************************************
1805 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1807 * Retrieves the RDT value from the hardware
1808 ************************************************************************/
1810 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
/* oid_arg1 carries the rx_ring this sysctl node was registered against */
1812 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* Read the live Receive Descriptor Tail register for this queue (rxr->me) */
1819 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1820 error = sysctl_handle_int(oidp, &val, 0, req);
/* Read-only node: done on error or when the request supplies no new value */
1821 if (error || !req->newptr)
1825 } /* ixgbe_sysctl_rdt_handler */
1827 /************************************************************************
1828 * ixgbe_if_vlan_register
1830 * Run via vlan config EVENT, it enables us to use the
1831 * HW Filter table since we can get the vlan id. This
1832 * just creates the entry in the soft version of the
1833 * VFTA, init will repopulate the real table.
1834 ************************************************************************/
1836 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1838 struct adapter *adapter = iflib_get_softc(ctx);
/*
 * The shadow VFTA is an array of 32-bit words; bits 5..11 of the
 * VLAN tag select the word (index), the low 5 bits select the bit.
 */
1841 index = (vtag >> 5) & 0x7F;
1843 adapter->shadow_vfta[index] |= (1 << bit);
1844 ++adapter->num_vlans;
/* Re-sync the hardware VLAN state with the updated soft table */
1845 ixgbe_setup_vlan_hw_support(ctx);
1846 } /* ixgbe_if_vlan_register */
1848 /************************************************************************
1849 * ixgbe_if_vlan_unregister
1851 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1852 ************************************************************************/
1854 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1856 struct adapter *adapter = iflib_get_softc(ctx);
/* Same word/bit addressing as ixgbe_if_vlan_register, clearing the bit */
1859 index = (vtag >> 5) & 0x7F;
1861 adapter->shadow_vfta[index] &= ~(1 << bit);
1862 --adapter->num_vlans;
1863 /* Re-init to load the changes */
1864 ixgbe_setup_vlan_hw_support(ctx);
1865 } /* ixgbe_if_vlan_unregister */
1867 /************************************************************************
1868 * ixgbe_setup_vlan_hw_support
 *
 *   Program the hardware VLAN state (per-queue tag stripping, the VLAN
 *   filter table, and the global VLNCTRL register) to match the driver's
 *   soft state and the interface's capability flags.
1869 ************************************************************************/
1871 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1873 struct ifnet *ifp = iflib_get_ifp(ctx);
1874 struct adapter *adapter = iflib_get_softc(ctx);
1875 struct ixgbe_hw *hw = &adapter->hw;
1876 struct rx_ring *rxr;
1882 * We get here thru init_locked, meaning
1883 * a soft reset, this has already cleared
1884 * the VFTA and other state, so if there
1885 * have been no vlan's registered do nothing.
1887 if (adapter->num_vlans == 0)
1890 /* Setup the queues for vlans */
1891 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1892 for (i = 0; i < adapter->num_rx_queues; i++) {
1893 rxr = &adapter->rx_queues[i].rxr;
1894 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1895 if (hw->mac.type != ixgbe_mac_82598EB) {
1896 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1897 ctrl |= IXGBE_RXDCTL_VME;
1898 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember per-ring that the hardware strips the VLAN tag */
1900 rxr->vtag_strip = TRUE;
/* Nothing more to do unless hardware VLAN filtering is enabled */
1904 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1907 * A soft reset zero's out the VFTA, so
1908 * we need to repopulate it now.
1910 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1911 if (adapter->shadow_vfta[i] != 0)
1912 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1913 adapter->shadow_vfta[i]);
1915 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1916 /* Enable the Filter Table if enabled */
1917 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1918 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1919 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 VLAN stripping is global (VLNCTRL), not per-queue */
1921 if (hw->mac.type == ixgbe_mac_82598EB)
1922 ctrl |= IXGBE_VLNCTRL_VME;
1923 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1924 } /* ixgbe_setup_vlan_hw_support */
1926 /************************************************************************
1927 * ixgbe_get_slot_info
1929 * Get the width and transaction speed of
1930 * the slot this adapter is plugged into.
 *
 *   Reports PCIe speed/width and warns if the slot provides less
 *   bandwidth than the adapter needs for optimal performance.
1931 ************************************************************************/
1933 ixgbe_get_slot_info(struct adapter *adapter)
1935 device_t dev = iflib_get_dev(adapter->ctx);
1936 struct ixgbe_hw *hw = &adapter->hw;
1937 int bus_info_valid = TRUE;
1941 /* Some devices are behind an internal bridge */
1942 switch (hw->device_id) {
1943 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1944 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1945 goto get_parent_info;
/* Normal case: shared code can read bus info directly from the device */
1950 ixgbe_get_bus_info(hw);
1953 * Some devices don't use PCI-E, but there is no need
1954 * to display "Unknown" for bus speed and width.
1956 switch (hw->mac.type) {
1957 case ixgbe_mac_X550EM_x:
1958 case ixgbe_mac_X550EM_a:
1966 * For the Quad port adapter we need to parse back
1967 * up the PCI tree to find the speed of the expansion
1968 * slot into which this adapter is plugged. A bit more work.
1970 dev = device_get_parent(device_get_parent(dev));
1972 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1973 pci_get_slot(dev), pci_get_function(dev));
1975 dev = device_get_parent(device_get_parent(dev));
1977 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1978 pci_get_slot(dev), pci_get_function(dev));
1980 /* Now get the PCI Express Capabilities offset */
1981 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1983 * Hmm...can't get PCI-Express capabilities.
1984 * Falling back to default method.
1986 bus_info_valid = FALSE;
1987 ixgbe_get_bus_info(hw);
1990 /* ...and read the Link Status Register */
1991 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Let shared code decode speed/width from the Link Status value */
1992 ixgbe_set_pci_config_data_generic(hw, link);
1995 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1996 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1997 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1998 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2000 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2001 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2002 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the negotiated link cannot carry full line rate */
2005 if (bus_info_valid) {
2006 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2007 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2008 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2009 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2010 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
/* The quad-port SFP part needs more bandwidth than the others */
2012 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2013 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2014 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2015 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
2016 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2019 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2022 } /* ixgbe_get_slot_info */
2024 /************************************************************************
2025 * ixgbe_if_msix_intr_assign
2027 * Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates one MSI-X vector per RX queue, maps TX queues onto the
 *   RX vectors, and registers the admin (link/mailbox) vector last.
2028 ************************************************************************/
2030 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2032 struct adapter *adapter = iflib_get_softc(ctx);
2033 struct ix_rx_queue *rx_que = adapter->rx_queues;
2034 struct ix_tx_queue *tx_que;
2035 int error, rid, vector = 0;
2039 /* Admin Que is vector 0*/
2041 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2044 snprintf(buf, sizeof(buf), "rxq%d", i);
2045 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2046 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2049 device_printf(iflib_get_dev(ctx),
2050 "Failed to allocate que int %d err: %d", i, error);
/*
 * Shrink the queue count so the failure path below only frees
 * the IRQs that were actually allocated.
 */
2051 adapter->num_rx_queues = i + 1;
2055 rx_que->msix = vector;
2056 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2058 * The queue ID is used as the RSS layer bucket ID.
2059 * We look up the queue ID -> RSS CPU ID and select
2062 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2065 * Bind the MSI-X vector, and thus the
2066 * rings to the corresponding cpu.
2068 * This just happens to match the default RSS
2069 * round-robin bucket -> queue -> CPU allocation.
2071 if (adapter->num_rx_queues > 1)
/* TX queues share the RX vectors round-robin (no dedicated TX IRQs) */
2076 for (int i = 0; i < adapter->num_tx_queues; i++) {
2077 snprintf(buf, sizeof(buf), "txq%d", i);
2078 tx_que = &adapter->tx_queues[i];
2079 tx_que->msix = i % adapter->num_rx_queues;
2080 iflib_softirq_alloc_generic(ctx,
2081 &adapter->rx_queues[tx_que->msix].que_irq,
2082 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Admin/link vector ("aq") handles link, mailbox, SFP and misc causes */
2085 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2086 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2088 device_printf(iflib_get_dev(ctx),
2089 "Failed to register admin handler");
2093 adapter->vector = vector;
/* Failure path: release every queue IRQ allocated above */
2097 iflib_irq_free(ctx, &adapter->irq);
2098 rx_que = adapter->rx_queues;
2099 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2100 iflib_irq_free(ctx, &rx_que->que_irq);
2103 } /* ixgbe_if_msix_intr_assign */
2105 /*********************************************************************
2106 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Fast interrupt filter for a queue vector: masks the queue's
 *   interrupt and defers the real work to the iflib task thread.
2107 **********************************************************************/
2109 ixgbe_msix_que(void *arg)
2111 struct ix_rx_queue *que = arg;
2112 struct adapter *adapter = que->adapter;
2113 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
2115 /* Protect against spurious interrupts */
2116 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2117 return (FILTER_HANDLED);
/* Mask this queue's interrupt; re-enabled after the deferred work runs */
2119 ixgbe_disable_queue(adapter, que->msix);
2122 return (FILTER_SCHEDULE_THREAD);
2123 } /* ixgbe_msix_que */
2125 /************************************************************************
2126 * ixgbe_media_status - Media Ioctl callback
2128 * Called whenever the user queries the status of
2129 * the interface using ifconfig.
 *
 *   Translates the PHY layer bitmap and current link speed into the
 *   corresponding ifmedia subtype, and reports flow-control state.
2130 ************************************************************************/
2132 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2134 struct adapter *adapter = iflib_get_softc(ctx);
2135 struct ixgbe_hw *hw = &adapter->hw;
2138 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2140 ifmr->ifm_status = IFM_AVALID;
2141 ifmr->ifm_active = IFM_ETHER;
/* No link: leave only AVALID set so userland sees "no carrier" */
2143 if (!adapter->link_active)
2146 ifmr->ifm_status |= IFM_ACTIVE;
2147 layer = adapter->phy_layer;
/* Copper/baseT layers: pick subtype from the negotiated link speed */
2149 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2150 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2151 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2152 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2153 switch (adapter->link_speed) {
2154 case IXGBE_LINK_SPEED_10GB_FULL:
2155 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2157 case IXGBE_LINK_SPEED_1GB_FULL:
2158 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2160 case IXGBE_LINK_SPEED_100_FULL:
2161 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2163 case IXGBE_LINK_SPEED_10_FULL:
2164 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* Direct-attach copper (twinax) */
2167 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2168 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2169 switch (adapter->link_speed) {
2170 case IXGBE_LINK_SPEED_10GB_FULL:
2171 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Long-reach fiber */
2174 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2175 switch (adapter->link_speed) {
2176 case IXGBE_LINK_SPEED_10GB_FULL:
2177 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2179 case IXGBE_LINK_SPEED_1GB_FULL:
2180 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2183 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2184 switch (adapter->link_speed) {
2185 case IXGBE_LINK_SPEED_10GB_FULL:
2186 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2188 case IXGBE_LINK_SPEED_1GB_FULL:
2189 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
/* Short-reach fiber */
2192 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2193 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2194 switch (adapter->link_speed) {
2195 case IXGBE_LINK_SPEED_10GB_FULL:
2196 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2198 case IXGBE_LINK_SPEED_1GB_FULL:
2199 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2202 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2203 switch (adapter->link_speed) {
2204 case IXGBE_LINK_SPEED_10GB_FULL:
2205 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2209 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX): old systems lack the KR/KX media defines */
2212 #ifndef IFM_ETH_XTYPE
2213 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2214 switch (adapter->link_speed) {
2215 case IXGBE_LINK_SPEED_10GB_FULL:
2216 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2218 case IXGBE_LINK_SPEED_2_5GB_FULL:
2219 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2221 case IXGBE_LINK_SPEED_1GB_FULL:
2222 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2225 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2226 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2227 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2228 switch (adapter->link_speed) {
2229 case IXGBE_LINK_SPEED_10GB_FULL:
2230 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2232 case IXGBE_LINK_SPEED_2_5GB_FULL:
2233 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2235 case IXGBE_LINK_SPEED_1GB_FULL:
2236 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* Newer systems: report the true KR/KX4/KX media types */
2240 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2241 switch (adapter->link_speed) {
2242 case IXGBE_LINK_SPEED_10GB_FULL:
2243 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2245 case IXGBE_LINK_SPEED_2_5GB_FULL:
2246 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2248 case IXGBE_LINK_SPEED_1GB_FULL:
2249 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2252 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2253 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2254 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2255 switch (adapter->link_speed) {
2256 case IXGBE_LINK_SPEED_10GB_FULL:
2257 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2259 case IXGBE_LINK_SPEED_2_5GB_FULL:
2260 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2262 case IXGBE_LINK_SPEED_1GB_FULL:
2263 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2268 /* If nothing is recognized... */
2269 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2270 ifmr->ifm_active |= IFM_UNKNOWN;
2272 /* Display current flow control setting used on link */
2273 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2274 hw->fc.current_mode == ixgbe_fc_full)
2275 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2276 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2277 hw->fc.current_mode == ixgbe_fc_full)
2278 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2279 } /* ixgbe_media_status */
2281 /************************************************************************
2282 * ixgbe_media_change - Media Ioctl callback
2284 * Called when the user changes speed/duplex using
2285 * media/mediopt option with ifconfig.
 *
 *   Maps the requested ifmedia subtype to an ixgbe_link_speed mask,
 *   programs the PHY, and records the advertised speeds.
2286 ************************************************************************/
2288 ixgbe_if_media_change(if_ctx_t ctx)
2290 struct adapter *adapter = iflib_get_softc(ctx);
2291 struct ifmedia *ifm = iflib_get_media(ctx);
2292 struct ixgbe_hw *hw = &adapter->hw;
2293 ixgbe_link_speed speed = 0;
2295 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2297 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media is fixed; user-selected speed changes don't apply */
2300 if (hw->phy.media_type == ixgbe_media_type_backplane)
2304 * We don't actually need to check against the supported
2305 * media types of the adapter; ifmedia will take care of
2308 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Autoselect: advertise every speed this mapping covers */
2311 speed |= IXGBE_LINK_SPEED_100_FULL;
2312 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2313 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2317 #ifndef IFM_ETH_XTYPE
2318 case IFM_10G_SR: /* KR, too */
2319 case IFM_10G_CX4: /* KX4 */
2324 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2325 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2327 #ifndef IFM_ETH_XTYPE
2328 case IFM_1000_CX: /* KX */
2334 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2337 speed |= IXGBE_LINK_SPEED_100_FULL;
2338 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2340 case IFM_10G_TWINAX:
2341 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2344 speed |= IXGBE_LINK_SPEED_100_FULL;
2347 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Kick the PHY with the new advertised-speed mask */
2353 hw->mac.autotry_restart = TRUE;
2354 hw->mac.ops.setup_link(hw, speed, TRUE);
/* Encode advertise bitmap: 1=100M, 2=1G, 4=10G, 8=10M (sysctl format) */
2355 adapter->advertise =
2356 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2357 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2358 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2359 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2364 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2367 } /* ixgbe_if_media_change */
2369 /************************************************************************
 * ixgbe_if_promisc_set - program unicast/multicast promiscuous mode
 *
 *   Sets or clears the UPE (unicast) and MPE (multicast) promiscuous
 *   bits in FCTRL according to the interface flags.
2371 ************************************************************************/
2373 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2375 struct adapter *adapter = iflib_get_softc(ctx);
2376 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from current FCTRL with unicast promiscuous cleared */
2380 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2381 rctl &= (~IXGBE_FCTRL_UPE);
2382 if (ifp->if_flags & IFF_ALLMULTI)
2383 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2385 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
/* Only leave MPE on when the multicast filter table would overflow */
2387 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2388 rctl &= (~IXGBE_FCTRL_MPE);
2389 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2391 if (ifp->if_flags & IFF_PROMISC) {
2392 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2393 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2394 } else if (ifp->if_flags & IFF_ALLMULTI) {
2395 rctl |= IXGBE_FCTRL_MPE;
2396 rctl &= ~IXGBE_FCTRL_UPE;
2397 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2400 } /* ixgbe_if_promisc_set */
2402 /************************************************************************
2403 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 *   Admin-vector filter: decodes the interrupt cause register and
 *   records the required deferred work in adapter->task_requests.
2404 ************************************************************************/
2406 ixgbe_msix_link(void *arg)
2408 struct adapter *adapter = arg;
2409 struct ixgbe_hw *hw = &adapter->hw;
2410 u32 eicr, eicr_mask;
2413 ++adapter->link_irq;
2415 /* Pause other interrupts */
2416 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2418 /* First get the cause */
2419 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2420 /* Be sure the queue bits are not cleared */
2421 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2422 /* Clear interrupt with write */
2423 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2425 /* Link status change */
2426 if (eicr & IXGBE_EICR_LSC) {
2427 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2428 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* Causes below do not exist on 82598 */
2431 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2432 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2433 (eicr & IXGBE_EICR_FLOW_DIR)) {
2434 /* This is probably overkill :) */
2435 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2436 return (FILTER_HANDLED);
2437 /* Disable the interrupt */
2438 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2439 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2441 if (eicr & IXGBE_EICR_ECC) {
2442 device_printf(iflib_get_dev(adapter->ctx),
2443 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2444 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2447 /* Check for over temp condition */
2448 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2449 switch (adapter->hw.mac.type) {
2450 case ixgbe_mac_X550EM_a:
/* X550EM_a signals over-temp via GPI SDP0 rather than the TS bit */
2451 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2453 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2454 IXGBE_EICR_GPI_SDP0_X550EM_a);
2455 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2456 IXGBE_EICR_GPI_SDP0_X550EM_a);
2457 retval = hw->phy.ops.check_overtemp(hw);
2458 if (retval != IXGBE_ERR_OVERTEMP)
2460 device_printf(iflib_get_dev(adapter->ctx),
2461 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2462 device_printf(iflib_get_dev(adapter->ctx),
2463 "System shutdown required!\n");
/* Other MACs: thermal event is reported via the TS cause bit */
2466 if (!(eicr & IXGBE_EICR_TS))
2468 retval = hw->phy.ops.check_overtemp(hw);
2469 if (retval != IXGBE_ERR_OVERTEMP)
2471 device_printf(iflib_get_dev(adapter->ctx),
2472 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2473 device_printf(iflib_get_dev(adapter->ctx),
2474 "System shutdown required!\n");
2475 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2480 /* Check for VF message */
2481 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2482 (eicr & IXGBE_EICR_MAILBOX))
2483 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2486 if (ixgbe_is_sfp(hw)) {
2487 /* Pluggable optics-related interrupt */
2488 if (hw->mac.type >= ixgbe_mac_X540)
2489 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2491 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
/* Module insertion/removal -> schedule the MOD task */
2493 if (eicr & eicr_mask) {
2494 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2495 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
/* 82599 SFP: multi-speed fiber setup task on SDP1 */
2498 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2499 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2500 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2501 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2502 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2506 /* Check for fan failure */
2507 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2508 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2509 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2512 /* External PHY interrupt */
2513 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2514 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2515 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2516 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Only wake the admin task thread if some work was queued */
2519 return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2520 } /* ixgbe_msix_link */
2522 /************************************************************************
2523 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Reads/writes a queue's interrupt throttle rate (interrupts/sec)
 *   by converting to and from the hardware EITR interval field.
2524 ************************************************************************/
2526 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2528 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2530 unsigned int reg, usec, rate;
2532 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* EITR interval lives in bits 3..11 of the register */
2533 usec = ((reg & 0x0FF8) >> 3);
2535 rate = 500000 / usec;
2538 error = sysctl_handle_int(oidp, &rate, 0, req);
2539 if (error || !req->newptr)
2541 reg &= ~0xfff; /* default, no limitation */
2542 ixgbe_max_interrupt_rate = 0;
2543 if (rate > 0 && rate < 500000) {
/* Non-zero rate: convert back into the EITR interval encoding */
2546 ixgbe_max_interrupt_rate = rate;
2547 reg |= ((4000000/rate) & 0xff8);
2549 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2552 } /* ixgbe_sysctl_interrupt_rate_handler */
2554 /************************************************************************
2555 * ixgbe_add_device_sysctls
 *
 *   Registers the per-device sysctl nodes: common knobs for every
 *   adapter plus conditional nodes for X550-class, WoL-capable,
 *   external-PHY, and EEE-capable hardware.
2556 ************************************************************************/
2558 ixgbe_add_device_sysctls(if_ctx_t ctx)
2560 struct adapter *adapter = iflib_get_softc(ctx);
2561 device_t dev = iflib_get_dev(ctx);
2562 struct ixgbe_hw *hw = &adapter->hw;
2563 struct sysctl_oid_list *child;
2564 struct sysctl_ctx_list *ctx_list;
2566 ctx_list = device_get_sysctl_ctx(dev);
2567 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2569 /* Sysctls for all devices */
2570 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2571 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2572 adapter, 0, ixgbe_sysctl_flowcntl, "I",
2573 IXGBE_SYSCTL_DESC_SET_FC);
2575 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2576 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2577 adapter, 0, ixgbe_sysctl_advertise, "I",
2578 IXGBE_SYSCTL_DESC_ADV_SPEED);
2581 /* testing sysctls (for all devices) */
2582 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2583 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2584 adapter, 0, ixgbe_sysctl_power_state,
2585 "I", "PCI Power State");
2587 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2588 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2589 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2591 /* for X550 series devices */
2592 if (hw->mac.type >= ixgbe_mac_X550)
2593 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2594 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2595 adapter, 0, ixgbe_sysctl_dmac,
2596 "I", "DMA Coalesce");
2598 /* for WoL-capable devices */
2599 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2600 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2601 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2602 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2604 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2605 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2606 adapter, 0, ixgbe_sysctl_wufc,
2607 "I", "Enable/Disable Wake Up Filters");
2610 /* for X552/X557-AT devices */
2611 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2612 struct sysctl_oid *phy_node;
2613 struct sysctl_oid_list *phy_list;
/* Group the external PHY knobs under a "phy" sub-node */
2615 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2616 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2617 phy_list = SYSCTL_CHILDREN(phy_node);
2619 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2620 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2621 adapter, 0, ixgbe_sysctl_phy_temp,
2622 "I", "Current External PHY Temperature (Celsius)");
2624 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2625 "overtemp_occurred",
2626 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2627 ixgbe_sysctl_phy_overtemp_occurred, "I",
2628 "External PHY High Temperature Event Occurred");
/* Energy Efficient Ethernet, only where the capability exists */
2631 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2632 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2633 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2634 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2636 } /* ixgbe_add_device_sysctls */
2638 /************************************************************************
2639 * ixgbe_allocate_pci_resources
 *
 *   Maps the device's memory BAR and stashes the bus-space handles
 *   used by the IXGBE_READ/WRITE_REG macros and the shared code.
2640 ************************************************************************/
2642 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2644 struct adapter *adapter = iflib_get_softc(ctx);
2645 device_t dev = iflib_get_dev(ctx);
2649 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2652 if (!(adapter->pci_mem)) {
2653 device_printf(dev, "Unable to allocate bus resource: memory\n");
2657 /* Save bus_space values for READ/WRITE_REG macros */
2658 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2659 adapter->osdep.mem_bus_space_handle =
2660 rman_get_bushandle(adapter->pci_mem);
2661 /* Set hw values for shared code */
2662 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2665 } /* ixgbe_allocate_pci_resources */
2667 /************************************************************************
2668 * ixgbe_detach - Device removal routine
2670 * Called when the driver is being removed.
2671 * Stops the adapter and deallocates all the resources
2672 * that were allocated for driver operation.
2674 * return 0 on success, positive on failure
2675 ************************************************************************/
2677 ixgbe_if_detach(if_ctx_t ctx)
2679 struct adapter *adapter = iflib_get_softc(ctx);
2680 device_t dev = iflib_get_dev(ctx);
2683 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Refuse to detach while VFs are still attached via SR-IOV */
2685 if (ixgbe_pci_iov_detach(dev) != 0) {
2686 device_printf(dev, "SR-IOV in use; detach first.\n");
/* Put the port into its low-power / WoL-ready state before teardown */
2690 ixgbe_setup_low_power_mode(ctx);
2692 /* let hardware know driver is unloading */
2693 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2694 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2695 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2697 ixgbe_free_pci_resources(ctx);
2698 free(adapter->mta, M_IXGBE);
2701 } /* ixgbe_if_detach */
2703 /************************************************************************
2704 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2706 * Prepare the adapter/port for LPLU and/or WoL
2707 ************************************************************************/
2709 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2711 struct adapter *adapter = iflib_get_softc(ctx);
2712 struct ixgbe_hw *hw = &adapter->hw;
2713 device_t dev = iflib_get_dev(ctx);
/* If WoL is off we can fully power down the PHY */
2716 if (!hw->wol_enabled)
2717 ixgbe_set_phy_power(hw, FALSE);
2719 /* Limit power management flow to X550EM baseT */
2720 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2721 hw->phy.ops.enter_lplu) {
2722 /* Turn off support for APM wakeup. (Using ACPI instead) */
2723 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2724 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2727 * Clear Wake Up Status register to prevent any previous wakeup
2728 * events from waking us up immediately after we suspend.
2730 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2733 * Program the Wakeup Filter Control register with user filter
2736 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2738 /* Enable wakeups and power management in Wakeup Control */
2739 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2740 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2742 /* X550EM baseT adapters need a special LPLU flow */
2743 hw->phy.reset_disable = TRUE;
/* reset_disable bracketed around enter_lplu so the PHY state persists */
2745 error = hw->phy.ops.enter_lplu(hw);
2747 device_printf(dev, "Error entering LPLU: %d\n", error);
2748 hw->phy.reset_disable = FALSE;
2750 /* Just stop for other adapters */
2755 } /* ixgbe_setup_low_power_mode */
2757 /************************************************************************
2758 * ixgbe_shutdown - Shutdown entry point
 *
 *   System-shutdown hook: just transitions the port into its
 *   low-power / WoL-ready state.
2759 ************************************************************************/
2761 ixgbe_if_shutdown(if_ctx_t ctx)
2765 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2767 error = ixgbe_setup_low_power_mode(ctx);
2770 } /* ixgbe_if_shutdown */
2772 /************************************************************************
 * ixgbe_if_suspend - Suspend entry point
 *
 *   Same preparation as shutdown: enter low-power / WoL-ready state.
2776 ************************************************************************/
2778 ixgbe_if_suspend(if_ctx_t ctx)
2782 INIT_DEBUGOUT("ixgbe_suspend: begin");
2784 error = ixgbe_setup_low_power_mode(ctx);
2787 } /* ixgbe_if_suspend */
2789 /************************************************************************
 * ixgbe_if_resume - Resume entry point
 *
 *   Reports and clears any wake-up cause, resets the wake-up filters,
 *   and (if the interface is up) restores the link configuration.
2793 ************************************************************************/
2795 ixgbe_if_resume(if_ctx_t ctx)
2797 struct adapter *adapter = iflib_get_softc(ctx);
2798 device_t dev = iflib_get_dev(ctx);
2799 struct ifnet *ifp = iflib_get_ifp(ctx);
2800 struct ixgbe_hw *hw = &adapter->hw;
2803 INIT_DEBUGOUT("ixgbe_resume: begin");
2805 /* Read & clear WUS register */
2806 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2808 device_printf(dev, "Woken up by (WUS): %#010x\n",
2809 IXGBE_READ_REG(hw, IXGBE_WUS));
2810 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2811 /* And clear WUFC until next low-power transition */
2812 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2815 * Required after D3->D0 transition;
2816 * will re-advertise all previous advertised speeds
2818 if (ifp->if_flags & IFF_UP)
2822 } /* ixgbe_if_resume */
2824 /************************************************************************
2825 * ixgbe_if_mtu_set - Ioctl mtu entry point
2827 * Return 0 on success, EINVAL on failure
2828 ************************************************************************/
2830 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2832 struct adapter *adapter = iflib_get_softc(ctx);
2835 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject MTUs beyond what the hardware supports */
2837 if (mtu > IXGBE_MAX_MTU) {
/* Max frame = MTU plus Ethernet header/overhead (IXGBE_MTU_HDR) */
2840 adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2844 } /* ixgbe_if_mtu_set */
2846 /************************************************************************
2847 * ixgbe_if_crcstrip_set
 *
 *   netmap support: toggle hardware CRC stripping in HLREG0/RDRXCTL.
 *   When netmap is entered with crcstrip disabled, the CRC is kept on
 *   received frames for faster RX.
2848 ************************************************************************/
2850 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2852 struct adapter *sc = iflib_get_softc(ctx);
2853 struct ixgbe_hw *hw = &sc->hw;
2854 /* crc stripping is set in two places:
2855 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2856 * IXGBE_RDRXCTL (set by the original driver in
2857 * ixgbe_setup_hw_rsc() called in init_locked.
2858 * We disable the setting when netmap is compiled in).
2859 * We update the values here, but also in ixgbe.c because
2860 * init_locked sometimes is called outside our control.
2864 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2865 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2868 D("%s read HLREG 0x%x rxc 0x%x",
2869 onoff ? "enter" : "exit", hl, rxc);
2871 /* hw requirements ... */
2872 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2873 rxc |= IXGBE_RDRXCTL_RSCACKC;
2874 if (onoff && !crcstrip) {
2875 /* keep the crc. Fast rx */
2876 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2877 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2879 /* reset default mode */
2880 hl |= IXGBE_HLREG0_RXCRCSTRP;
2881 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2885 D("%s write HLREG 0x%x rxc 0x%x",
2886 onoff ? "enter" : "exit", hl, rxc);
2888 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2889 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2890 } /* ixgbe_if_crcstrip_set */
2892 /*********************************************************************
2893 * ixgbe_if_init - Init entry point
2895 * Used in two ways: It is used by the stack as an init
2896 * entry point in network interface structure. It is also
2897 * used by the driver as a hw/sw initialization routine to
2898 * get to a consistent state.
2900 * Return 0 on success, positive on failure
2901 **********************************************************************/
2903 ixgbe_if_init(if_ctx_t ctx)
2905 struct adapter *adapter = iflib_get_softc(ctx);
2906 struct ifnet *ifp = iflib_get_ifp(ctx);
2907 device_t dev = iflib_get_dev(ctx);
2908 struct ixgbe_hw *hw = &adapter->hw;
2909 struct ix_rx_queue *rx_que;
2910 struct ix_tx_queue *tx_que;
2917 INIT_DEBUGOUT("ixgbe_if_init: begin");
2919 /* Queue indices may change with IOV mode */
2920 ixgbe_align_all_queue_indices(adapter);
2922 /* reprogram the RAR[0] in case user changed it. */
2923 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2925 /* Get the latest mac address, User can use a LAA */
2926 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2927 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2928 hw->addr_ctrl.rar_used_count = 1;
2932 ixgbe_initialize_iov(adapter);
2934 ixgbe_initialize_transmit_units(ctx);
2936 /* Setup Multicast table */
2937 ixgbe_if_multi_set(ctx);
2939 /* Determine the correct mbuf pool, based on frame size */
2940 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2942 /* Configure RX settings */
2943 ixgbe_initialize_receive_units(ctx);
2946 * Initialize variable holding task enqueue requests
2947 * from MSI-X interrupts
2949 adapter->task_requests = 0;
2951 /* Enable SDP & MSI-X interrupts based on adapter */
2952 ixgbe_config_gpie(adapter);
2955 if (ifp->if_mtu > ETHERMTU) {
2956 /* aka IXGBE_MAXFRS on 82599 and newer */
2957 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2958 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2959 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2960 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2963 /* Now enable all the queues */
2964 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2965 struct tx_ring *txr = &tx_que->txr;
2967 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2968 txdctl |= IXGBE_TXDCTL_ENABLE;
2969 /* Set WTHRESH to 8, burst writeback */
2970 txdctl |= (8 << 16);
2972 * When the internal queue falls below PTHRESH (32),
2973 * start prefetching as long as there are at least
2974 * HTHRESH (1) buffers ready. The values are taken
2975 * from the Intel linux driver 3.8.21.
2976 * Prefetching enables tx line rate even with 1 queue.
2978 txdctl |= (32 << 0) | (1 << 8);
2979 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2982 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2983 struct rx_ring *rxr = &rx_que->rxr;
2985 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2986 if (hw->mac.type == ixgbe_mac_82598EB) {
2992 rxdctl &= ~0x3FFFFF;
2995 rxdctl |= IXGBE_RXDCTL_ENABLE;
2996 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2997 for (j = 0; j < 10; j++) {
2998 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2999 IXGBE_RXDCTL_ENABLE)
3007 /* Enable Receive engine */
3008 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3009 if (hw->mac.type == ixgbe_mac_82598EB)
3010 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3011 rxctrl |= IXGBE_RXCTRL_RXEN;
3012 ixgbe_enable_rx_dma(hw, rxctrl);
3014 /* Set up MSI/MSI-X routing */
3015 if (ixgbe_enable_msix) {
3016 ixgbe_configure_ivars(adapter);
3017 /* Set up auto-mask */
3018 if (hw->mac.type == ixgbe_mac_82598EB)
3019 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3021 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3022 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3024 } else { /* Simple settings for Legacy/MSI */
3025 ixgbe_set_ivar(adapter, 0, 0, 0);
3026 ixgbe_set_ivar(adapter, 0, 0, 1);
3027 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3030 ixgbe_init_fdir(adapter);
3033 * Check on any SFP devices that
3034 * need to be kick-started
3036 if (hw->phy.type == ixgbe_phy_none) {
3037 err = hw->phy.ops.identify(hw);
3038 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3040 "Unsupported SFP+ module type was detected.\n");
3045 /* Set moderation on the Link interrupt */
3046 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3048 /* Enable power to the phy. */
3049 ixgbe_set_phy_power(hw, TRUE);
3051 /* Config/Enable Link */
3052 ixgbe_config_link(ctx);
3054 /* Hardware Packet Buffer & Flow Control setup */
3055 ixgbe_config_delay_values(adapter);
3057 /* Initialize the FC settings */
3060 /* Set up VLAN support and filter */
3061 ixgbe_setup_vlan_hw_support(ctx);
3063 /* Setup DMA Coalescing */
3064 ixgbe_config_dmac(adapter);
3066 /* And now turn on interrupts */
3067 ixgbe_if_enable_intr(ctx);
3069 /* Enable the use of the MBX by the VF's */
3070 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3071 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3072 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3073 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3076 } /* ixgbe_init_locked */
3078 /************************************************************************
3081 * Setup the correct IVAR register for a particular MSI-X interrupt
3082 * (yes this is all very magic and confusing :)
3083 * - entry is the register array entry
3084 * - vector is the MSI-X vector for this queue
3085 * - type is RX/TX/MISC
3086 ************************************************************************/
3088 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3090 struct ixgbe_hw *hw = &adapter->hw;
3093 vector |= IXGBE_IVAR_ALLOC_VAL;
3095 switch (hw->mac.type) {
3096 case ixgbe_mac_82598EB:
3098 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3100 entry += (type * 64);
3101 index = (entry >> 2) & 0x1F;
3102 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3103 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3104 ivar |= (vector << (8 * (entry & 0x3)));
3105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3107 case ixgbe_mac_82599EB:
3108 case ixgbe_mac_X540:
3109 case ixgbe_mac_X550:
3110 case ixgbe_mac_X550EM_x:
3111 case ixgbe_mac_X550EM_a:
3112 if (type == -1) { /* MISC IVAR */
3113 index = (entry & 1) * 8;
3114 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3115 ivar &= ~(0xFF << index);
3116 ivar |= (vector << index);
3117 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3118 } else { /* RX/TX IVARS */
3119 index = (16 * (entry & 1)) + (8 * type);
3120 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3121 ivar &= ~(0xFF << index);
3122 ivar |= (vector << index);
3123 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3128 } /* ixgbe_set_ivar */
3130 /************************************************************************
3131 * ixgbe_configure_ivars
3132 ************************************************************************/
3134 ixgbe_configure_ivars(struct adapter *adapter)
3136 struct ix_rx_queue *rx_que = adapter->rx_queues;
3137 struct ix_tx_queue *tx_que = adapter->tx_queues;
3140 if (ixgbe_max_interrupt_rate > 0)
3141 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3144 * Disable DMA coalescing if interrupt moderation is
3151 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3152 struct rx_ring *rxr = &rx_que->rxr;
3154 /* First the RX queue entry */
3155 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3157 /* Set an Initial EITR value */
3158 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3160 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3161 struct tx_ring *txr = &tx_que->txr;
3163 /* ... and the TX */
3164 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3166 /* For the Link interrupt */
3167 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3168 } /* ixgbe_configure_ivars */
3170 /************************************************************************
3172 ************************************************************************/
3174 ixgbe_config_gpie(struct adapter *adapter)
3176 struct ixgbe_hw *hw = &adapter->hw;
3179 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3181 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3182 /* Enable Enhanced MSI-X mode */
3183 gpie |= IXGBE_GPIE_MSIX_MODE
3185 | IXGBE_GPIE_PBA_SUPPORT
3189 /* Fan Failure Interrupt */
3190 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3191 gpie |= IXGBE_SDP1_GPIEN;
3193 /* Thermal Sensor Interrupt */
3194 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3195 gpie |= IXGBE_SDP0_GPIEN_X540;
3197 /* Link detection */
3198 switch (hw->mac.type) {
3199 case ixgbe_mac_82599EB:
3200 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3202 case ixgbe_mac_X550EM_x:
3203 case ixgbe_mac_X550EM_a:
3204 gpie |= IXGBE_SDP0_GPIEN_X540;
3210 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3212 } /* ixgbe_config_gpie */
3214 /************************************************************************
3215 * ixgbe_config_delay_values
3217 * Requires adapter->max_frame_size to be set.
3218 ************************************************************************/
3220 ixgbe_config_delay_values(struct adapter *adapter)
3222 struct ixgbe_hw *hw = &adapter->hw;
3223 u32 rxpb, frame, size, tmp;
3225 frame = adapter->max_frame_size;
3227 /* Calculate High Water */
3228 switch (hw->mac.type) {
3229 case ixgbe_mac_X540:
3230 case ixgbe_mac_X550:
3231 case ixgbe_mac_X550EM_x:
3232 case ixgbe_mac_X550EM_a:
3233 tmp = IXGBE_DV_X540(frame, frame);
3236 tmp = IXGBE_DV(frame, frame);
3239 size = IXGBE_BT2KB(tmp);
3240 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3241 hw->fc.high_water[0] = rxpb - size;
3243 /* Now calculate Low Water */
3244 switch (hw->mac.type) {
3245 case ixgbe_mac_X540:
3246 case ixgbe_mac_X550:
3247 case ixgbe_mac_X550EM_x:
3248 case ixgbe_mac_X550EM_a:
3249 tmp = IXGBE_LOW_DV_X540(frame);
3252 tmp = IXGBE_LOW_DV(frame);
3255 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3257 hw->fc.pause_time = IXGBE_FC_PAUSE;
3258 hw->fc.send_xon = TRUE;
3259 } /* ixgbe_config_delay_values */
3261 /************************************************************************
3262 * ixgbe_set_multi - Multicast Update
3264 * Called whenever multicast address list is updated.
3265 ************************************************************************/
3267 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3269 struct adapter *adapter = arg;
3270 struct ixgbe_mc_addr *mta = adapter->mta;
3272 if (count == MAX_NUM_MULTICAST_ADDRESSES)
3274 bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3275 mta[count].vmdq = adapter->pool;
3278 } /* ixgbe_mc_filter_apply */
3281 ixgbe_if_multi_set(if_ctx_t ctx)
3283 struct adapter *adapter = iflib_get_softc(ctx);
3284 struct ixgbe_mc_addr *mta;
3285 struct ifnet *ifp = iflib_get_ifp(ctx);
3290 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3293 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3295 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3298 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3299 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3300 if (ifp->if_flags & IFF_PROMISC)
3301 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3302 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3303 ifp->if_flags & IFF_ALLMULTI) {
3304 fctrl |= IXGBE_FCTRL_MPE;
3305 fctrl &= ~IXGBE_FCTRL_UPE;
3307 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3309 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3311 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3312 update_ptr = (u8 *)mta;
3313 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3314 ixgbe_mc_array_itr, TRUE);
3317 } /* ixgbe_if_multi_set */
3319 /************************************************************************
3320 * ixgbe_mc_array_itr
3322 * An iterator function needed by the multicast shared code.
3323 * It feeds the shared code routine the addresses in the
3324 * array of ixgbe_set_multi() one by one.
3325 ************************************************************************/
3327 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3329 struct ixgbe_mc_addr *mta;
3331 mta = (struct ixgbe_mc_addr *)*update_ptr;
3334 *update_ptr = (u8*)(mta + 1);
3337 } /* ixgbe_mc_array_itr */
3339 /************************************************************************
3340 * ixgbe_local_timer - Timer routine
3342 * Checks for link status, updates statistics,
3343 * and runs the watchdog check.
3344 ************************************************************************/
3346 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3348 struct adapter *adapter = iflib_get_softc(ctx);
3353 /* Check for pluggable optics */
3354 if (adapter->sfp_probe)
3355 if (!ixgbe_sfp_probe(ctx))
3356 return; /* Nothing to do */
3358 ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3359 &adapter->link_up, 0);
3361 /* Fire off the adminq task */
3362 iflib_admin_intr_deferred(ctx);
3364 } /* ixgbe_if_timer */
3366 /************************************************************************
3369 * Determine if a port had optics inserted.
3370 ************************************************************************/
3372 ixgbe_sfp_probe(if_ctx_t ctx)
3374 struct adapter *adapter = iflib_get_softc(ctx);
3375 struct ixgbe_hw *hw = &adapter->hw;
3376 device_t dev = iflib_get_dev(ctx);
3377 bool result = FALSE;
3379 if ((hw->phy.type == ixgbe_phy_nl) &&
3380 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3381 s32 ret = hw->phy.ops.identify_sfp(hw);
3384 ret = hw->phy.ops.reset(hw);
3385 adapter->sfp_probe = FALSE;
3386 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3387 device_printf(dev, "Unsupported SFP+ module detected!");
3389 "Reload driver with supported module.\n");
3392 device_printf(dev, "SFP+ module detected!\n");
3393 /* We now have supported optics */
3399 } /* ixgbe_sfp_probe */
3401 /************************************************************************
3402 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3403 ************************************************************************/
3405 ixgbe_handle_mod(void *context)
3407 if_ctx_t ctx = context;
3408 struct adapter *adapter = iflib_get_softc(ctx);
3409 struct ixgbe_hw *hw = &adapter->hw;
3410 device_t dev = iflib_get_dev(ctx);
3411 u32 err, cage_full = 0;
3413 if (adapter->hw.need_crosstalk_fix) {
3414 switch (hw->mac.type) {
3415 case ixgbe_mac_82599EB:
3416 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3419 case ixgbe_mac_X550EM_x:
3420 case ixgbe_mac_X550EM_a:
3421 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3429 goto handle_mod_out;
3432 err = hw->phy.ops.identify_sfp(hw);
3433 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3435 "Unsupported SFP+ module type was detected.\n");
3436 goto handle_mod_out;
3439 if (hw->mac.type == ixgbe_mac_82598EB)
3440 err = hw->phy.ops.reset(hw);
3442 err = hw->mac.ops.setup_sfp(hw);
3444 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3446 "Setup failure - unsupported SFP+ module type.\n");
3447 goto handle_mod_out;
3449 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3453 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3454 } /* ixgbe_handle_mod */
3457 /************************************************************************
3458 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3459 ************************************************************************/
3461 ixgbe_handle_msf(void *context)
3463 if_ctx_t ctx = context;
3464 struct adapter *adapter = iflib_get_softc(ctx);
3465 struct ixgbe_hw *hw = &adapter->hw;
3469 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3470 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3472 autoneg = hw->phy.autoneg_advertised;
3473 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3474 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3475 if (hw->mac.ops.setup_link)
3476 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3478 /* Adjust media types shown in ifconfig */
3479 ifmedia_removeall(adapter->media);
3480 ixgbe_add_media_types(adapter->ctx);
3481 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3482 } /* ixgbe_handle_msf */
3484 /************************************************************************
3485 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3486 ************************************************************************/
3488 ixgbe_handle_phy(void *context)
3490 if_ctx_t ctx = context;
3491 struct adapter *adapter = iflib_get_softc(ctx);
3492 struct ixgbe_hw *hw = &adapter->hw;
3495 error = hw->phy.ops.handle_lasi(hw);
3496 if (error == IXGBE_ERR_OVERTEMP)
3497 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3499 device_printf(adapter->dev,
3500 "Error handling LASI interrupt: %d\n", error);
3501 } /* ixgbe_handle_phy */
3503 /************************************************************************
3504 * ixgbe_if_stop - Stop the hardware
3506 * Disables all traffic on the adapter by issuing a
3507 * global reset on the MAC and deallocates TX/RX buffers.
3508 ************************************************************************/
3510 ixgbe_if_stop(if_ctx_t ctx)
3512 struct adapter *adapter = iflib_get_softc(ctx);
3513 struct ixgbe_hw *hw = &adapter->hw;
3515 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3518 hw->adapter_stopped = FALSE;
3519 ixgbe_stop_adapter(hw);
3520 if (hw->mac.type == ixgbe_mac_82599EB)
3521 ixgbe_stop_mac_link_on_d3_82599(hw);
3522 /* Turn off the laser - noop with no optics */
3523 ixgbe_disable_tx_laser(hw);
3525 /* Update the stack */
3526 adapter->link_up = FALSE;
3527 ixgbe_if_update_admin_status(ctx);
3529 /* reprogram the RAR[0] in case user changed it. */
3530 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3533 } /* ixgbe_if_stop */
3535 /************************************************************************
3536 * ixgbe_update_link_status - Update OS on link state
3538 * Note: Only updates the OS on the cached link state.
3539 * The real check of the hardware only happens with
3541 ************************************************************************/
3543 ixgbe_if_update_admin_status(if_ctx_t ctx)
3545 struct adapter *adapter = iflib_get_softc(ctx);
3546 device_t dev = iflib_get_dev(ctx);
3548 if (adapter->link_up) {
3549 if (adapter->link_active == FALSE) {
3551 device_printf(dev, "Link is up %d Gbps %s \n",
3552 ((adapter->link_speed == 128) ? 10 : 1),
3554 adapter->link_active = TRUE;
3555 /* Update any Flow Control changes */
3556 ixgbe_fc_enable(&adapter->hw);
3557 /* Update DMA coalescing config */
3558 ixgbe_config_dmac(adapter);
3559 /* should actually be negotiated value */
3560 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3562 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3563 ixgbe_ping_all_vfs(adapter);
3565 } else { /* Link down */
3566 if (adapter->link_active == TRUE) {
3568 device_printf(dev, "Link is Down\n");
3569 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3570 adapter->link_active = FALSE;
3571 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3572 ixgbe_ping_all_vfs(adapter);
3576 /* Handle task requests from msix_link() */
3577 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3578 ixgbe_handle_mod(ctx);
3579 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3580 ixgbe_handle_msf(ctx);
3581 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3582 ixgbe_handle_mbx(ctx);
3583 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3584 ixgbe_reinit_fdir(ctx);
3585 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3586 ixgbe_handle_phy(ctx);
3587 adapter->task_requests = 0;
3589 ixgbe_update_stats_counters(adapter);
3590 } /* ixgbe_if_update_admin_status */
3592 /************************************************************************
3593 * ixgbe_config_dmac - Configure DMA Coalescing
3594 ************************************************************************/
3596 ixgbe_config_dmac(struct adapter *adapter)
3598 struct ixgbe_hw *hw = &adapter->hw;
3599 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3601 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3604 if (dcfg->watchdog_timer ^ adapter->dmac ||
3605 dcfg->link_speed ^ adapter->link_speed) {
3606 dcfg->watchdog_timer = adapter->dmac;
3607 dcfg->fcoe_en = FALSE;
3608 dcfg->link_speed = adapter->link_speed;
3611 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3612 dcfg->watchdog_timer, dcfg->link_speed);
3614 hw->mac.ops.dmac_config(hw);
3616 } /* ixgbe_config_dmac */
3618 /************************************************************************
3619 * ixgbe_if_enable_intr
3620 ************************************************************************/
3622 ixgbe_if_enable_intr(if_ctx_t ctx)
3624 struct adapter *adapter = iflib_get_softc(ctx);
3625 struct ixgbe_hw *hw = &adapter->hw;
3626 struct ix_rx_queue *que = adapter->rx_queues;
3629 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3631 switch (adapter->hw.mac.type) {
3632 case ixgbe_mac_82599EB:
3633 mask |= IXGBE_EIMS_ECC;
3634 /* Temperature sensor on some adapters */
3635 mask |= IXGBE_EIMS_GPI_SDP0;
3636 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3637 mask |= IXGBE_EIMS_GPI_SDP1;
3638 mask |= IXGBE_EIMS_GPI_SDP2;
3640 case ixgbe_mac_X540:
3641 /* Detect if Thermal Sensor is enabled */
3642 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3643 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3644 mask |= IXGBE_EIMS_TS;
3645 mask |= IXGBE_EIMS_ECC;
3647 case ixgbe_mac_X550:
3648 /* MAC thermal sensor is automatically enabled */
3649 mask |= IXGBE_EIMS_TS;
3650 mask |= IXGBE_EIMS_ECC;
3652 case ixgbe_mac_X550EM_x:
3653 case ixgbe_mac_X550EM_a:
3654 /* Some devices use SDP0 for important information */
3655 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3656 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3657 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3658 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3659 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3660 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3661 mask |= IXGBE_EICR_GPI_SDP0_X540;
3662 mask |= IXGBE_EIMS_ECC;
3668 /* Enable Fan Failure detection */
3669 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3670 mask |= IXGBE_EIMS_GPI_SDP1;
3672 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3673 mask |= IXGBE_EIMS_MAILBOX;
3674 /* Enable Flow Director */
3675 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3676 mask |= IXGBE_EIMS_FLOW_DIR;
3678 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3680 /* With MSI-X we use auto clear */
3681 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3682 mask = IXGBE_EIMS_ENABLE_MASK;
3683 /* Don't autoclear Link */
3684 mask &= ~IXGBE_EIMS_OTHER;
3685 mask &= ~IXGBE_EIMS_LSC;
3686 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3687 mask &= ~IXGBE_EIMS_MAILBOX;
3688 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3692 * Now enable all queues, this is done separately to
3693 * allow for handling the extended (beyond 32) MSI-X
3694 * vectors that can be used by 82599
3696 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3697 ixgbe_enable_queue(adapter, que->msix);
3699 IXGBE_WRITE_FLUSH(hw);
3701 } /* ixgbe_if_enable_intr */
3703 /************************************************************************
3704 * ixgbe_disable_intr
3705 ************************************************************************/
3707 ixgbe_if_disable_intr(if_ctx_t ctx)
3709 struct adapter *adapter = iflib_get_softc(ctx);
3711 if (adapter->intr_type == IFLIB_INTR_MSIX)
3712 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3713 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3714 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3716 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3717 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3718 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3720 IXGBE_WRITE_FLUSH(&adapter->hw);
3722 } /* ixgbe_if_disable_intr */
3724 /************************************************************************
3725 * ixgbe_link_intr_enable
3726 ************************************************************************/
3728 ixgbe_link_intr_enable(if_ctx_t ctx)
3730 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3732 /* Re-enable other interrupts */
3733 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3734 } /* ixgbe_link_intr_enable */
3736 /************************************************************************
3737 * ixgbe_if_rx_queue_intr_enable
3738 ************************************************************************/
3740 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3742 struct adapter *adapter = iflib_get_softc(ctx);
3743 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3745 ixgbe_enable_queue(adapter, que->msix);
3748 } /* ixgbe_if_rx_queue_intr_enable */
3750 /************************************************************************
3751 * ixgbe_enable_queue
3752 ************************************************************************/
3754 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3756 struct ixgbe_hw *hw = &adapter->hw;
3757 u64 queue = 1ULL << vector;
3760 if (hw->mac.type == ixgbe_mac_82598EB) {
3761 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3762 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3764 mask = (queue & 0xFFFFFFFF);
3766 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3767 mask = (queue >> 32);
3769 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3771 } /* ixgbe_enable_queue */
3773 /************************************************************************
3774 * ixgbe_disable_queue
3775 ************************************************************************/
3777 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3779 struct ixgbe_hw *hw = &adapter->hw;
3780 u64 queue = 1ULL << vector;
3783 if (hw->mac.type == ixgbe_mac_82598EB) {
3784 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3785 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3787 mask = (queue & 0xFFFFFFFF);
3789 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3790 mask = (queue >> 32);
3792 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3794 } /* ixgbe_disable_queue */
3796 /************************************************************************
3797 * ixgbe_intr - Legacy Interrupt Service Routine
3798 ************************************************************************/
3800 ixgbe_intr(void *arg)
3802 struct adapter *adapter = arg;
3803 struct ix_rx_queue *que = adapter->rx_queues;
3804 struct ixgbe_hw *hw = &adapter->hw;
3805 if_ctx_t ctx = adapter->ctx;
3806 u32 eicr, eicr_mask;
3808 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3812 ixgbe_if_enable_intr(ctx);
3813 return (FILTER_HANDLED);
3816 /* Check for fan failure */
3817 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3818 (eicr & IXGBE_EICR_GPI_SDP1)) {
3819 device_printf(adapter->dev,
3820 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3821 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3824 /* Link status change */
3825 if (eicr & IXGBE_EICR_LSC) {
3826 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3827 iflib_admin_intr_deferred(ctx);
3830 if (ixgbe_is_sfp(hw)) {
3831 /* Pluggable optics-related interrupt */
3832 if (hw->mac.type >= ixgbe_mac_X540)
3833 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3835 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3837 if (eicr & eicr_mask) {
3838 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3839 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3842 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3843 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3844 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3845 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3846 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3850 /* External PHY interrupt */
3851 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3852 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3853 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3855 return (FILTER_SCHEDULE_THREAD);
3858 /************************************************************************
3859 * ixgbe_free_pci_resources
3860 ************************************************************************/
3862 ixgbe_free_pci_resources(if_ctx_t ctx)
3864 struct adapter *adapter = iflib_get_softc(ctx);
3865 struct ix_rx_queue *que = adapter->rx_queues;
3866 device_t dev = iflib_get_dev(ctx);
3868 /* Release all MSI-X queue resources */
3869 if (adapter->intr_type == IFLIB_INTR_MSIX)
3870 iflib_irq_free(ctx, &adapter->irq);
3873 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3874 iflib_irq_free(ctx, &que->que_irq);
3878 if (adapter->pci_mem != NULL)
3879 bus_release_resource(dev, SYS_RES_MEMORY,
3880 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3881 } /* ixgbe_free_pci_resources */
3883 /************************************************************************
3884 * ixgbe_sysctl_flowcntl
3886 * SYSCTL wrapper around setting Flow Control
3887 ************************************************************************/
3889 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3891 struct adapter *adapter;
3894 adapter = (struct adapter *)arg1;
3895 fc = adapter->hw.fc.current_mode;
3897 error = sysctl_handle_int(oidp, &fc, 0, req);
3898 if ((error) || (req->newptr == NULL))
3901 /* Don't bother if it's not changed */
3902 if (fc == adapter->hw.fc.current_mode)
3905 return ixgbe_set_flowcntl(adapter, fc);
3906 } /* ixgbe_sysctl_flowcntl */
3908 /************************************************************************
3909 * ixgbe_set_flowcntl - Set flow control
3911 * Flow control values:
3916 ************************************************************************/
3918 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3921 case ixgbe_fc_rx_pause:
3922 case ixgbe_fc_tx_pause:
3924 adapter->hw.fc.requested_mode = fc;
3925 if (adapter->num_rx_queues > 1)
3926 ixgbe_disable_rx_drop(adapter);
3929 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3930 if (adapter->num_rx_queues > 1)
3931 ixgbe_enable_rx_drop(adapter);
3937 /* Don't autoneg if forcing a value */
3938 adapter->hw.fc.disable_fc_autoneg = TRUE;
3939 ixgbe_fc_enable(&adapter->hw);
3942 } /* ixgbe_set_flowcntl */
3944 /************************************************************************
3945 * ixgbe_enable_rx_drop
3947 * Enable the hardware to drop packets when the buffer is
3948 * full. This is useful with multiqueue, so that no single
3949 * queue being full stalls the entire RX engine. We only
3950 * enable this when Multiqueue is enabled AND Flow Control
3952 ************************************************************************/
3954 ixgbe_enable_rx_drop(struct adapter *adapter)
3956 struct ixgbe_hw *hw = &adapter->hw;
3957 struct rx_ring *rxr;
3960 for (int i = 0; i < adapter->num_rx_queues; i++) {
3961 rxr = &adapter->rx_queues[i].rxr;
3962 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3963 srrctl |= IXGBE_SRRCTL_DROP_EN;
3964 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3967 /* enable drop for each vf */
3968 for (int i = 0; i < adapter->num_vfs; i++) {
3969 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3970 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3973 } /* ixgbe_enable_rx_drop */
3975 /************************************************************************
3976 * ixgbe_disable_rx_drop
3977 ************************************************************************/
3979 ixgbe_disable_rx_drop(struct adapter *adapter)
3981 struct ixgbe_hw *hw = &adapter->hw;
3982 struct rx_ring *rxr;
3985 for (int i = 0; i < adapter->num_rx_queues; i++) {
3986 rxr = &adapter->rx_queues[i].rxr;
3987 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3988 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3989 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3992 /* disable drop for each vf */
3993 for (int i = 0; i < adapter->num_vfs; i++) {
3994 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3995 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3997 } /* ixgbe_disable_rx_drop */
3999 /************************************************************************
4000 * ixgbe_sysctl_advertise
4002 * SYSCTL wrapper around setting advertised speed
4003 ************************************************************************/
4005 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4007 struct adapter *adapter;
4008 int error, advertise;
4010 adapter = (struct adapter *)arg1;
4011 advertise = adapter->advertise;
4013 error = sysctl_handle_int(oidp, &advertise, 0, req);
4014 if ((error) || (req->newptr == NULL))
4017 return ixgbe_set_advertise(adapter, advertise);
4018 } /* ixgbe_sysctl_advertise */
4020 /************************************************************************
4021 * ixgbe_set_advertise - Control advertised link speed
4024 * 0x1 - advertise 100 Mb
4025 * 0x2 - advertise 1G
4026 * 0x4 - advertise 10G
4027 * 0x8 - advertise 10 Mb (yes, Mb)
4028 ************************************************************************/
4030 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4032 device_t dev = iflib_get_dev(adapter->ctx);
4033 struct ixgbe_hw *hw;
4034 ixgbe_link_speed speed = 0;
4035 ixgbe_link_speed link_caps = 0;
4036 s32 err = IXGBE_NOT_IMPLEMENTED;
4037 bool negotiate = FALSE;
4039 /* Checks to validate new value */
4040 if (adapter->advertise == advertise) /* no change */
4045 /* No speed changes for backplane media */
4046 if (hw->phy.media_type == ixgbe_media_type_backplane)
4049 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4050 (hw->phy.multispeed_fiber))) {
4051 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4055 if (advertise < 0x1 || advertise > 0xF) {
4056 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4060 if (hw->mac.ops.get_link_capabilities) {
4061 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4063 if (err != IXGBE_SUCCESS) {
4064 device_printf(dev, "Unable to determine supported advertise speeds\n");
4069 /* Set new value and report new advertised mode */
4070 if (advertise & 0x1) {
4071 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4072 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4075 speed |= IXGBE_LINK_SPEED_100_FULL;
4077 if (advertise & 0x2) {
4078 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4079 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4082 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4084 if (advertise & 0x4) {
4085 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4086 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4089 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4091 if (advertise & 0x8) {
4092 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4093 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4096 speed |= IXGBE_LINK_SPEED_10_FULL;
4099 hw->mac.autotry_restart = TRUE;
4100 hw->mac.ops.setup_link(hw, speed, TRUE);
4101 adapter->advertise = advertise;
4104 } /* ixgbe_set_advertise */
4106 /************************************************************************
4107 * ixgbe_get_advertise - Get current advertised speed settings
4109 * Formatted for sysctl usage.
4111 * 0x1 - advertise 100 Mb
4112 * 0x2 - advertise 1G
4113 * 0x4 - advertise 10G
4114 * 0x8 - advertise 10 Mb (yes, Mb)
4115 ************************************************************************/
4117 ixgbe_get_advertise(struct adapter *adapter)
4119 struct ixgbe_hw *hw = &adapter->hw;
4121 ixgbe_link_speed link_caps = 0;
4123 bool negotiate = FALSE;
4126 * Advertised speed means nothing unless it's copper or
4129 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4130 !(hw->phy.multispeed_fiber))
4133 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4134 if (err != IXGBE_SUCCESS)
4138 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4139 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4140 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4141 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4144 } /* ixgbe_get_advertise */
4146 /************************************************************************
4147 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4150 * 0/1 - off / on (use default value of 1000)
4152 * Legal timer values are:
4153 * 50,100,250,500,1000,2000,5000,10000
4155 * Turning off interrupt moderation will also turn this off.
4156 ************************************************************************/
4158 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4160 struct adapter *adapter = (struct adapter *)arg1;
4161 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4165 newval = adapter->dmac;
4166 error = sysctl_handle_16(oidp, &newval, 0, req);
4167 if ((error) || (req->newptr == NULL))
4176 /* Enable and use default */
4177 adapter->dmac = 1000;
4187 /* Legal values - allow */
4188 adapter->dmac = newval;
4191 /* Do nothing, illegal value */
4195 /* Re-initialize hardware if it's already running */
4196 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4200 } /* ixgbe_sysctl_dmac */
4203 /************************************************************************
4204 * ixgbe_sysctl_power_state
4206 * Sysctl to test power states
4208 * 0 - set device to D0
4209 * 3 - set device to D3
4210 * (none) - get current device power state
4211 ************************************************************************/
4213 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4215 struct adapter *adapter = (struct adapter *)arg1;
4216 device_t dev = adapter->dev;
4217 int curr_ps, new_ps, error = 0;
4219 curr_ps = new_ps = pci_get_powerstate(dev);
4221 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4222 if ((error) || (req->newptr == NULL))
4225 if (new_ps == curr_ps)
4228 if (new_ps == 3 && curr_ps == 0)
4229 error = DEVICE_SUSPEND(dev);
4230 else if (new_ps == 0 && curr_ps == 3)
4231 error = DEVICE_RESUME(dev);
4235 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4238 } /* ixgbe_sysctl_power_state */
4241 /************************************************************************
4242 * ixgbe_sysctl_wol_enable
4244 * Sysctl to enable/disable the WoL capability,
4245 * if supported by the adapter.
4250 ************************************************************************/
4252 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4254 struct adapter *adapter = (struct adapter *)arg1;
4255 struct ixgbe_hw *hw = &adapter->hw;
4256 int new_wol_enabled;
4259 new_wol_enabled = hw->wol_enabled;
4260 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4261 if ((error) || (req->newptr == NULL))
4263 new_wol_enabled = !!(new_wol_enabled);
4264 if (new_wol_enabled == hw->wol_enabled)
4267 if (new_wol_enabled > 0 && !adapter->wol_support)
4270 hw->wol_enabled = new_wol_enabled;
4273 } /* ixgbe_sysctl_wol_enable */
4275 /************************************************************************
4276 * ixgbe_sysctl_wufc - Wake Up Filter Control
4278 * Sysctl to enable/disable the types of packets that the
4279 * adapter will wake up on upon receipt.
4281 * 0x1 - Link Status Change
4282 * 0x2 - Magic Packet
4283 * 0x4 - Direct Exact
4284 * 0x8 - Directed Multicast
4286 * 0x20 - ARP/IPv4 Request Packet
4287 * 0x40 - Direct IPv4 Packet
4288 * 0x80 - Direct IPv6 Packet
4290 * Settings not listed above will cause the sysctl to return an error.
4291 ************************************************************************/
4293 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4295 struct adapter *adapter = (struct adapter *)arg1;
4299 new_wufc = adapter->wufc;
4301 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4302 if ((error) || (req->newptr == NULL))
4304 if (new_wufc == adapter->wufc)
4307 if (new_wufc & 0xffffff00)
4311 new_wufc |= (0xffffff & adapter->wufc);
4312 adapter->wufc = new_wufc;
4315 } /* ixgbe_sysctl_wufc */
4318 /************************************************************************
4319 * ixgbe_sysctl_print_rss_config
4320 ************************************************************************/
4322 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4324 struct adapter *adapter = (struct adapter *)arg1;
4325 struct ixgbe_hw *hw = &adapter->hw;
4326 device_t dev = adapter->dev;
4328 int error = 0, reta_size;
4331 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4333 device_printf(dev, "Could not allocate sbuf for output.\n");
4337 // TODO: use sbufs to make a string to print out
4338 /* Set multiplier for RETA setup and table size based on MAC */
4339 switch (adapter->hw.mac.type) {
4340 case ixgbe_mac_X550:
4341 case ixgbe_mac_X550EM_x:
4342 case ixgbe_mac_X550EM_a:
4350 /* Print out the redirection table */
4351 sbuf_cat(buf, "\n");
4352 for (int i = 0; i < reta_size; i++) {
4354 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4355 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4357 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4358 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4362 // TODO: print more config
4364 error = sbuf_finish(buf);
4366 device_printf(dev, "Error finishing sbuf: %d\n", error);
4371 } /* ixgbe_sysctl_print_rss_config */
4372 #endif /* IXGBE_DEBUG */
4374 /************************************************************************
4375 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4377 * For X552/X557-AT devices using an external PHY
4378 ************************************************************************/
4380 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4382 struct adapter *adapter = (struct adapter *)arg1;
4383 struct ixgbe_hw *hw = &adapter->hw;
4386 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4387 device_printf(iflib_get_dev(adapter->ctx),
4388 "Device has no supported external thermal sensor.\n");
4392 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4393 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4394 device_printf(iflib_get_dev(adapter->ctx),
4395 "Error reading from PHY's current temperature register\n");
4399 /* Shift temp for output */
4402 return (sysctl_handle_16(oidp, NULL, reg, req));
4403 } /* ixgbe_sysctl_phy_temp */
4405 /************************************************************************
4406 * ixgbe_sysctl_phy_overtemp_occurred
4408 * Reports (directly from the PHY) whether the current PHY
4409 * temperature is over the overtemp threshold.
4410 ************************************************************************/
4412 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4414 struct adapter *adapter = (struct adapter *)arg1;
4415 struct ixgbe_hw *hw = &adapter->hw;
4418 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4419 device_printf(iflib_get_dev(adapter->ctx),
4420 "Device has no supported external thermal sensor.\n");
4424 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4425 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4426 device_printf(iflib_get_dev(adapter->ctx),
4427 "Error reading from PHY's temperature status register\n");
4431 /* Get occurrence bit */
4432 reg = !!(reg & 0x4000);
4434 return (sysctl_handle_16(oidp, 0, reg, req));
4435 } /* ixgbe_sysctl_phy_overtemp_occurred */
4437 /************************************************************************
4438 * ixgbe_sysctl_eee_state
4440 * Sysctl to set EEE power saving feature
4444 * (none) - get current device EEE state
4445 ************************************************************************/
4447 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4449 struct adapter *adapter = (struct adapter *)arg1;
4450 device_t dev = adapter->dev;
4451 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4452 int curr_eee, new_eee, error = 0;
4455 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4457 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4458 if ((error) || (req->newptr == NULL))
4462 if (new_eee == curr_eee)
4466 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4469 /* Bounds checking */
4470 if ((new_eee < 0) || (new_eee > 1))
4473 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4475 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4479 /* Restart auto-neg */
4482 device_printf(dev, "New EEE state: %d\n", new_eee);
4484 /* Cache new value */
4486 adapter->feat_en |= IXGBE_FEATURE_EEE;
4488 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4491 } /* ixgbe_sysctl_eee_state */
4493 /************************************************************************
4494 * ixgbe_init_device_features
4495 ************************************************************************/
4497 ixgbe_init_device_features(struct adapter *adapter)
4499 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4502 | IXGBE_FEATURE_MSIX
4503 | IXGBE_FEATURE_LEGACY_IRQ;
4505 /* Set capabilities first... */
4506 switch (adapter->hw.mac.type) {
4507 case ixgbe_mac_82598EB:
4508 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4509 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4511 case ixgbe_mac_X540:
4512 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4513 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4514 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4515 (adapter->hw.bus.func == 0))
4516 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4518 case ixgbe_mac_X550:
4519 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4520 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4521 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4523 case ixgbe_mac_X550EM_x:
4524 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4525 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4527 case ixgbe_mac_X550EM_a:
4528 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4529 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4530 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4531 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4532 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4533 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4534 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4537 case ixgbe_mac_82599EB:
4538 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4539 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4540 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4541 (adapter->hw.bus.func == 0))
4542 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4543 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4544 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4550 /* Enabled by default... */
4551 /* Fan failure detection */
4552 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4553 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4555 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4556 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4558 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4559 adapter->feat_en |= IXGBE_FEATURE_EEE;
4560 /* Thermal Sensor */
4561 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4562 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4564 /* Enabled via global sysctl... */
4566 if (ixgbe_enable_fdir) {
4567 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4568 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4570 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4573 * Message Signal Interrupts - Extended (MSI-X)
4574 * Normal MSI is only enabled if MSI-X calls fail.
4576 if (!ixgbe_enable_msix)
4577 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4578 /* Receive-Side Scaling (RSS) */
4579 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4580 adapter->feat_en |= IXGBE_FEATURE_RSS;
4582 /* Disable features with unmet dependencies... */
4584 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4585 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4586 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4587 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4588 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4590 } /* ixgbe_init_device_features */
4592 /************************************************************************
4593 * ixgbe_check_fan_failure
4594 ************************************************************************/
4596 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4600 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4604 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4605 } /* ixgbe_check_fan_failure */