1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
41 #include "ixgbe_sriov.h"
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
/************************************************************************
 * Driver version string, exported to iflib through the shared context
 * (isc_driver_version below) and visible to userland tools.
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";
53 /************************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
	/* 82598 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* 82599 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* X540 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* X550 / X550EM family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* Bypass adapters */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* required last entry */
112 static void *ixgbe_register(device_t dev);
113 static int ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int ixgbe_if_attach_post(if_ctx_t ctx);
115 static int ixgbe_if_detach(if_ctx_t ctx);
116 static int ixgbe_if_shutdown(if_ctx_t ctx);
117 static int ixgbe_if_suspend(if_ctx_t ctx);
118 static int ixgbe_if_resume(if_ctx_t ctx);
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int ixgbe_if_media_change(if_ctx_t ctx);
127 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133 uint64_t *paddrs, int nrxqs, int nrxqsets);
134 static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135 uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
144 /************************************************************************
145 * Function prototypes
146 ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 static int ixgbe_msix_link(void *arg);
167 static int ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 static int ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int ixgbe_set_flowcntl(struct adapter *, int);
185 static int ixgbe_set_advertise(struct adapter *, int);
186 static int ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
191 /* Sysctl handlers */
192 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
215 /************************************************************************
216 * FreeBSD Device Interface Entry Points
217 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface: all newbus entry points are delegated to iflib */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	/* SR-IOV (pci_iov) entry points, also routed through iflib */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
/* newbus driver glue: "ix" attaches on the pci bus via iflib */
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* Export the PCI ID table so devmatch(8) can autoload the module */
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
/* iflib device-interface (IFDI) method table mapping to this driver */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	/*
	 * NOTE(review): both TX and RX queue interrupt enables are wired to
	 * ixgbe_if_rx_queue_intr_enable; presumably both just unmask the
	 * queue's MSI-X vector — confirm this is intentional, not a typo.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	/* SR-IOV IFDI methods */
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
/*
 * TUNEABLE PARAMETERS:
 * All of these are loader tunables (CTLFLAG_RDTUN) under hw.ix and are
 * read once at module load/attach time.
 */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");
/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable it.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;
/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");
/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");
/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");
/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
/* TX/RX fast-path dispatch table, defined in ix_txrx.c */
extern struct if_txrx ixgbe_txrx;
/*
 * Shared context handed to iflib: DMA alignment/size limits, descriptor
 * ring bounds/defaults, the PCI ID table, and the IFDI driver above.
 */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	/* one admin (link/other-cause) interrupt in addition to the queues */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
/************************************************************************
 * ixgbe_if_tx_queues_alloc
 *
 *   iflib callback: allocate the driver-private TX queue state
 *   (struct ix_tx_queue array) for ntxqsets queue sets and bind each
 *   ring to the descriptor memory iflib already DMA-allocated
 *   (vaddrs/paddrs). On failure the error path frees everything via
 *   ixgbe_if_queues_free(); return statements are not visible in this
 *   excerpt but presumably 0 on success / ENOMEM on failure.
 ************************************************************************/
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;
	MPASS(adapter->num_tx_queues > 0);
	MPASS(adapter->num_tx_queues == ntxqsets);
	/* Allocate queue structure memory */
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;
		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		txr->adapter = que->adapter = adapter;
		/* Mark this queue's vector active for the interrupt handlers */
		adapter->active_queues |= (u64)1 << txr->me;
		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
		/* No report-status entries are valid yet */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];
		txr->total_packets = 0;
		/* Set the rate at which we sample packets */
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);
	/* Error path: undo any partial allocation */
	ixgbe_if_queues_free(ctx);
} /* ixgbe_if_tx_queues_alloc */
462 /************************************************************************
463 * ixgbe_if_rx_queues_alloc
464 ************************************************************************/
466 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
467 int nrxqs, int nrxqsets)
469 struct adapter *adapter = iflib_get_softc(ctx);
470 struct ix_rx_queue *que;
473 MPASS(adapter->num_rx_queues > 0);
474 MPASS(adapter->num_rx_queues == nrxqsets);
477 /* Allocate queue structure memory */
479 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
480 M_IXGBE, M_NOWAIT | M_ZERO);
481 if (!adapter->rx_queues) {
482 device_printf(iflib_get_dev(ctx),
483 "Unable to allocate TX ring memory\n");
487 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
488 struct rx_ring *rxr = &que->rxr;
490 /* In case SR-IOV is enabled, align the index properly */
491 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
494 rxr->adapter = que->adapter = adapter;
496 /* get the virtual and physical address of the hw queues */
497 rxr->tail = IXGBE_RDT(rxr->me);
498 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
499 rxr->rx_paddr = paddrs[i];
504 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
505 adapter->num_rx_queues);
508 } /* ixgbe_if_rx_queues_alloc */
/************************************************************************
 * ixgbe_if_queues_free
 *
 *   iflib callback: release all driver-private TX/RX queue state
 *   allocated by the two *_queues_alloc callbacks above. Also called
 *   from the TX alloc error path, so it must tolerate partially
 *   initialized state (hence the NULL checks).
 ************************************************************************/
ixgbe_if_queues_free(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = adapter->tx_queues;
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	if (tx_que != NULL) {
		/* Free each ring's report-status array before the array itself */
		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
			free(txr->tx_rsq, M_IXGBE);
		free(adapter->tx_queues, M_IXGBE);
		adapter->tx_queues = NULL;
	if (rx_que != NULL) {
		free(adapter->rx_queues, M_IXGBE);
		adapter->rx_queues = NULL;
} /* ixgbe_if_queues_free */
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), hash key (RSSRK)
 *   and hash-type selection (MRQC). When the kernel RSS option is
 *   enabled (IXGBE_FEATURE_RSS), the key and bucket mapping come from
 *   the kernel's rss(9) layer; otherwise a random key and a simple
 *   round-robin queue mapping are used.
 ************************************************************************/
ixgbe_initialize_rss_mapping(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* wrap j back to queue 0 once all queues have been used */
		if (j == adapter->num_rx_queues)
		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
			queue_id = (j * index_mult);
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta | (((uint32_t)queue_id) << 24);
		/* RETA registers pack four 8-bit entries per 32-bit word;
		 * entries past 128 live in the extended ERETA registers. */
		IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	rss_hash_config = RSS_HASHTYPE_RSS_IPV4
	    | RSS_HASHTYPE_RSS_TCP_IPV4
	    | RSS_HASHTYPE_RSS_IPV6
	    | RSS_HASHTYPE_RSS_TCP_IPV6
	    | RSS_HASHTYPE_RSS_IPV6_EX
	    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	/* Translate the kernel hash-type flags into MRQC field bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs broadcast/jumbo filtering, per-queue descriptor ring base,
 *   length, SRRCTL buffer sizing and head/tail pointers, then RSS and
 *   receive checksum offload. Receives are disabled for the duration.
 ************************************************************************/
/* Rounding addend so rx_mbuf_sz is rounded up to a BSIZEPKT unit */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
ixgbe_initialize_receive_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	u32 bufsz, fctrl, srrctl, rxcsum;
	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);
	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
	/* Receive buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* 82599 and later: enable packet-split header recognition */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	ixgbe_initialize_rss_mapping(adapter);
	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;
	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs each TX ring's descriptor base/length and head/tail
 *   pointers, resets the driver's report-status tracking, disables
 *   descriptor write-back relaxed ordering, and (on 82599 and later)
 *   enables the DMA TX engine and programs MTQC with the arbiter
 *   temporarily disabled.
 ************************************************************************/
ixgbe_initialize_transmit_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;
	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		/* Cache the tail address */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		/* Invalidate all report-status entries */
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;
		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
} /* ixgbe_initialize_transmit_units */
/************************************************************************
 * ixgbe_register
 *
 *   device_register method: hands iflib the driver's shared context
 *   (presumably returns ixgbe_sctx; body elided in this excerpt).
 ************************************************************************/
ixgbe_register(device_t dev)
} /* ixgbe_register */
856 /************************************************************************
857 * ixgbe_if_attach_pre - Device initialization routine, part 1
859 * Called when the driver is being loaded.
860 * Identifies the type of hardware, initializes the hardware,
861 * and initializes iflib structures.
863 * return 0 on success, positive on failure
/*
 * Sequence: map BAR0, read PCI IDs, init shared code, reset HW, validate
 * EEPROM, start HW, then fill in the iflib shared-context (scctx) limits.
 * On failure, the tail (visible at the bottom) clears DRV_LOAD and frees
 * the PCI resources.
 */
864 ************************************************************************/
866 ixgbe_if_attach_pre(if_ctx_t ctx)
868 struct adapter *adapter;
870 if_softc_ctx_t scctx;
875 INIT_DEBUGOUT("ixgbe_attach: begin");
877 /* Allocate, clear, and link in our adapter structure */
878 dev = iflib_get_dev(ctx);
879 adapter = iflib_get_softc(ctx);
880 adapter->hw.back = adapter;
883 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
884 adapter->media = iflib_get_media(ctx);
887 /* Determine hardware revision */
888 hw->vendor_id = pci_get_vendor(dev);
889 hw->device_id = pci_get_device(dev);
890 hw->revision_id = pci_get_revid(dev);
891 hw->subsystem_vendor_id = pci_get_subvendor(dev);
892 hw->subsystem_device_id = pci_get_subdevice(dev);
894 /* Do base PCI setup - map BAR0 */
895 if (ixgbe_allocate_pci_resources(ctx)) {
896 device_printf(dev, "Allocation of PCI resources failed\n");
900 /* let hardware know driver is loaded */
901 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
902 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
903 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
906 * Initialize the shared code
908 if (ixgbe_init_shared_code(hw) != 0) {
909 device_printf(dev, "Unable to initialize the shared code\n");
914 if (hw->mbx.ops.init_params)
915 hw->mbx.ops.init_params(hw);
917 hw->allow_unsupported_sfp = allow_unsupported_sfp;
/* Smart speed is not applicable to 82598 parts */
919 if (hw->mac.type != ixgbe_mac_82598EB)
920 hw->phy.smart_speed = ixgbe_smart_speed;
922 ixgbe_init_device_features(adapter);
924 /* Enable WoL (if supported) */
925 ixgbe_check_wol_support(adapter);
927 /* Verify adapter fan is still functional (if applicable) */
928 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
929 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
930 ixgbe_check_fan_failure(adapter, esdp, FALSE);
933 /* Ensure SW/FW semaphore is free */
934 ixgbe_init_swfw_semaphore(hw);
936 /* Set an initial default flow control value */
937 hw->fc.requested_mode = ixgbe_flow_control;
/* Reset with over-temp protection enabled, then restore the flag */
939 hw->phy.reset_if_overtemp = TRUE;
940 error = ixgbe_reset_hw(hw);
941 hw->phy.reset_if_overtemp = FALSE;
942 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
944 * No optics in this port, set up
945 * so the timer routine will probe
946 * for later insertion.
948 adapter->sfp_probe = TRUE;
950 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
951 device_printf(dev, "Unsupported SFP+ module detected!\n");
955 device_printf(dev, "Hardware initialization failed\n");
960 /* Make sure we have a good EEPROM before we read from it */
961 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
962 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
/* Non-fatal start_hw diagnostics: warn but continue where possible */
967 error = ixgbe_start_hw(hw);
969 case IXGBE_ERR_EEPROM_VERSION:
970 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
972 case IXGBE_ERR_SFP_NOT_SUPPORTED:
973 device_printf(dev, "Unsupported SFP+ Module\n");
976 case IXGBE_ERR_SFP_NOT_PRESENT:
977 device_printf(dev, "No SFP+ Module found\n");
983 /* Most of the iflib initialization... */
985 iflib_set_mac(ctx, hw->mac.addr);
/* X550-class parts support a larger RSS table and more queue pairs */
986 switch (adapter->hw.mac.type) {
988 case ixgbe_mac_X550EM_x:
989 case ixgbe_mac_X550EM_a:
990 scctx->isc_rss_table_size = 512;
991 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
994 scctx->isc_rss_table_size = 128;
995 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
998 /* Allow legacy interrupts */
999 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
/* NOTE(review): the trailing comma after DBA_ALIGN joins the txqsize and
 * rxqsize assignments into a single comma expression — confirm intended. */
1001 scctx->isc_txqsizes[0] =
1002 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1003 sizeof(u32), DBA_ALIGN),
1004 scctx->isc_rxqsizes[0] =
1005 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1009 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1010 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
/* 82598 differs: fewer scatter segments, different MSI-X BAR, no SCTP csum */
1011 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1012 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1013 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1015 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1016 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1017 scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1019 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1020 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1021 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1023 scctx->isc_txrx = &ixgbe_txrx;
1025 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
/* Error path: tell the HW the driver is unloading and release BAR0 */
1030 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1031 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1032 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1033 ixgbe_free_pci_resources(ctx);
1036 } /* ixgbe_if_attach_pre */
1038 /*********************************************************************
1039 * ixgbe_if_attach_post - Device initialization routine, part 2
1041 * Called during driver load, but after interrupts and
1042 * resources have been allocated and configured.
1043 * Sets up some data structures not relevant to iflib.
1045 * return 0 on success, positive on failure
1046 *********************************************************************/
1048 ixgbe_if_attach_post(if_ctx_t ctx)
1051 struct adapter *adapter;
1052 struct ixgbe_hw *hw;
1055 dev = iflib_get_dev(ctx);
1056 adapter = iflib_get_softc(ctx);
/* Refuse legacy (INTx) interrupts on parts that lack the capability */
1060 if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1061 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1062 device_printf(dev, "Device does not support legacy interrupts");
1067 /* Allocate multicast array memory. */
1068 adapter->mta = malloc(sizeof(*adapter->mta) *
1069 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1070 if (adapter->mta == NULL) {
1071 device_printf(dev, "Can not allocate multicast setup array\n");
1076 /* hw.ix defaults init */
1077 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1079 /* Enable the optics for 82599 SFP+ fiber */
1080 ixgbe_enable_tx_laser(hw);
1082 /* Enable power to the phy. */
1083 ixgbe_set_phy_power(hw, TRUE);
1085 ixgbe_initialize_iov(adapter);
1087 error = ixgbe_setup_interface(ctx);
1089 device_printf(dev, "Interface setup failed: %d\n", error);
/* Push initial link state to iflib */
1093 ixgbe_if_update_admin_status(ctx);
1095 /* Initialize statistics */
1096 ixgbe_update_stats_counters(adapter);
1097 ixgbe_add_hw_stats(adapter);
1099 /* Check PCIE slot type/speed/width */
1100 ixgbe_get_slot_info(adapter);
1103 * Do time init and sysctl init here, but
1104 * only on the first port of a bypass adapter.
1106 ixgbe_bypass_init(adapter);
1108 /* Set an initial dmac value */
1110 /* Set initial advertised speeds (if applicable) */
1111 adapter->advertise = ixgbe_get_advertise(adapter);
1113 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1114 ixgbe_define_iov_schemas(dev, &error);
1117 ixgbe_add_device_sysctls(ctx);
1122 } /* ixgbe_if_attach_post */
1124 /************************************************************************
1125 * ixgbe_check_wol_support
1127 * Checks whether the adapter's ports are capable of
1128 * Wake On LAN by reading the adapter's NVM.
1130 * Sets each port's hw->wol_enabled value depending
1131 * on the value read here.
1132 ************************************************************************/
1134 ixgbe_check_wol_support(struct adapter *adapter)
1136 struct ixgbe_hw *hw = &adapter->hw;
1139 /* Find out WoL support for port */
1140 adapter->wol_support = hw->wol_enabled = 0;
1141 ixgbe_get_device_caps(hw, &dev_caps);
/* WoL is enabled if both ports support it, or if port 0 supports it and
 * this is port 0.  NOTE(review): the PORT0 clause's second condition is
 * not visible in this view — confirm it checks the port/function id. */
1142 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1143 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1145 adapter->wol_support = hw->wol_enabled = 1;
1147 /* Save initial wake up filter configuration */
1148 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1151 } /* ixgbe_check_wol_support */
1153 /************************************************************************
1154 * ixgbe_setup_interface
1156 * Setup networking device structure and register an interface.
1157 ************************************************************************/
1159 ixgbe_setup_interface(if_ctx_t ctx)
1161 struct ifnet *ifp = iflib_get_ifp(ctx);
1162 struct adapter *adapter = iflib_get_softc(ctx);
1164 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1166 if_setbaudrate(ifp, IF_Gbps(10));
/* Max frame = MTU + L2 header + CRC (no VLAN tag allowance here) */
1168 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1170 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1172 ixgbe_add_media_types(ctx);
1174 /* Autoselect media by default */
1175 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1178 } /* ixgbe_setup_interface */
1180 /************************************************************************
1181 * ixgbe_if_get_counter
/*
 * iflib counter callback: report interface statistics from the values
 * cached by ixgbe_update_stats_counters(); anything not tracked by the
 * driver falls through to the stack's default accounting.
 */
1182 ************************************************************************/
1184 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1186 struct adapter *adapter = iflib_get_softc(ctx);
1187 if_t ifp = iflib_get_ifp(ctx);
1190 case IFCOUNTER_IPACKETS:
1191 return (adapter->ipackets);
1192 case IFCOUNTER_OPACKETS:
1193 return (adapter->opackets);
1194 case IFCOUNTER_IBYTES:
1195 return (adapter->ibytes);
1196 case IFCOUNTER_OBYTES:
1197 return (adapter->obytes);
1198 case IFCOUNTER_IMCASTS:
1199 return (adapter->imcasts);
1200 case IFCOUNTER_OMCASTS:
1201 return (adapter->omcasts);
1202 case IFCOUNTER_COLLISIONS:
1204 case IFCOUNTER_IQDROPS:
1205 return (adapter->iqdrops);
1206 case IFCOUNTER_OQDROPS:
1208 case IFCOUNTER_IERRORS:
1209 return (adapter->ierrors);
1211 return (if_get_counter_default(ifp, cnt));
1213 } /* ixgbe_if_get_counter */
1215 /************************************************************************
/* ixgbe_if_i2c_req - service SIOCGI2C requests (SFP module EEPROM reads)
 * by reading req->len bytes one at a time through the PHY's i2c ops. */
1217 ************************************************************************/
1219 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1221 struct adapter *adapter = iflib_get_softc(ctx);
1222 struct ixgbe_hw *hw = &adapter->hw;
/* Bail out if this PHY provides no i2c read op */
1226 if (hw->phy.ops.read_i2c_byte == NULL)
1228 for (i = 0; i < req->len; i++)
1229 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1230 req->dev_addr, &req->data[i]);
1232 } /* ixgbe_if_i2c_req */
1234 /************************************************************************
1235 * ixgbe_add_media_types
/*
 * Populate the ifmedia list from the PHY's supported physical layers.
 * Backplane (KR/KX4/KX) media are only representable when the kernel
 * has IFM_ETH_XTYPE; otherwise they are mapped to the nearest SR/CX4/CX
 * media word, with a console note about the mapping.
 */
1236 ************************************************************************/
1238 ixgbe_add_media_types(if_ctx_t ctx)
1240 struct adapter *adapter = iflib_get_softc(ctx);
1241 struct ixgbe_hw *hw = &adapter->hw;
1242 device_t dev = iflib_get_dev(ctx);
1245 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1247 /* Media types with matching FreeBSD media defines */
1248 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1249 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1250 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1251 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1252 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1253 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1254 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1255 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1257 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1258 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1259 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
/* Multispeed fiber modules also get the 1G fiber media word */
1262 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1263 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1264 if (hw->phy.multispeed_fiber)
1265 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1268 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1269 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1270 if (hw->phy.multispeed_fiber)
1271 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1273 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1274 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1275 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1276 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1278 #ifdef IFM_ETH_XTYPE
1279 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1280 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1281 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1282 ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1283 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1284 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1285 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1286 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
/* No IFM_ETH_XTYPE: approximate backplane media with legacy words */
1288 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1289 device_printf(dev, "Media supported: 10GbaseKR\n");
1290 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1291 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1294 device_printf(dev, "Media supported: 10GbaseKX4\n");
1295 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1296 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1298 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1299 device_printf(dev, "Media supported: 1000baseKX\n");
1300 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1301 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1303 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1304 device_printf(dev, "Media supported: 2500baseKX\n");
1305 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1306 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
/* 1000baseBX has no ifmedia word to map to; just report it */
1309 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1310 device_printf(dev, "Media supported: 1000baseBX\n");
1312 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1313 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1315 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1318 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1319 } /* ixgbe_add_media_types */
1321 /************************************************************************
/* ixgbe_is_sfp - return whether this port uses a pluggable (SFP/QSFP)
 * module, which determines if module-insertion handling is needed. */
1323 ************************************************************************/
1325 ixgbe_is_sfp(struct ixgbe_hw *hw)
1327 switch (hw->mac.type) {
/* 82598 only exposes SFP via the NL PHY type */
1328 case ixgbe_mac_82598EB:
1329 if (hw->phy.type == ixgbe_phy_nl)
1332 case ixgbe_mac_82599EB:
1333 switch (hw->mac.ops.get_media_type(hw)) {
1334 case ixgbe_media_type_fiber:
1335 case ixgbe_media_type_fiber_qsfp:
1340 case ixgbe_mac_X550EM_x:
1341 case ixgbe_mac_X550EM_a:
1342 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1348 } /* ixgbe_is_sfp */
1350 /************************************************************************
/* ixgbe_config_link - kick off link setup: defer to the admin task for
 * SFP ports (module identification), otherwise negotiate and set up the
 * link directly through the shared-code mac ops. */
1352 ************************************************************************/
1354 ixgbe_config_link(if_ctx_t ctx)
1356 struct adapter *adapter = iflib_get_softc(ctx);
1357 struct ixgbe_hw *hw = &adapter->hw;
1358 u32 autoneg, err = 0;
1359 bool sfp, negotiate;
1361 sfp = ixgbe_is_sfp(hw);
/* SFP: schedule the module-detect task instead of configuring inline */
1364 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1365 iflib_admin_intr_deferred(ctx);
1367 if (hw->mac.ops.check_link)
1368 err = ixgbe_check_link(hw, &adapter->link_speed,
1369 &adapter->link_up, FALSE);
/* Fall back to HW-reported capabilities when nothing is advertised */
1372 autoneg = hw->phy.autoneg_advertised;
1373 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1374 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1378 if (hw->mac.ops.setup_link)
1379 err = hw->mac.ops.setup_link(hw, autoneg,
1382 } /* ixgbe_config_link */
1384 /************************************************************************
1385 * ixgbe_update_stats_counters - Update board statistics counters.
/*
 * Reads the (clear-on-read) hardware statistics registers, accumulates
 * them into adapter->stats.pf, applies the documented 82598 counting
 * workarounds, and publishes the totals to the OS-facing counters used
 * by ixgbe_if_get_counter().
 */
1386 ************************************************************************/
1388 ixgbe_update_stats_counters(struct adapter *adapter)
1390 struct ixgbe_hw *hw = &adapter->hw;
1391 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1392 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1393 u64 total_missed_rx = 0;
1395 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1396 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1397 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1398 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1399 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
/* Per-queue packet/drop counters (first 16 queues) */
1401 for (int i = 0; i < 16; i++) {
1402 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1403 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1404 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1406 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1407 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1408 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1410 /* Hardware workaround, gprc counts missed packets */
/* NOTE(review): missed_rx is never updated in the code visible here
 * before this subtraction — confirm the MPC accumulation that should
 * feed missed_rx/total_missed_rx precedes this point. */
1411 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1412 stats->gprc -= missed_rx;
/* 82599+ have 36-bit octet counters split across L/H registers;
 * 82598 only implements the high register. */
1414 if (hw->mac.type != ixgbe_mac_82598EB) {
1415 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1416 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1417 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1418 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1419 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1420 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1421 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1422 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1424 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1425 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1426 /* 82598 only has a counter in the high register */
1427 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1428 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1429 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1433 * Workaround: mprc hardware is incorrectly counting
1434 * broadcasts, so for now we subtract those.
1436 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1437 stats->bprc += bprc;
1438 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1439 if (hw->mac.type == ixgbe_mac_82598EB)
1440 stats->mprc -= bprc;
1442 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1443 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1444 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1445 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1446 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1447 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1449 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1450 stats->lxontxc += lxon;
1451 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1452 stats->lxofftxc += lxoff;
1453 total = lxon + lxoff;
/* PAUSE frames are counted as transmitted packets; back them out of the
 * good/multicast/64-byte TX totals (and their octets from gotc). */
1455 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1456 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1457 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1458 stats->gptc -= total;
1459 stats->mptc -= total;
1460 stats->ptc64 -= total;
1461 stats->gotc -= total * ETHER_MIN_LEN;
1463 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1464 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1465 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1466 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1467 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1468 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1469 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1470 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1471 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1472 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1473 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1474 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1475 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1476 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1477 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1478 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1479 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1480 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1481 /* Only read FCOE on 82599 */
1482 if (hw->mac.type != ixgbe_mac_82598EB) {
1483 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1484 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1485 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1486 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1487 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1490 /* Fill out the OS statistics structure */
1491 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1492 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1493 IXGBE_SET_IBYTES(adapter, stats->gorc);
1494 IXGBE_SET_OBYTES(adapter, stats->gotc);
1495 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1496 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1497 IXGBE_SET_COLLISIONS(adapter, 0);
1498 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1499 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1500 } /* ixgbe_update_stats_counters */
1502 /************************************************************************
1503 * ixgbe_add_hw_stats
1505 * Add sysctl variables, one per statistic, to the system.
/*
 * Layout: driver-level counters under the device node, a per-queue
 * "queueN" subtree for each TX and RX ring, and a "mac_stats" subtree
 * mirroring adapter->stats.pf.  The UQUAD/PROC sysctls export live
 * pointers into the softc, so this must run after those are allocated.
 */
1506 ************************************************************************/
1508 ixgbe_add_hw_stats(struct adapter *adapter)
1510 device_t dev = iflib_get_dev(adapter->ctx);
1511 struct ix_rx_queue *rx_que;
1512 struct ix_tx_queue *tx_que;
1513 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1514 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1515 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1516 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1517 struct sysctl_oid *stat_node, *queue_node;
1518 struct sysctl_oid_list *stat_list, *queue_list;
1521 #define QUEUE_NAME_LEN 32
1522 char namebuf[QUEUE_NAME_LEN];
1524 /* Driver Statistics */
1525 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1526 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1527 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1528 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1529 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1530 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-ring nodes: head/tail register views plus soft counters */
1532 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1533 struct tx_ring *txr = &tx_que->txr;
1534 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1535 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1536 CTLFLAG_RD, NULL, "Queue Name");
1537 queue_list = SYSCTL_CHILDREN(queue_node);
1539 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1540 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1541 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1542 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1543 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1544 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1545 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1546 CTLFLAG_RD, &txr->tso_tx, "TSO");
1547 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1548 CTLFLAG_RD, &txr->total_packets,
1549 "Queue Packets Transmitted");
/* Per-RX-ring nodes (same "queueN" names, added after the TX OIDs) */
1552 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1553 struct rx_ring *rxr = &rx_que->rxr;
1554 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1555 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1556 CTLFLAG_RD, NULL, "Queue Name");
1557 queue_list = SYSCTL_CHILDREN(queue_node);
1559 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1560 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1561 sizeof(&adapter->rx_queues[i]),
1562 ixgbe_sysctl_interrupt_rate_handler, "IU",
1564 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1565 CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1566 "irqs on this queue");
1567 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1568 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1569 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1570 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1571 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1572 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1573 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1574 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1575 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1576 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1577 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1578 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1579 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1580 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1583 /* MAC stats get their own sub node */
1585 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1586 CTLFLAG_RD, NULL, "MAC Statistics");
1587 stat_list = SYSCTL_CHILDREN(stat_node);
1589 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1590 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1592 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1594 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1595 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1596 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1598 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1600 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1601 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1602 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1604 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1606 /* Flow Control stats */
1607 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1608 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1610 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1612 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1613 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1614 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1616 /* Packet Reception Stats */
1617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1618 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1619 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1620 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1622 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1624 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1625 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1626 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1628 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1630 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1631 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1632 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1634 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1636 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1638 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
/* NOTE(review): description says "1023-1522" but the bucket is 1024-1522
 * (prc1522) — description string off by one. */
1639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1640 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1642 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1644 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1646 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1648 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1650 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
/* NOTE(review): "management_pkts_drpd" exports stats->mngptc (management
 * packets TRANSMITTED); the dropped counter is stats->mngpdc — this
 * looks like a copy-paste bug, confirm and fix upstream. */
1651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1652 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1654 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1656 /* Packet Transmission Stats */
1657 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1658 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1660 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1662 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1664 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1665 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1666 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1667 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1668 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1669 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1670 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1671 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1672 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1673 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1674 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1675 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1676 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1677 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1678 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1679 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1680 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1681 } /* ixgbe_add_hw_stats */
1683 /************************************************************************
1684 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1686 * Retrieves the TDH value from the hardware
/* Read-only sysctl callback: oid_arg1 is the tx_ring; reads the live
 * TDH register for that ring and hands it to sysctl_handle_int(). */
1687 ************************************************************************/
1689 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1691 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1698 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1699 error = sysctl_handle_int(oidp, &val, 0, req);
1700 if (error || !req->newptr)
1704 } /* ixgbe_sysctl_tdh_handler */
1706 /************************************************************************
1707 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1709 * Retrieves the TDT value from the hardware
/* Read-only sysctl callback: oid_arg1 is the tx_ring; reads the live
 * TDT register for that ring and hands it to sysctl_handle_int(). */
1710 ************************************************************************/
1712 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1714 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1721 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1722 error = sysctl_handle_int(oidp, &val, 0, req);
1723 if (error || !req->newptr)
1727 } /* ixgbe_sysctl_tdt_handler */
1729 /************************************************************************
1730 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1732 * Retrieves the RDH value from the hardware
/* Read-only sysctl callback: oid_arg1 is the rx_ring; reads the live
 * RDH register for that ring and hands it to sysctl_handle_int(). */
1733 ************************************************************************/
1735 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1737 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1744 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1745 error = sysctl_handle_int(oidp, &val, 0, req);
1746 if (error || !req->newptr)
1750 } /* ixgbe_sysctl_rdh_handler */
1752 /************************************************************************
1753 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1755 * Retrieves the RDT value from the hardware
/* Read-only sysctl callback: oid_arg1 is the rx_ring; reads the live
 * RDT register for that ring and hands it to sysctl_handle_int(). */
1756 ************************************************************************/
1758 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1760 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1767 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1768 error = sysctl_handle_int(oidp, &val, 0, req);
1769 if (error || !req->newptr)
1773 } /* ixgbe_sysctl_rdt_handler */
1775 /************************************************************************
1776 * ixgbe_if_vlan_register
1778 * Run via vlan config EVENT, it enables us to use the
1779 * HW Filter table since we can get the vlan id. This
1780 * just creates the entry in the soft version of the
1781 * VFTA, init will repopulate the real table.
1782 ************************************************************************/
1784 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1786 struct adapter *adapter = iflib_get_softc(ctx);
/* Each 32-bit shadow_vfta word covers 32 VLAN IDs: bits 5..11 of the
 * tag select the word, the low 5 bits (set elsewhere — elided here)
 * select the bit within it. */
1789 index = (vtag >> 5) & 0x7F;
1791 adapter->shadow_vfta[index] |= (1 << bit);
1792 ++adapter->num_vlans;
/* Re-sync the hardware VFTA/VLAN config with the updated soft copy. */
1793 ixgbe_setup_vlan_hw_support(ctx);
1794 } /* ixgbe_if_vlan_register */
1796 /************************************************************************
1797 * ixgbe_if_vlan_unregister
1799 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1800 ************************************************************************/
1802 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1804 struct adapter *adapter = iflib_get_softc(ctx);
/* Mirror of ixgbe_if_vlan_register: clear the tag's bit in the soft VFTA. */
1807 index = (vtag >> 5) & 0x7F;
1809 adapter->shadow_vfta[index] &= ~(1 << bit);
1810 --adapter->num_vlans;
1811 /* Re-init to load the changes */
1812 ixgbe_setup_vlan_hw_support(ctx);
1813 } /* ixgbe_if_vlan_unregister */
1815 /************************************************************************
1816 * ixgbe_setup_vlan_hw_support
1817 ************************************************************************/
/*
 * Program hardware VLAN tag stripping (per-queue RXDCTL.VME on 82599+)
 * and the VLAN filter table from the driver's soft VFTA copy.
 */
1819 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1821 struct ifnet *ifp = iflib_get_ifp(ctx);
1822 struct adapter *adapter = iflib_get_softc(ctx);
1823 struct ixgbe_hw *hw = &adapter->hw;
1824 struct rx_ring *rxr;
1830 * We get here thru init_locked, meaning
1831 * a soft reset, this has already cleared
1832 * the VFTA and other state, so if there
1833 * have been no vlan's registered do nothing.
1835 if (adapter->num_vlans == 0)
1838 /* Setup the queues for vlans */
1839 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1840 for (i = 0; i < adapter->num_rx_queues; i++) {
1841 rxr = &adapter->rx_queues[i].rxr;
1842 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1843 if (hw->mac.type != ixgbe_mac_82598EB) {
1844 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1845 ctrl |= IXGBE_RXDCTL_VME;
1846 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember per-ring that the hardware strips the tag for us. */
1848 rxr->vtag_strip = TRUE;
1852 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1855 * A soft reset zero's out the VFTA, so
1856 * we need to repopulate it now.
1858 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1859 if (adapter->shadow_vfta[i] != 0)
1860 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1861 adapter->shadow_vfta[i])
1863 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1864 /* Enable the Filter Table if enabled */
1865 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1866 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1867 ctrl |= IXGBE_VLNCTRL_VFE;
/* 82598 has a single global VLAN-strip enable in VLNCTRL instead. */
1869 if (hw->mac.type == ixgbe_mac_82598EB)
1870 ctrl |= IXGBE_VLNCTRL_VME;
1871 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1872 } /* ixgbe_setup_vlan_hw_support */
1874 /************************************************************************
1875 * ixgbe_get_slot_info
1877 * Get the width and transaction speed of
1878 * the slot this adapter is plugged into.
1879 ************************************************************************/
/*
 * Determine the PCIe speed/width of the slot the adapter sits in and
 * warn when the available bandwidth is insufficient.  Quad-port parts
 * sit behind an internal bridge, so for those we walk up the PCI tree
 * and read the parent's Link Status register instead.
 */
1881 ixgbe_get_slot_info(struct adapter *adapter)
1883 device_t dev = iflib_get_dev(adapter->ctx);
1884 struct ixgbe_hw *hw = &adapter->hw;
1885 int bus_info_valid = TRUE;
1889 /* Some devices are behind an internal bridge */
1890 switch (hw->device_id) {
1891 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1892 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1893 goto get_parent_info;
1898 ixgbe_get_bus_info(hw);
1901 * Some devices don't use PCI-E, but there is no need
1902 * to display "Unknown" for bus speed and width.
1904 switch (hw->mac.type) {
1905 case ixgbe_mac_X550EM_x:
1906 case ixgbe_mac_X550EM_a:
1914 * For the Quad port adapter we need to parse back
1915 * up the PCI tree to find the speed of the expansion
1916 * slot into which this adapter is plugged. A bit more work.
1918 dev = device_get_parent(device_get_parent(dev));
1920 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1921 pci_get_slot(dev), pci_get_function(dev));
1923 dev = device_get_parent(device_get_parent(dev));
1925 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1926 pci_get_slot(dev), pci_get_function(dev));
1928 /* Now get the PCI Express Capabilities offset */
1929 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1931 * Hmm...can't get PCI-Express capabilities.
1932 * Falling back to default method.
1934 bus_info_valid = FALSE;
1935 ixgbe_get_bus_info(hw);
1938 /* ...and read the Link Status Register */
1939 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1940 ixgbe_set_pci_config_data_generic(hw, link);
1943 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1944 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1945 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1946 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1948 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1949 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1950 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Only warn about bandwidth when we actually read valid bus info. */
1953 if (bus_info_valid) {
1954 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1955 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1956 (hw->bus.speed == ixgbe_bus_speed_2500))) {
1957 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1958 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
/* The QSFP quad-port part needs a full x8 Gen3 link. */
1960 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1961 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1962 (hw->bus.speed < ixgbe_bus_speed_8000))) {
1963 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1964 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1967 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1970 } /* ixgbe_get_slot_info */
1972 /************************************************************************
1973 * ixgbe_if_msix_intr_assign
1975 * Setup MSI-X Interrupt resources and handlers
1976 ************************************************************************/
/*
 * Allocate and wire up MSI-X vectors: one per RX queue (starting after
 * vector 0, the admin vector), softirqs for TX queues mapped onto the
 * RX vectors, and finally the admin/link vector.  On failure, frees any
 * IRQs already allocated and unwinds.
 */
1978 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1980 struct adapter *adapter = iflib_get_softc(ctx);
1981 struct ix_rx_queue *rx_que = adapter->rx_queues;
1982 struct ix_tx_queue *tx_que;
1983 int error, rid, vector = 0;
1987 /* Admin Que is vector 0*/
1989 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1992 snprintf(buf, sizeof(buf), "rxq%d", i);
1993 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1994 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1997 device_printf(iflib_get_dev(ctx),
1998 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count to what was actually allocated before bailing. */
1999 adapter->num_rx_queues = i + 1;
2003 rx_que->msix = vector;
2004 adapter->active_queues |= (u64)(1 << rx_que->msix);
2005 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2007 * The queue ID is used as the RSS layer bucket ID.
2008 * We look up the queue ID -> RSS CPU ID and select
2011 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2014 * Bind the MSI-X vector, and thus the
2015 * rings to the corresponding cpu.
2017 * This just happens to match the default RSS
2018 * round-robin bucket -> queue -> CPU allocation.
2020 if (adapter->num_rx_queues > 1)
/* TX queues share RX vectors round-robin (softirq, no separate MSI-X). */
2025 for (int i = 0; i < adapter->num_tx_queues; i++) {
2026 snprintf(buf, sizeof(buf), "txq%d", i);
2027 tx_que = &adapter->tx_queues[i];
2028 tx_que->msix = i % adapter->num_rx_queues;
2029 iflib_softirq_alloc_generic(ctx,
2030 &adapter->rx_queues[tx_que->msix].que_irq,
2031 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2034 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2035 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2037 device_printf(iflib_get_dev(ctx),
2038 "Failed to register admin handler");
2042 adapter->vector = vector;
/* Error unwind: release every queue IRQ allocated so far. */
2046 iflib_irq_free(ctx, &adapter->irq);
2047 rx_que = adapter->rx_queues;
2048 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2049 iflib_irq_free(ctx, &rx_que->que_irq);
2052 } /* ixgbe_if_msix_intr_assign */
2054 /*********************************************************************
2055 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2056 **********************************************************************/
2058 ixgbe_msix_que(void *arg)
2060 struct ix_rx_queue *que = arg;
2061 struct adapter *adapter = que->adapter;
2062 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
2064 /* Protect against spurious interrupts */
2065 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Mask this queue's interrupt; iflib's thread re-enables it after servicing. */
2068 ixgbe_disable_queue(adapter, que->msix);
2071 return (FILTER_SCHEDULE_THREAD);
2072 } /* ixgbe_msix_que */
2074 /************************************************************************
2075 * ixgbe_media_status - Media Ioctl callback
2077 * Called whenever the user queries the status of
2078 * the interface using ifconfig.
2079 ************************************************************************/
/*
 * ifmedia status callback: translate the adapter's PHY layer and current
 * link speed into IFM_* active-media flags, then append flow-control
 * pause flags.  Returns early (status IFM_AVALID only) when link is down.
 */
2081 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2083 struct adapter *adapter = iflib_get_softc(ctx);
2084 struct ixgbe_hw *hw = &adapter->hw;
2087 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2089 ifmr->ifm_status = IFM_AVALID;
2090 ifmr->ifm_active = IFM_ETHER;
2092 if (!adapter->link_active)
2095 ifmr->ifm_status |= IFM_ACTIVE;
2096 layer = adapter->phy_layer;
/* Copper (BASE-T) layers: map each link speed to its IFM twisted-pair type. */
2098 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2099 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2100 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2101 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2102 switch (adapter->link_speed) {
2103 case IXGBE_LINK_SPEED_10GB_FULL:
2104 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2106 case IXGBE_LINK_SPEED_1GB_FULL:
2107 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2109 case IXGBE_LINK_SPEED_100_FULL:
2110 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2112 case IXGBE_LINK_SPEED_10_FULL:
2113 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* SFP+ direct-attach copper. */
2116 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2117 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2118 switch (adapter->link_speed) {
2119 case IXGBE_LINK_SPEED_10GB_FULL:
2120 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2123 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2124 switch (adapter->link_speed) {
2125 case IXGBE_LINK_SPEED_10GB_FULL:
2126 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2128 case IXGBE_LINK_SPEED_1GB_FULL:
2129 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2132 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2133 switch (adapter->link_speed) {
2134 case IXGBE_LINK_SPEED_10GB_FULL:
2135 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2137 case IXGBE_LINK_SPEED_1GB_FULL:
2138 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2141 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2142 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2143 switch (adapter->link_speed) {
2144 case IXGBE_LINK_SPEED_10GB_FULL:
2145 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2147 case IXGBE_LINK_SPEED_1GB_FULL:
2148 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2151 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2152 switch (adapter->link_speed) {
2153 case IXGBE_LINK_SPEED_10GB_FULL:
2154 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2158 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX): older ifmedia headers lack the dedicated
 * IFM_10G_KR/IFM_2500_KX types, so fall back to look-alike types. */
2161 #ifndef IFM_ETH_XTYPE
2162 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2163 switch (adapter->link_speed) {
2164 case IXGBE_LINK_SPEED_10GB_FULL:
2165 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2167 case IXGBE_LINK_SPEED_2_5GB_FULL:
2168 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2170 case IXGBE_LINK_SPEED_1GB_FULL:
2171 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2174 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2175 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2176 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2177 switch (adapter->link_speed) {
2178 case IXGBE_LINK_SPEED_10GB_FULL:
2179 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2181 case IXGBE_LINK_SPEED_2_5GB_FULL:
2182 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2184 case IXGBE_LINK_SPEED_1GB_FULL:
2185 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2189 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2190 switch (adapter->link_speed) {
2191 case IXGBE_LINK_SPEED_10GB_FULL:
2192 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2194 case IXGBE_LINK_SPEED_2_5GB_FULL:
2195 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2197 case IXGBE_LINK_SPEED_1GB_FULL:
2198 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2201 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2202 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2203 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2204 switch (adapter->link_speed) {
2205 case IXGBE_LINK_SPEED_10GB_FULL:
2206 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2208 case IXGBE_LINK_SPEED_2_5GB_FULL:
2209 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2211 case IXGBE_LINK_SPEED_1GB_FULL:
2212 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2217 /* If nothing is recognized... */
2218 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2219 ifmr->ifm_active |= IFM_UNKNOWN;
2221 /* Display current flow control setting used on link */
2222 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2223 hw->fc.current_mode == ixgbe_fc_full)
2224 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2225 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2226 hw->fc.current_mode == ixgbe_fc_full)
2227 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2228 } /* ixgbe_media_status */
2230 /************************************************************************
2231 * ixgbe_media_change - Media Ioctl callback
2233 * Called when the user changes speed/duplex using
2234 * media/mediopt option with ifconfig.
2235 ************************************************************************/
/*
 * ifmedia change callback: translate the requested IFM subtype into an
 * ixgbe_link_speed mask, program the PHY via setup_link(), and record
 * the resulting advertisement bitmap in adapter->advertise.
 */
2237 ixgbe_if_media_change(if_ctx_t ctx)
2239 struct adapter *adapter = iflib_get_softc(ctx);
2240 struct ifmedia *ifm = iflib_get_media(ctx);
2241 struct ixgbe_hw *hw = &adapter->hw;
2242 ixgbe_link_speed speed = 0;
2244 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2246 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media is fixed; user-driven media changes don't apply. */
2249 if (hw->phy.media_type == ixgbe_media_type_backplane)
2253 * We don't actually need to check against the supported
2254 * media types of the adapter; ifmedia will take care of
2257 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2260 speed |= IXGBE_LINK_SPEED_100_FULL;
2261 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2262 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2266 #ifndef IFM_ETH_XTYPE
2267 case IFM_10G_SR: /* KR, too */
2268 case IFM_10G_CX4: /* KX4 */
2273 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2274 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2276 #ifndef IFM_ETH_XTYPE
2277 case IFM_1000_CX: /* KX */
2283 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2286 speed |= IXGBE_LINK_SPEED_100_FULL;
2287 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2289 case IFM_10G_TWINAX:
2290 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2293 speed |= IXGBE_LINK_SPEED_100_FULL;
2296 speed |= IXGBE_LINK_SPEED_10_FULL;
2302 hw->mac.autotry_restart = TRUE;
2303 hw->mac.ops.setup_link(hw, speed, TRUE);
/* Encode the advertised speeds for the sysctl interface:
 * bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M. */
2304 adapter->advertise =
2305 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2306 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2307 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2308 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2313 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2316 } /* ixgbe_if_media_change */
2318 /************************************************************************
2320 ************************************************************************/
/*
 * Program unicast/multicast promiscuous bits (FCTRL.UPE/MPE) from the
 * interface flags: IFF_PROMISC sets both, IFF_ALLMULTI sets MPE only,
 * otherwise both are cleared (MPE stays set when the multicast list
 * overflows the hardware filter).
 */
2322 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2324 struct adapter *adapter = iflib_get_softc(ctx);
2325 struct ifnet *ifp = iflib_get_ifp(ctx);
2329 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2330 rctl &= (~IXGBE_FCTRL_UPE);
2331 if (ifp->if_flags & IFF_ALLMULTI)
2332 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2334 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
/* Only drop multicast-promiscuous if the filter table can hold the list. */
2336 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2337 rctl &= (~IXGBE_FCTRL_MPE);
2338 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2340 if (ifp->if_flags & IFF_PROMISC) {
2341 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2342 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2343 } else if (ifp->if_flags & IFF_ALLMULTI) {
2344 rctl |= IXGBE_FCTRL_MPE;
2345 rctl &= ~IXGBE_FCTRL_UPE;
2346 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2349 } /* ixgbe_if_promisc_set */
2351 /************************************************************************
2352 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2353 ************************************************************************/
/*
 * Admin/link MSI-X interrupt handler.  Decodes EICR causes (link state
 * change, Flow Director, ECC, over-temperature, VF mailbox, SFP module
 * insertion, fan failure, external PHY) into adapter->task_requests bits
 * that the admin task services later; returns FILTER_SCHEDULE_THREAD
 * only when at least one task was requested.
 */
2355 ixgbe_msix_link(void *arg)
2357 struct adapter *adapter = arg;
2358 struct ixgbe_hw *hw = &adapter->hw;
2359 u32 eicr, eicr_mask;
2362 ++adapter->link_irq;
2364 /* Pause other interrupts */
2365 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2367 /* First get the cause */
2368 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2369 /* Be sure the queue bits are not cleared */
2370 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2371 /* Clear interrupt with write */
2372 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2374 /* Link status change */
2375 if (eicr & IXGBE_EICR_LSC) {
2376 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2377 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* The causes below do not exist on 82598. */
2380 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2381 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2382 (eicr & IXGBE_EICR_FLOW_DIR)) {
2383 /* This is probably overkill :) */
2384 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2385 return (FILTER_HANDLED);
2386 /* Disable the interrupt */
2387 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2388 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2390 if (eicr & IXGBE_EICR_ECC) {
2391 device_printf(iflib_get_dev(adapter->ctx),
2392 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2393 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2396 /* Check for over temp condition */
2397 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2398 switch (adapter->hw.mac.type) {
/* X550EM_a reports over-temp via GPI SDP0 rather than EICR.TS. */
2399 case ixgbe_mac_X550EM_a:
2400 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2402 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2403 IXGBE_EICR_GPI_SDP0_X550EM_a);
2404 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2405 IXGBE_EICR_GPI_SDP0_X550EM_a);
2406 retval = hw->phy.ops.check_overtemp(hw);
2407 if (retval != IXGBE_ERR_OVERTEMP)
2409 device_printf(iflib_get_dev(adapter->ctx),
2410 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2411 device_printf(iflib_get_dev(adapter->ctx),
2412 "System shutdown required!\n");
2415 if (!(eicr & IXGBE_EICR_TS))
2417 retval = hw->phy.ops.check_overtemp(hw);
2418 if (retval != IXGBE_ERR_OVERTEMP)
2420 device_printf(iflib_get_dev(adapter->ctx),
2421 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2422 device_printf(iflib_get_dev(adapter->ctx),
2423 "System shutdown required!\n");
2424 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2429 /* Check for VF message */
2430 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2431 (eicr & IXGBE_EICR_MAILBOX))
2432 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2435 if (ixgbe_is_sfp(hw)) {
2436 /* Pluggable optics-related interrupt */
2437 if (hw->mac.type >= ixgbe_mac_X540)
2438 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2440 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2442 if (eicr & eicr_mask) {
2443 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2444 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2447 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2448 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2449 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2450 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2451 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2455 /* Check for fan failure */
2456 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2457 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2458 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2461 /* External PHY interrupt */
2462 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2463 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2464 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2465 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2468 return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2469 } /* ixgbe_msix_link */
2471 /************************************************************************
2472 * ixgbe_sysctl_interrupt_rate_handler
2473 ************************************************************************/
/*
 * Sysctl handler for the per-queue interrupt rate.  Converts the EITR
 * interval field (bits 3..11, in units of 2us) to interrupts/second for
 * display, and converts a user-supplied rate back into an EITR interval.
 * A rate of 0 (or >= 500000) removes the limitation.
 */
2475 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2477 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2479 unsigned int reg, usec, rate;
2481 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2482 usec = ((reg & 0x0FF8) >> 3);
2484 rate = 500000 / usec;
2487 error = sysctl_handle_int(oidp, &rate, 0, req);
2488 if (error || !req->newptr)
2490 reg &= ~0xfff; /* default, no limitation */
2491 ixgbe_max_interrupt_rate = 0;
2492 if (rate > 0 && rate < 500000) {
2495 ixgbe_max_interrupt_rate = rate;
/* Store the new interval; the field's low 3 bits are reserved, hence 0xff8. */
2496 reg |= ((4000000/rate) & 0xff8);
2498 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2501 } /* ixgbe_sysctl_interrupt_rate_handler */
2503 /************************************************************************
2504 * ixgbe_add_device_sysctls
2505 ************************************************************************/
/*
 * Register the device-level sysctl nodes: flow control and advertised
 * speed for all parts, plus conditional nodes for X550 DMA coalescing,
 * WoL-capable parts, the X552/X557-AT external PHY, and EEE.
 */
2507 ixgbe_add_device_sysctls(if_ctx_t ctx)
2509 struct adapter *adapter = iflib_get_softc(ctx);
2510 device_t dev = iflib_get_dev(ctx);
2511 struct ixgbe_hw *hw = &adapter->hw;
2512 struct sysctl_oid_list *child;
2513 struct sysctl_ctx_list *ctx_list;
2515 ctx_list = device_get_sysctl_ctx(dev);
2516 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2518 /* Sysctls for all devices */
2519 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2520 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2521 IXGBE_SYSCTL_DESC_SET_FC);
2523 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2524 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2525 IXGBE_SYSCTL_DESC_ADV_SPEED);
2528 /* testing sysctls (for all devices) */
2529 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2530 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2531 "I", "PCI Power State");
2533 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2534 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2535 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2537 /* for X550 series devices */
2538 if (hw->mac.type >= ixgbe_mac_X550)
2539 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2540 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2541 "I", "DMA Coalesce");
2543 /* for WoL-capable devices */
2544 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2545 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2546 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2547 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2549 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2550 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2551 "I", "Enable/Disable Wake Up Filters");
2554 /* for X552/X557-AT devices */
2555 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2556 struct sysctl_oid *phy_node;
2557 struct sysctl_oid_list *phy_list;
/* External-PHY sysctls live under their own "phy" subtree. */
2559 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2560 CTLFLAG_RD, NULL, "External PHY sysctls");
2561 phy_list = SYSCTL_CHILDREN(phy_node);
2563 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2564 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2565 "I", "Current External PHY Temperature (Celsius)");
2567 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2568 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2569 ixgbe_sysctl_phy_overtemp_occurred, "I",
2570 "External PHY High Temperature Event Occurred");
2573 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2574 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2575 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2576 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2578 } /* ixgbe_add_device_sysctls */
2580 /************************************************************************
2581 * ixgbe_allocate_pci_resources
2582 ************************************************************************/
/*
 * Map the device's memory BAR and record the bus-space tag/handle that
 * the IXGBE_READ_REG/IXGBE_WRITE_REG macros use.
 */
2584 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2586 struct adapter *adapter = iflib_get_softc(ctx);
2587 device_t dev = iflib_get_dev(ctx);
2591 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2594 if (!(adapter->pci_mem)) {
2595 device_printf(dev, "Unable to allocate bus resource: memory\n");
2599 /* Save bus_space values for READ/WRITE_REG macros */
2600 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2601 adapter->osdep.mem_bus_space_handle =
2602 rman_get_bushandle(adapter->pci_mem);
2603 /* Set hw values for shared code */
2604 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2607 } /* ixgbe_allocate_pci_resources */
2609 /************************************************************************
2610 * ixgbe_detach - Device removal routine
2612 * Called when the driver is being removed.
2613 * Stops the adapter and deallocates all the resources
2614 * that were allocated for driver operation.
2616 * return 0 on success, positive on failure
2617 ************************************************************************/
2619 ixgbe_if_detach(if_ctx_t ctx)
2621 struct adapter *adapter = iflib_get_softc(ctx);
2622 device_t dev = iflib_get_dev(ctx);
2625 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Refuse to detach while VFs are still attached via SR-IOV. */
2627 if (ixgbe_pci_iov_detach(dev) != 0) {
2628 device_printf(dev, "SR-IOV in use; detach first.\n");
2632 ixgbe_setup_low_power_mode(ctx);
2634 /* let hardware know driver is unloading */
2635 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2636 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2637 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
/* Release the BAR mapping and the multicast address table. */
2639 ixgbe_free_pci_resources(ctx);
2640 free(adapter->mta, M_IXGBE);
2643 } /* ixgbe_if_detach */
2645 /************************************************************************
2646 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2648 * Prepare the adapter/port for LPLU and/or WoL
2649 ************************************************************************/
2651 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2653 struct adapter *adapter = iflib_get_softc(ctx);
2654 struct ixgbe_hw *hw = &adapter->hw;
2655 device_t dev = iflib_get_dev(ctx);
/* Without WoL there is nothing to keep powered: shut the PHY down. */
2658 if (!hw->wol_enabled)
2659 ixgbe_set_phy_power(hw, FALSE);
2661 /* Limit power management flow to X550EM baseT */
2662 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2663 hw->phy.ops.enter_lplu) {
2664 /* Turn off support for APM wakeup. (Using ACPI instead) */
2665 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2666 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2669 * Clear Wake Up Status register to prevent any previous wakeup
2670 * events from waking us up immediately after we suspend.
2672 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2675 * Program the Wakeup Filter Control register with user filter
2678 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2680 /* Enable wakeups and power management in Wakeup Control */
2681 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2682 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2684 /* X550EM baseT adapters need a special LPLU flow */
2685 hw->phy.reset_disable = TRUE;
2687 error = hw->phy.ops.enter_lplu(hw);
2689 device_printf(dev, "Error entering LPLU: %d\n", error);
2690 hw->phy.reset_disable = FALSE;
2692 /* Just stop for other adapters */
2697 } /* ixgbe_setup_low_power_mode */
2699 /************************************************************************
2700 * ixgbe_shutdown - Shutdown entry point
2701 ************************************************************************/
2703 ixgbe_if_shutdown(if_ctx_t ctx)
2707 INIT_DEBUGOUT("ixgbe_shutdown: begin");
/* Shutdown just transitions the port to its low-power/WoL state. */
2709 error = ixgbe_setup_low_power_mode(ctx);
2712 } /* ixgbe_if_shutdown */
2714 /************************************************************************
2718 ************************************************************************/
2720 ixgbe_if_suspend(if_ctx_t ctx)
2724 INIT_DEBUGOUT("ixgbe_suspend: begin");
/* Same path as shutdown: arm WoL/LPLU before the system sleeps. */
2726 error = ixgbe_setup_low_power_mode(ctx);
2729 } /* ixgbe_if_suspend */
2731 /************************************************************************
2735 ************************************************************************/
2737 ixgbe_if_resume(if_ctx_t ctx)
2739 struct adapter *adapter = iflib_get_softc(ctx);
2740 device_t dev = iflib_get_dev(ctx);
2741 struct ifnet *ifp = iflib_get_ifp(ctx);
2742 struct ixgbe_hw *hw = &adapter->hw;
2745 INIT_DEBUGOUT("ixgbe_resume: begin");
2747 /* Read & clear WUS register */
2748 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
/* Report which wakeup cause(s) fired, then clear them (write-1-to-clear). */
2750 device_printf(dev, "Woken up by (WUS): %#010x\n",
2751 IXGBE_READ_REG(hw, IXGBE_WUS));
2752 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2753 /* And clear WUFC until next low-power transition */
2754 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2757 * Required after D3->D0 transition;
2758 * will re-advertise all previous advertised speeds
2760 if (ifp->if_flags & IFF_UP)
2764 } /* ixgbe_if_resume */
2766 /************************************************************************
2767 * ixgbe_if_mtu_set - Ioctl mtu entry point
2769 * Return 0 on success, EINVAL on failure
2770 ************************************************************************/
2772 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2774 struct adapter *adapter = iflib_get_softc(ctx);
2777 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject MTUs beyond the hardware maximum. */
2779 if (mtu > IXGBE_MAX_MTU) {
/* max_frame_size = MTU plus Ethernet header overhead. */
2782 adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2786 } /* ixgbe_if_mtu_set */
2788 /************************************************************************
2789 * ixgbe_if_crcstrip_set
2790 ************************************************************************/
2792 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2794 struct adapter *sc = iflib_get_softc(ctx);
2795 struct ixgbe_hw *hw = &sc->hw;
2796 /* crc stripping is set in two places:
2797 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2798 * IXGBE_RDRXCTL (set by the original driver in
2799 * ixgbe_setup_hw_rsc() called in init_locked.
2800 * We disable the setting when netmap is compiled in).
2801 * We update the values here, but also in ixgbe.c because
2802 * init_locked sometimes is called outside our control.
2806 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2807 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2810 D("%s read HLREG 0x%x rxc 0x%x",
2811 onoff ? "enter" : "exit", hl, rxc);
2813 /* hw requirements ... */
2814 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2815 rxc |= IXGBE_RDRXCTL_RSCACKC;
/* netmap mode ("onoff") with crcstrip disabled: keep the CRC on RX. */
2816 if (onoff && !crcstrip) {
2817 /* keep the crc. Fast rx */
2818 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2819 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2821 /* reset default mode */
2822 hl |= IXGBE_HLREG0_RXCRCSTRP;
2823 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2827 D("%s write HLREG 0x%x rxc 0x%x",
2828 onoff ? "enter" : "exit", hl, rxc);
2830 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2831 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2832 } /* ixgbe_if_crcstrip_set */
2834 /*********************************************************************
2835 * ixgbe_if_init - Init entry point
2837 * Used in two ways: It is used by the stack as an init
2838 * entry point in network interface structure. It is also
2839 * used by the driver as a hw/sw initialization routine to
2840 * get to a consistent state.
2842 * Return 0 on success, positive on failure
2843 **********************************************************************/
/* NOTE(review): this extraction is missing interior lines (return type,
 * braces, and local declarations such as i/j/err/mhadd/txdctl/rxdctl/
 * rxctrl/ctrl_ext) — verify any change against the full driver source. */
2845 ixgbe_if_init(if_ctx_t ctx)
2847 struct adapter *adapter = iflib_get_softc(ctx);
2848 struct ifnet *ifp = iflib_get_ifp(ctx);
2849 device_t dev = iflib_get_dev(ctx);
2850 struct ixgbe_hw *hw = &adapter->hw;
2851 struct ix_rx_queue *rx_que;
2852 struct ix_tx_queue *tx_que;
2859 INIT_DEBUGOUT("ixgbe_if_init: begin");
2861 /* Queue indices may change with IOV mode */
2862 ixgbe_align_all_queue_indices(adapter);
2864 /* reprogram the RAR[0] in case user changed it. */
2865 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2867 /* Get the latest mac address, User can use a LAA */
2868 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2869 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2870 hw->addr_ctrl.rar_used_count = 1;
2874 ixgbe_initialize_iov(adapter);
2876 ixgbe_initialize_transmit_units(ctx);
2878 /* Setup Multicast table */
2879 ixgbe_if_multi_set(ctx);
2881 /* Determine the correct mbuf pool, based on frame size */
2882 if (adapter->max_frame_size <= MCLBYTES)
2883 adapter->rx_mbuf_sz = MCLBYTES;
/* (else branch: jumbo frames use page-sized jumbo clusters) */
2885 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2887 /* Configure RX settings */
2888 ixgbe_initialize_receive_units(ctx);
2891 * Initialize variable holding task enqueue requests
2892 * from MSI-X interrupts
2894 adapter->task_requests = 0;
2896 /* Enable SDP & MSI-X interrupts based on adapter */
2897 ixgbe_config_gpie(adapter);
/* Tell the hardware the max frame size when MTU exceeds the default */
2900 if (ifp->if_mtu > ETHERMTU) {
2901 /* aka IXGBE_MAXFRS on 82599 and newer */
2902 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2903 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2904 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2905 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2908 /* Now enable all the queues */
2909 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2910 struct tx_ring *txr = &tx_que->txr;
2912 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2913 txdctl |= IXGBE_TXDCTL_ENABLE;
2914 /* Set WTHRESH to 8, burst writeback */
2915 txdctl |= (8 << 16);
2917 * When the internal queue falls below PTHRESH (32),
2918 * start prefetching as long as there are at least
2919 * HTHRESH (1) buffers ready. The values are taken
2920 * from the Intel linux driver 3.8.21.
2921 * Prefetching enables tx line rate even with 1 queue.
2923 txdctl |= (32 << 0) | (1 << 8);
2924 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2927 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2928 struct rx_ring *rxr = &rx_que->rxr;
2930 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2931 if (hw->mac.type == ixgbe_mac_82598EB) {
/* 82598: clear the PTHRESH/HTHRESH/WTHRESH fields before enabling */
2937 rxdctl &= ~0x3FFFFF;
2940 rxdctl |= IXGBE_RXDCTL_ENABLE;
2941 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
/* Poll (bounded, 10 tries) until the queue-enable bit sticks */
2942 for (j = 0; j < 10; j++) {
2943 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2944 IXGBE_RXDCTL_ENABLE)
2952 /* Enable Receive engine */
2953 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2954 if (hw->mac.type == ixgbe_mac_82598EB)
2955 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2956 rxctrl |= IXGBE_RXCTRL_RXEN;
2957 ixgbe_enable_rx_dma(hw, rxctrl);
2959 /* Set up MSI/MSI-X routing */
2960 if (ixgbe_enable_msix) {
2961 ixgbe_configure_ivars(adapter);
2962 /* Set up auto-mask */
2963 if (hw->mac.type == ixgbe_mac_82598EB)
2964 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
/* (else: newer MACs auto-mask via the extended EIAM pair) */
2966 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2967 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2969 } else { /* Simple settings for Legacy/MSI */
2970 ixgbe_set_ivar(adapter, 0, 0, 0);
2971 ixgbe_set_ivar(adapter, 0, 0, 1);
2972 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2975 ixgbe_init_fdir(adapter);
2978 * Check on any SFP devices that
2979 * need to be kick-started
2981 if (hw->phy.type == ixgbe_phy_none) {
2982 err = hw->phy.ops.identify(hw);
2983 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2985 "Unsupported SFP+ module type was detected.\n");
2990 /* Set moderation on the Link interrupt */
2991 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2993 /* Enable power to the phy. */
2994 ixgbe_set_phy_power(hw, TRUE);
2996 /* Config/Enable Link */
2997 ixgbe_config_link(ctx);
2999 /* Hardware Packet Buffer & Flow Control setup */
3000 ixgbe_config_delay_values(adapter);
3002 /* Initialize the FC settings */
3005 /* Set up VLAN support and filter */
3006 ixgbe_setup_vlan_hw_support(ctx);
3008 /* Setup DMA Coalescing */
3009 ixgbe_config_dmac(adapter);
3011 /* And now turn on interrupts */
3012 ixgbe_if_enable_intr(ctx);
3014 /* Enable the use of the MBX by the VF's */
3015 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3016 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3017 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3018 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3021 } /* ixgbe_init_locked */
3023 /************************************************************************
/* ixgbe_set_ivar */
3026 * Setup the correct IVAR register for a particular MSI-X interrupt
3027 * (yes this is all very magic and confusing :)
3028 * - entry is the register array entry
3029 * - vector is the MSI-X vector for this queue
3030 * - type is RX/TX/MISC
3031 ************************************************************************/
3033 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3035 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the IVAR entry as valid; hardware ignores entries without it */
3038 vector |= IXGBE_IVAR_ALLOC_VAL;
3040 switch (hw->mac.type) {
3041 case ixgbe_mac_82598EB:
/* type == -1 (MISC) maps to the "other causes" slot on 82598 */
3043 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3045 entry += (type * 64);
/* Four 8-bit IVAR entries per 32-bit register on 82598 */
3046 index = (entry >> 2) & 0x1F;
3047 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3048 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3049 ivar |= (vector << (8 * (entry & 0x3)));
3050 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3052 case ixgbe_mac_82599EB:
3053 case ixgbe_mac_X540:
3054 case ixgbe_mac_X550:
3055 case ixgbe_mac_X550EM_x:
3056 case ixgbe_mac_X550EM_a:
3057 if (type == -1) { /* MISC IVAR */
3058 index = (entry & 1) * 8;
3059 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3060 ivar &= ~(0xFF << index);
3061 ivar |= (vector << index);
3062 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3063 } else { /* RX/TX IVARS */
/* Two queues per IVAR register; RX in low byte, TX in next */
3064 index = (16 * (entry & 1)) + (8 * type);
3065 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3066 ivar &= ~(0xFF << index);
3067 ivar |= (vector << index);
3068 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3073 } /* ixgbe_set_ivar */
3075 /************************************************************************
3076 * ixgbe_configure_ivars
/* Program an IVAR entry for every RX and TX queue plus the link
 * interrupt, and seed each queue's EITR moderation register. */
3077 ************************************************************************/
3079 ixgbe_configure_ivars(struct adapter *adapter)
3081 struct ix_rx_queue *rx_que = adapter->rx_queues;
3082 struct ix_tx_queue *tx_que = adapter->tx_queues;
/* NOTE(review): declaration/initialization of 'newitr' was dropped by
 * the extraction; it presumably defaults to 0 when the rate is 0. */
3085 if (ixgbe_max_interrupt_rate > 0)
3086 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3089 * Disable DMA coalescing if interrupt moderation is
3096 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3097 struct rx_ring *rxr = &rx_que->rxr;
3099 /* First the RX queue entry */
3100 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3102 /* Set an Initial EITR value */
3103 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3105 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3106 struct tx_ring *txr = &tx_que->txr;
3108 /* ... and the TX */
3109 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3111 /* For the Link interrupt */
3112 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3113 } /* ixgbe_configure_ivars */
3115 /************************************************************************
/* ixgbe_config_gpie - program the GPIE register: MSI-X mode plus the
 * SDP (software-definable pin) interrupt enables appropriate for this
 * adapter's features and MAC type. */
3117 ************************************************************************/
3119 ixgbe_config_gpie(struct adapter *adapter)
3121 struct ixgbe_hw *hw = &adapter->hw;
3124 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3126 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3127 /* Enable Enhanced MSI-X mode */
3128 gpie |= IXGBE_GPIE_MSIX_MODE
3130 | IXGBE_GPIE_PBA_SUPPORT
3134 /* Fan Failure Interrupt */
3135 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3136 gpie |= IXGBE_SDP1_GPIEN;
3138 /* Thermal Sensor Interrupt */
3139 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3140 gpie |= IXGBE_SDP0_GPIEN_X540;
3142 /* Link detection */
3143 switch (hw->mac.type) {
3144 case ixgbe_mac_82599EB:
3145 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3147 case ixgbe_mac_X550EM_x:
3148 case ixgbe_mac_X550EM_a:
3149 gpie |= IXGBE_SDP0_GPIEN_X540;
3155 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3157 } /* ixgbe_config_gpie */
3159 /************************************************************************
3160 * ixgbe_config_delay_values
3162 *   Requires adapter->max_frame_size to be set.
/* Computes flow-control high/low watermarks from the max frame size
 * and the RX packet-buffer size, then fills in hw->fc fields that
 * ixgbe_fc_enable() later programs into hardware. */
3163 ************************************************************************/
3165 ixgbe_config_delay_values(struct adapter *adapter)
3167 struct ixgbe_hw *hw = &adapter->hw;
3168 u32 rxpb, frame, size, tmp;
3170 frame = adapter->max_frame_size;
3172 /* Calculate High Water */
3173 switch (hw->mac.type) {
3174 case ixgbe_mac_X540:
3175 case ixgbe_mac_X550:
3176 case ixgbe_mac_X550EM_x:
3177 case ixgbe_mac_X550EM_a:
3178 tmp = IXGBE_DV_X540(frame, frame);
/* (default case: older MACs use the standard delay-value macro) */
3181 tmp = IXGBE_DV(frame, frame);
/* Convert bit-times to KB and subtract from the RXPBSIZE (in KB) */
3184 size = IXGBE_BT2KB(tmp);
3185 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3186 hw->fc.high_water[0] = rxpb - size;
3188 /* Now calculate Low Water */
3189 switch (hw->mac.type) {
3190 case ixgbe_mac_X540:
3191 case ixgbe_mac_X550:
3192 case ixgbe_mac_X550EM_x:
3193 case ixgbe_mac_X550EM_a:
3194 tmp = IXGBE_LOW_DV_X540(frame);
3197 tmp = IXGBE_LOW_DV(frame);
3200 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3202 hw->fc.pause_time = IXGBE_FC_PAUSE;
3203 hw->fc.send_xon = TRUE;
3204 } /* ixgbe_config_delay_values */
3206 /************************************************************************
3207 * ixgbe_set_multi - Multicast Update
3209 *   Called whenever multicast address list is updated.
3210 ************************************************************************/
/* if_multi_apply() callback: copies one link-layer multicast address
 * into the adapter's mta[] table at index 'count'.  Non-AF_LINK
 * entries and overflow past MAX_NUM_MULTICAST_ADDRESSES are skipped
 * (return values dropped by the extraction). */
3212 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3214 struct adapter *adapter = arg;
3215 struct ixgbe_mc_addr *mta = adapter->mta;
3217 if (ifma->ifma_addr->sa_family != AF_LINK)
3219 if (count == MAX_NUM_MULTICAST_ADDRESSES)
3221 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3222 mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3223 mta[count].vmdq = adapter->pool;
3226 } /* ixgbe_mc_filter_apply */
/* Rebuild the hardware multicast filter from the interface's current
 * multicast list, and set promiscuous/allmulti policy in FCTRL. */
3229 ixgbe_if_multi_set(if_ctx_t ctx)
3231 struct adapter *adapter = iflib_get_softc(ctx);
3232 struct ixgbe_mc_addr *mta;
3233 struct ifnet *ifp = iflib_get_ifp(ctx);
3238 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
/* NOTE(review): 'mta = adapter->mta;' appears to have been dropped by
 * the extraction just before this bzero — confirm against full source. */
3241 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3243 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3245 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3246 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3247 if (ifp->if_flags & IFF_PROMISC)
3248 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3249 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3250 ifp->if_flags & IFF_ALLMULTI) {
3251 fctrl |= IXGBE_FCTRL_MPE;
3252 fctrl &= ~IXGBE_FCTRL_UPE;
/* (else: neither promiscuous nor allmulti — clear both enables) */
3254 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3256 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program exact filters when the list fits in the hardware table */
3258 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3259 update_ptr = (u8 *)mta;
3260 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3261 ixgbe_mc_array_itr, TRUE);
3264 } /* ixgbe_if_multi_set */
3266 /************************************************************************
3267 * ixgbe_mc_array_itr
3269 *   An iterator function needed by the multicast shared code.
3270 *   It feeds the shared code routine the addresses in the
3271 *   array of ixgbe_set_multi() one by one.
3272 ************************************************************************/
3274 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3276 struct ixgbe_mc_addr *mta;
3278 mta = (struct ixgbe_mc_addr *)*update_ptr;
/* Advance the cursor to the next table entry for the next call;
 * the *vmdq assignment and address return were dropped by the
 * extraction — see the full source. */
3281 *update_ptr = (u8*)(mta + 1);
3284 } /* ixgbe_mc_array_itr */
3286 /************************************************************************
3287 * ixgbe_local_timer - Timer routine
3289 *   Checks for link status, updates statistics,
3290 *   and runs the watchdog check.
3291 ************************************************************************/
3293 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3295 struct adapter *adapter = iflib_get_softc(ctx);
/* NOTE(review): a guard (likely 'if (qid != 0) return;') appears to
 * have been dropped here, so the body runs once per tick, not per
 * queue — confirm against full source. */
3300 /* Check for pluggable optics */
3301 if (adapter->sfp_probe)
3302 if (!ixgbe_sfp_probe(ctx))
3303 return; /* Nothing to do */
/* Refresh cached link state; admin task below pushes it to the stack */
3305 ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3306 &adapter->link_up, 0);
3308 /* Fire off the adminq task */
3309 iflib_admin_intr_deferred(ctx);
3311 } /* ixgbe_if_timer */
3313 /************************************************************************
/* ixgbe_sfp_probe */
3316 *   Determine if a port had optics inserted.
/* Returns TRUE once a supported SFP+ module is detected (return-value
 * lines were dropped by the extraction). */
3317 ************************************************************************/
3319 ixgbe_sfp_probe(if_ctx_t ctx)
3321 struct adapter *adapter = iflib_get_softc(ctx);
3322 struct ixgbe_hw *hw = &adapter->hw;
3323 device_t dev = iflib_get_dev(ctx);
3324 bool result = FALSE;
/* Only the NL PHY with no module currently present needs probing */
3326 if ((hw->phy.type == ixgbe_phy_nl) &&
3327 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3328 s32 ret = hw->phy.ops.identify_sfp(hw);
3331 ret = hw->phy.ops.reset(hw);
3332 adapter->sfp_probe = FALSE;
3333 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3334 device_printf(dev, "Unsupported SFP+ module detected!");
3336 "Reload driver with supported module.\n");
3339 device_printf(dev, "SFP+ module detected!\n");
3340 /* We now have supported optics */
3346 } /* ixgbe_sfp_probe */
3348 /************************************************************************
3349 * ixgbe_handle_mod - Tasklet for SFP module interrupts
/* Identifies a newly inserted SFP+ module, runs the MAC/PHY setup for
 * it, and requests a follow-up MSF (multispeed-fiber) task on success;
 * the MSF request is cleared on any failure path. */
3350 ************************************************************************/
3352 ixgbe_handle_mod(void *context)
3354 if_ctx_t ctx = context;
3355 struct adapter *adapter = iflib_get_softc(ctx);
3356 struct ixgbe_hw *hw = &adapter->hw;
3357 device_t dev = iflib_get_dev(ctx);
3358 u32 err, cage_full = 0;
/* Some parts need a crosstalk workaround: read the cage-present pin
 * from ESDP before trusting the module interrupt */
3360 if (adapter->hw.need_crosstalk_fix) {
3361 switch (hw->mac.type) {
3362 case ixgbe_mac_82599EB:
3363 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3366 case ixgbe_mac_X550EM_x:
3367 case ixgbe_mac_X550EM_a:
3368 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
/* (dropped lines: ESDP bit masks, break statements, and the
 * '!cage_full' check that leads to this goto) */
3376 goto handle_mod_out;
3379 err = hw->phy.ops.identify_sfp(hw);
3380 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3382 "Unsupported SFP+ module type was detected.\n");
3383 goto handle_mod_out;
/* 82598 resets the PHY; later MACs run the SFP-specific MAC setup */
3386 if (hw->mac.type == ixgbe_mac_82598EB)
3387 err = hw->phy.ops.reset(hw);
3389 err = hw->mac.ops.setup_sfp(hw);
3391 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3393 "Setup failure - unsupported SFP+ module type.\n");
3394 goto handle_mod_out;
3396 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
/* handle_mod_out: error path — make sure no stale MSF task runs */
3400 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3401 } /* ixgbe_handle_mod */
3404 /************************************************************************
3405 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
/* Re-negotiates link for the (possibly new) module and rebuilds the
 * ifmedia list so ifconfig reflects the supported speeds. */
3406 ************************************************************************/
3408 ixgbe_handle_msf(void *context)
3410 if_ctx_t ctx = context;
3411 struct adapter *adapter = iflib_get_softc(ctx);
3412 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): declarations of 'autoneg' and 'negotiate' were dropped
 * by the extraction. */
3416 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3417 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3419 autoneg = hw->phy.autoneg_advertised;
3420 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3421 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3422 if (hw->mac.ops.setup_link)
3423 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3425 /* Adjust media types shown in ifconfig */
3426 ifmedia_removeall(adapter->media);
3427 ixgbe_add_media_types(adapter->ctx);
3428 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3429 } /* ixgbe_handle_msf */
3431 /************************************************************************
3432 * ixgbe_handle_phy - Tasklet for external PHY interrupts
/* Services a LASI (link-alarm) interrupt from the external PHY and
 * reports over-temperature or other LASI errors on the console. */
3433 ************************************************************************/
3435 ixgbe_handle_phy(void *context)
3437 if_ctx_t ctx = context;
3438 struct adapter *adapter = iflib_get_softc(ctx);
3439 struct ixgbe_hw *hw = &adapter->hw;
3442 error = hw->phy.ops.handle_lasi(hw);
3443 if (error == IXGBE_ERR_OVERTEMP)
3444 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
/* (else-if on non-zero error, per the dropped line) */
3446 device_printf(adapter->dev,
3447 "Error handling LASI interrupt: %d\n", error);
3448 } /* ixgbe_handle_phy */
3450 /************************************************************************
3451 * ixgbe_if_stop - Stop the hardware
3453 *   Disables all traffic on the adapter by issuing a
3454 *   global reset on the MAC and deallocates TX/RX buffers.
3455 ************************************************************************/
3457 ixgbe_if_stop(if_ctx_t ctx)
3459 struct adapter *adapter = iflib_get_softc(ctx);
3460 struct ixgbe_hw *hw = &adapter->hw;
3462 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
/* Clear the stopped flag first so ixgbe_stop_adapter() actually runs */
3465 hw->adapter_stopped = FALSE;
3466 ixgbe_stop_adapter(hw);
3467 if (hw->mac.type == ixgbe_mac_82599EB)
3468 ixgbe_stop_mac_link_on_d3_82599(hw);
3469 /* Turn off the laser - noop with no optics */
3470 ixgbe_disable_tx_laser(hw);
3472 /* Update the stack */
3473 adapter->link_up = FALSE;
3474 ixgbe_if_update_admin_status(ctx);
3476 /* reprogram the RAR[0] in case user changed it. */
3477 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3480 } /* ixgbe_if_stop */
3482 /************************************************************************
3483 * ixgbe_update_link_status - Update OS on link state
3485 *   Note: Only updates the OS on the cached link state.
3486 *   The real check of the hardware only happens with
/* (a call to ixgbe_check_link(), per the dropped line) */
3488 ************************************************************************/
3490 ixgbe_if_update_admin_status(if_ctx_t ctx)
3492 struct adapter *adapter = iflib_get_softc(ctx);
3493 device_t dev = iflib_get_dev(ctx);
3495 if (adapter->link_up) {
3496 if (adapter->link_active == FALSE) {
3498 device_printf(dev, "Link is up %d Gbps %s \n",
3499 ((adapter->link_speed == 128) ? 10 : 1),
3501 adapter->link_active = TRUE;
3502 /* Update any Flow Control changes */
3503 ixgbe_fc_enable(&adapter->hw);
3504 /* Update DMA coalescing config */
3505 ixgbe_config_dmac(adapter);
3506 /* should actually be negotiated value */
3507 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3509 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3510 ixgbe_ping_all_vfs(adapter);
3512 } else { /* Link down */
3513 if (adapter->link_active == TRUE) {
3515 device_printf(dev, "Link is Down\n");
3516 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3517 adapter->link_active = FALSE;
3518 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3519 ixgbe_ping_all_vfs(adapter);
3523 /* Handle task requests from msix_link() */
3524 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3525 ixgbe_handle_mod(ctx);
3526 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3527 ixgbe_handle_msf(ctx);
3528 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3529 ixgbe_handle_mbx(ctx);
3530 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3531 ixgbe_reinit_fdir(ctx);
3532 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3533 ixgbe_handle_phy(ctx);
/* All queued tasks drained — clear the request mask in one shot */
3534 adapter->task_requests = 0;
3536 ixgbe_update_stats_counters(adapter);
3537 } /* ixgbe_if_update_admin_status */
3539 /************************************************************************
3540 * ixgbe_config_dmac - Configure DMA Coalescing
3541 ************************************************************************/
3543 ixgbe_config_dmac(struct adapter *adapter)
3545 struct ixgbe_hw *hw = &adapter->hw;
3546 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
/* DMA coalescing exists only on X550 and newer with a dmac_config op */
3548 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* Reprogram only when the watchdog timer or link speed changed
 * (XOR is a cheap inequality test on the integer fields) */
3551 if (dcfg->watchdog_timer ^ adapter->dmac ||
3552 dcfg->link_speed ^ adapter->link_speed) {
3553 dcfg->watchdog_timer = adapter->dmac;
3554 dcfg->fcoe_en = FALSE;
3555 dcfg->link_speed = adapter->link_speed;
3558 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3559 dcfg->watchdog_timer, dcfg->link_speed);
3561 hw->mac.ops.dmac_config(hw);
3563 } /* ixgbe_config_dmac */
3565 /************************************************************************
3566 * ixgbe_if_enable_intr
/* Builds the EIMS enable mask for this MAC type / feature set, sets up
 * MSI-X auto-clear, then enables each RX queue vector individually. */
3567 ************************************************************************/
3569 ixgbe_if_enable_intr(if_ctx_t ctx)
3571 struct adapter *adapter = iflib_get_softc(ctx);
3572 struct ixgbe_hw *hw = &adapter->hw;
3573 struct ix_rx_queue *que = adapter->rx_queues;
/* Start from everything-enabled minus the per-queue bits; queue bits
 * are enabled one-by-one at the bottom via ixgbe_enable_queue() */
3576 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3578 switch (adapter->hw.mac.type) {
3579 case ixgbe_mac_82599EB:
3580 mask |= IXGBE_EIMS_ECC;
3581 /* Temperature sensor on some adapters */
3582 mask |= IXGBE_EIMS_GPI_SDP0;
3583 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3584 mask |= IXGBE_EIMS_GPI_SDP1;
3585 mask |= IXGBE_EIMS_GPI_SDP2;
3587 case ixgbe_mac_X540:
3588 /* Detect if Thermal Sensor is enabled */
3589 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3590 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3591 mask |= IXGBE_EIMS_TS;
3592 mask |= IXGBE_EIMS_ECC;
3594 case ixgbe_mac_X550:
3595 /* MAC thermal sensor is automatically enabled */
3596 mask |= IXGBE_EIMS_TS;
3597 mask |= IXGBE_EIMS_ECC;
3599 case ixgbe_mac_X550EM_x:
3600 case ixgbe_mac_X550EM_a:
3601 /* Some devices use SDP0 for important information */
3602 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3603 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3604 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3605 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3606 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3607 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3608 mask |= IXGBE_EICR_GPI_SDP0_X540;
3609 mask |= IXGBE_EIMS_ECC;
3615 /* Enable Fan Failure detection */
3616 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3617 mask |= IXGBE_EIMS_GPI_SDP1;
3619 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3620 mask |= IXGBE_EIMS_MAILBOX;
3621 /* Enable Flow Director */
3622 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3623 mask |= IXGBE_EIMS_FLOW_DIR;
3625 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3627 /* With MSI-X we use auto clear */
3628 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3629 mask = IXGBE_EIMS_ENABLE_MASK;
3630 /* Don't autoclear Link */
3631 mask &= ~IXGBE_EIMS_OTHER;
3632 mask &= ~IXGBE_EIMS_LSC;
3633 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3634 mask &= ~IXGBE_EIMS_MAILBOX;
3635 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3639 * Now enable all queues, this is done separately to
3640 * allow for handling the extended (beyond 32) MSI-X
3641 * vectors that can be used by 82599
3643 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3644 ixgbe_enable_queue(adapter, que->msix);
3646 IXGBE_WRITE_FLUSH(hw);
3648 } /* ixgbe_if_enable_intr */
3650 /************************************************************************
3651 * ixgbe_disable_intr
/* Masks all interrupts: clears MSI-X auto-clear, then writes the EIMC
 * (and extended EIMC pair on post-82598 MACs) to disable everything. */
3652 ************************************************************************/
3654 ixgbe_if_disable_intr(if_ctx_t ctx)
3656 struct adapter *adapter = iflib_get_softc(ctx);
3658 if (adapter->intr_type == IFLIB_INTR_MSIX)
3659 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3660 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
/* (else: newer MACs keep low 16 bits and clear the extended pair) */
3663 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3664 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3665 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3667 IXGBE_WRITE_FLUSH(&adapter->hw);
3669 } /* ixgbe_if_disable_intr */
3671 /************************************************************************
3672 * ixgbe_link_intr_enable
/* iflib callback: re-arm the link/admin interrupt sources after the
 * admin task has run (they are not auto-cleared — see enable_intr). */
3673 ************************************************************************/
3675 ixgbe_link_intr_enable(if_ctx_t ctx)
3677 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3679 /* Re-enable other interrupts */
3680 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3681 } /* ixgbe_link_intr_enable */
3683 /************************************************************************
3684 * ixgbe_if_rx_queue_intr_enable
/* iflib per-queue interrupt re-enable callback. */
3685 ************************************************************************/
3687 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3689 struct adapter *adapter = iflib_get_softc(ctx);
3690 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
/* NOTE(review): passes the ring index (rxr.me) where enable_intr uses
 * que->msix — fine only while the two are aligned; verify. */
3692 ixgbe_enable_queue(adapter, que->rxr.me);
3695 } /* ixgbe_if_rx_queue_intr_enable */
3697 /************************************************************************
3698 * ixgbe_enable_queue
3699 ************************************************************************/
3701 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3703 struct ixgbe_hw *hw = &adapter->hw;
3704 u64 queue = (u64)(1 << vector);
3707 if (hw->mac.type == ixgbe_mac_82598EB) {
3708 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3709 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3711 mask = (queue & 0xFFFFFFFF);
3713 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3714 mask = (queue >> 32);
3716 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3718 } /* ixgbe_enable_queue */
3720 /************************************************************************
3721 * ixgbe_disable_queue
3722 ************************************************************************/
3724 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3726 struct ixgbe_hw *hw = &adapter->hw;
3727 u64 queue = (u64)(1 << vector);
3730 if (hw->mac.type == ixgbe_mac_82598EB) {
3731 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3732 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3734 mask = (queue & 0xFFFFFFFF);
3736 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3737 mask = (queue >> 32);
3739 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3741 } /* ixgbe_disable_queue */
3743 /************************************************************************
3744 * ixgbe_intr - Legacy Interrupt Service Routine
/* Shared/legacy (non-MSI-X) ISR: decodes EICR cause bits, queues admin
 * tasks (MOD/MSF/PHY/link) and lets iflib run the RX thread. */
3745 ************************************************************************/
3747 ixgbe_intr(void *arg)
3749 struct adapter *adapter = arg;
3750 struct ix_rx_queue *que = adapter->rx_queues;
3751 struct ixgbe_hw *hw = &adapter->hw;
3752 if_ctx_t ctx = adapter->ctx;
3753 u32 eicr, eicr_mask;
/* Reading EICR clears the asserted cause bits */
3755 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* (dropped lines: spurious-interrupt check that leads here) */
3759 ixgbe_if_enable_intr(ctx);
3760 return (FILTER_HANDLED);
3763 /* Check for fan failure */
3764 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3765 (eicr & IXGBE_EICR_GPI_SDP1)) {
3766 device_printf(adapter->dev,
3767 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3768 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3771 /* Link status change */
3772 if (eicr & IXGBE_EICR_LSC) {
/* Mask further LSC until the admin task has processed this one */
3773 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3774 iflib_admin_intr_deferred(ctx);
3777 if (ixgbe_is_sfp(hw)) {
3778 /* Pluggable optics-related interrupt */
3779 if (hw->mac.type >= ixgbe_mac_X540)
3780 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3782 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3784 if (eicr & eicr_mask) {
3785 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3786 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3789 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3790 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3791 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3792 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3793 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3797 /* External PHY interrupt */
3798 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3799 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3800 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3802 return (FILTER_SCHEDULE_THREAD);
3805 /************************************************************************
3806 * ixgbe_free_pci_resources
/* Releases the admin IRQ, every per-queue IRQ, and the PCI BAR
 * memory resource.  Safe to call with partially allocated state. */
3807 ************************************************************************/
3809 ixgbe_free_pci_resources(if_ctx_t ctx)
3811 struct adapter *adapter = iflib_get_softc(ctx);
3812 struct ix_rx_queue *que = adapter->rx_queues;
3813 device_t dev = iflib_get_dev(ctx);
3815 /* Release all MSI-X queue resources */
3816 if (adapter->intr_type == IFLIB_INTR_MSIX)
3817 iflib_irq_free(ctx, &adapter->irq);
3820 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3821 iflib_irq_free(ctx, &que->que_irq);
3825 if (adapter->pci_mem != NULL)
3826 bus_release_resource(dev, SYS_RES_MEMORY,
3827 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3828 } /* ixgbe_free_pci_resources */
3830 /************************************************************************
3831 * ixgbe_sysctl_flowcntl
3833 *   SYSCTL wrapper around setting Flow Control
3834 ************************************************************************/
3836 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3838 struct adapter *adapter;
3841 adapter = (struct adapter *)arg1;
3842 fc = adapter->hw.fc.current_mode;
3844 error = sysctl_handle_int(oidp, &fc, 0, req);
/* Read-only access or handler error: nothing more to do */
3845 if ((error) || (req->newptr == NULL))
3848 /* Don't bother if it's not changed */
3849 if (fc == adapter->hw.fc.current_mode)
3852 return ixgbe_set_flowcntl(adapter, fc);
3853 } /* ixgbe_sysctl_flowcntl */
3855 /************************************************************************
3856 * ixgbe_set_flowcntl - Set flow control
3858 *   Flow control values:
/* (dropped lines listed: 0 = off, 1 = rx pause, 2 = tx pause, 3 = full) */
3863 ************************************************************************/
3865 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
/* (switch (fc) — dropped by the extraction) */
3868 case ixgbe_fc_rx_pause:
3869 case ixgbe_fc_tx_pause:
/* fall through: all pause modes record the mode and, with multiple
 * RX queues, turn off per-queue drop so pause can apply backpressure */
3871 adapter->hw.fc.requested_mode = fc;
3872 if (adapter->num_rx_queues > 1)
3873 ixgbe_disable_rx_drop(adapter);
3876 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3877 if (adapter->num_rx_queues > 1)
3878 ixgbe_enable_rx_drop(adapter);
3884 /* Don't autoneg if forcing a value */
3885 adapter->hw.fc.disable_fc_autoneg = TRUE;
3886 ixgbe_fc_enable(&adapter->hw);
3889 } /* ixgbe_set_flowcntl */
3891 /************************************************************************
3892 * ixgbe_enable_rx_drop
3894 *   Enable the hardware to drop packets when the buffer is
3895 *   full. This is useful with multiqueue, so that no single
3896 *   queue being full stalls the entire RX engine. We only
3897 *   enable this when Multiqueue is enabled AND Flow Control
/* (is disabled, per the dropped line) */
3899 ************************************************************************/
3901 ixgbe_enable_rx_drop(struct adapter *adapter)
3903 struct ixgbe_hw *hw = &adapter->hw;
3904 struct rx_ring *rxr;
3907 for (int i = 0; i < adapter->num_rx_queues; i++) {
3908 rxr = &adapter->rx_queues[i].rxr;
3909 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3910 srrctl |= IXGBE_SRRCTL_DROP_EN;
3911 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3914 /* enable drop for each vf */
3915 for (int i = 0; i < adapter->num_vfs; i++) {
3916 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3917 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
/* (IXGBE_QDE_ENABLE flag on the dropped continuation line) */
3920 } /* ixgbe_enable_rx_drop */
3922 /************************************************************************
3923 * ixgbe_disable_rx_drop
/* Mirror of ixgbe_enable_rx_drop: clears the SRRCTL drop-enable bit on
 * every RX ring and clears the QDE drop flag for each VF. */
3924 ************************************************************************/
3926 ixgbe_disable_rx_drop(struct adapter *adapter)
3928 struct ixgbe_hw *hw = &adapter->hw;
3929 struct rx_ring *rxr;
3932 for (int i = 0; i < adapter->num_rx_queues; i++) {
3933 rxr = &adapter->rx_queues[i].rxr;
3934 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3935 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3936 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3939 /* disable drop for each vf */
3940 for (int i = 0; i < adapter->num_vfs; i++) {
3941 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3942 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3944 } /* ixgbe_disable_rx_drop */
3946 /************************************************************************
3947 * ixgbe_sysctl_advertise
3949 *   SYSCTL wrapper around setting advertised speed
3950 ************************************************************************/
3952 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3954 struct adapter *adapter;
3955 int error, advertise;
3957 adapter = (struct adapter *)arg1;
3958 advertise = adapter->advertise;
3960 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* Read-only access or handler error: nothing more to do */
3961 if ((error) || (req->newptr == NULL))
3964 return ixgbe_set_advertise(adapter, advertise);
3965 } /* ixgbe_sysctl_advertise */
3967 /************************************************************************
3968 * ixgbe_set_advertise - Control advertised link speed
/* 'advertise' is a bitmask (values below may be OR'd together): */
3971 *   0x1 - advertise 100 Mb
3972 *   0x2 - advertise 1G
3973 *   0x4 - advertise 10G
3974 *   0x8 - advertise 10 Mb (yes, Mb)
3975 ************************************************************************/
3977 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3979 device_t dev = iflib_get_dev(adapter->ctx);
3980 struct ixgbe_hw *hw;
3981 ixgbe_link_speed speed = 0;
3982 ixgbe_link_speed link_caps = 0;
3983 s32 err = IXGBE_NOT_IMPLEMENTED;
3984 bool negotiate = FALSE;
3986 /* Checks to validate new value */
3987 if (adapter->advertise == advertise) /* no change */
/* NOTE(review): 'hw = &adapter->hw;' was dropped by the extraction
 * between here and the next check — verify against full source. */
3992 /* No speed changes for backplane media */
3993 if (hw->phy.media_type == ixgbe_media_type_backplane)
3996 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3997 (hw->phy.multispeed_fiber))) {
3998 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4002 if (advertise < 0x1 || advertise > 0xF) {
4003 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4007 if (hw->mac.ops.get_link_capabilities) {
4008 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4010 if (err != IXGBE_SUCCESS) {
4011 device_printf(dev, "Unable to determine supported advertise speeds\n");
4016 /* Set new value and report new advertised mode */
4017 if (advertise & 0x1) {
4018 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4019 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4022 speed |= IXGBE_LINK_SPEED_100_FULL;
4024 if (advertise & 0x2) {
4025 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4026 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4029 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4031 if (advertise & 0x4) {
4032 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4033 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4036 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4038 if (advertise & 0x8) {
4039 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4040 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4043 speed |= IXGBE_LINK_SPEED_10_FULL;
/* All requested speeds validated — commit to hardware and cache */
4046 hw->mac.autotry_restart = TRUE;
4047 hw->mac.ops.setup_link(hw, speed, TRUE);
4048 adapter->advertise = advertise;
4051 } /* ixgbe_set_advertise */
4053 /************************************************************************
4054 * ixgbe_get_advertise - Get current advertised speed settings
4056 * Formatted for sysctl usage.
4058 * 0x1 - advertise 100 Mb
4059 * 0x2 - advertise 1G
4060 * 0x4 - advertise 10G
4061 * 0x8 - advertise 10 Mb (yes, Mb)
4062 ************************************************************************/
4064 ixgbe_get_advertise(struct adapter *adapter)
/*
 * Build the sysctl-format advertise bitmask (0x1=100Mb, 0x2=1G,
 * 0x4=10G, 0x8=10Mb) from the PHY's reported link capabilities.
 * Returns 0 (via the missing return paths) for media types where
 * advertised speed is meaningless, or on capability-query failure.
 *
 * NOTE(review): return type, braces, the "speed" local, and the
 * explicit return statements are missing from this extracted view.
 */
4066 struct ixgbe_hw *hw = &adapter->hw;
4068 ixgbe_link_speed link_caps = 0;
4070 bool negotiate = FALSE;
4073 * Advertised speed means nothing unless it's copper or
4076 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4077 !(hw->phy.multispeed_fiber))
4080 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4081 if (err != IXGBE_SUCCESS)
/* Translate capability flags into the sysctl bit encoding */
4085 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4086 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4087 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4088 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4091 } /* ixgbe_get_advertise */
4093 /************************************************************************
4094 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4097 * 0/1 - off / on (use default value of 1000)
4099 * Legal timer values are:
4100 * 50,100,250,500,1000,2000,5000,10000
4102 * Turning off interrupt moderation will also turn this off.
4103 ************************************************************************/
4105 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl for DMA Coalescing: 0 disables; 1 enables with the default
 * 1000us timer; the listed legal timer values are stored verbatim.
 * Any other value is rejected. If the interface is running, the
 * hardware is re-initialized so the new setting takes effect.
 *
 * NOTE(review): the switch statement framing the 0/1/legal/illegal
 * cases, the error returns, and the re-init call are missing from
 * this extracted view.
 */
4107 struct adapter *adapter = (struct adapter *)arg1;
4108 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4112 newval = adapter->dmac;
/* 16-bit handler: dmac timer values all fit in a u16 */
4113 error = sysctl_handle_16(oidp, &newval, 0, req);
4114 if ((error) || (req->newptr == NULL))
4123 /* Enable and use default */
4124 adapter->dmac = 1000;
4134 /* Legal values - allow */
4135 adapter->dmac = newval;
4138 /* Do nothing, illegal value */
4142 /* Re-initialize hardware if it's already running */
4143 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4147 } /* ixgbe_sysctl_dmac */
4150 /************************************************************************
4151 * ixgbe_sysctl_power_state
4153 * Sysctl to test power states
4155 * 0 - set device to D0
4156 * 3 - set device to D3
4157 * (none) - get current device power state
4158 ************************************************************************/
4160 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
/*
 * Debug sysctl to exercise PCI power states: write 0 for D0, 3 for D3
 * (mapped onto DEVICE_SUSPEND/DEVICE_RESUME); a read reports the
 * current state. Transitions other than 0<->3 are rejected (the
 * error-return line is missing from this extracted view).
 */
4162 struct adapter *adapter = (struct adapter *)arg1;
4163 device_t dev = adapter->dev;
4164 int curr_ps, new_ps, error = 0;
4166 curr_ps = new_ps = pci_get_powerstate(dev);
4168 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4169 if ((error) || (req->newptr == NULL))
4172 if (new_ps == curr_ps)
/* Only D0 <-> D3 transitions are meaningful here */
4175 if (new_ps == 3 && curr_ps == 0)
4176 error = DEVICE_SUSPEND(dev);
4177 else if (new_ps == 0 && curr_ps == 3)
4178 error = DEVICE_RESUME(dev);
/* Report the state the device actually landed in */
4182 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4185 } /* ixgbe_sysctl_power_state */
4188 /************************************************************************
4189 * ixgbe_sysctl_wol_enable
4191 * Sysctl to enable/disable the WoL capability,
4192 * if supported by the adapter.
4197 ************************************************************************/
4199 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl to toggle Wake-on-LAN. The written value is normalized to
 * 0/1; enabling is refused when the adapter lacks WoL support
 * (adapter->wol_support). On success the new state is cached in
 * hw->wol_enabled.
 *
 * NOTE(review): return type, braces, and the error/return lines are
 * missing from this extracted view.
 */
4201 struct adapter *adapter = (struct adapter *)arg1;
4202 struct ixgbe_hw *hw = &adapter->hw;
4203 int new_wol_enabled;
4206 new_wol_enabled = hw->wol_enabled;
4207 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4208 if ((error) || (req->newptr == NULL))
/* Collapse any nonzero write to 1 */
4210 new_wol_enabled = !!(new_wol_enabled);
4211 if (new_wol_enabled == hw->wol_enabled)
4214 if (new_wol_enabled > 0 && !adapter->wol_support)
4217 hw->wol_enabled = new_wol_enabled;
4220 } /* ixgbe_sysctl_wol_enable */
4222 /************************************************************************
4223 * ixgbe_sysctl_wufc - Wake Up Filter Control
4225 * Sysctl to enable/disable the types of packets that the
4226 * adapter will wake up on upon receipt.
4228 * 0x1 - Link Status Change
4229 * 0x2 - Magic Packet
4230 * 0x4 - Direct Exact
4231 * 0x8 - Directed Multicast
4233 * 0x20 - ARP/IPv4 Request Packet
4234 * 0x40 - Direct IPv4 Packet
4235 * 0x80 - Direct IPv6 Packet
4237 * Settings not listed above will cause the sysctl to return an error.
4238 ************************************************************************/
4240 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl for the Wake Up Filter Control bitmask (which packet types
 * wake the adapter — see the banner comment above for bit meanings).
 * Only the low byte is settable; any bit in 0xffffff00 is rejected.
 *
 * NOTE(review): return type, braces, and the error-return lines are
 * missing from this extracted view.
 */
4242 struct adapter *adapter = (struct adapter *)arg1;
4246 new_wufc = adapter->wufc;
4248 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4249 if ((error) || (req->newptr == NULL))
4251 if (new_wufc == adapter->wufc)
4254 if (new_wufc & 0xffffff00)
/* Merge the validated low byte with the preserved upper bits */
4258 new_wufc |= (0xffffff & adapter->wufc);
4259 adapter->wufc = new_wufc;
4262 } /* ixgbe_sysctl_wufc */
4265 /************************************************************************
4266 * ixgbe_sysctl_print_rss_config
4267 ************************************************************************/
4269 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
/*
 * Debug-only sysctl (inside #ifdef IXGBE_DEBUG per the trailing
 * #endif) that dumps the RSS redirection table to an sbuf. X550-class
 * MACs use the larger table (the reta_size assignments and the i<32
 * RETA/ERETA split condition are missing from this extracted view).
 */
4271 struct adapter *adapter = (struct adapter *)arg1;
4272 struct ixgbe_hw *hw = &adapter->hw;
4273 device_t dev = adapter->dev;
4275 int error = 0, reta_size;
4278 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4280 device_printf(dev, "Could not allocate sbuf for output.\n");
4284 // TODO: use sbufs to make a string to print out
4285 /* Set multiplier for RETA setup and table size based on MAC */
4286 switch (adapter->hw.mac.type) {
4287 case ixgbe_mac_X550:
4288 case ixgbe_mac_X550EM_x:
4289 case ixgbe_mac_X550EM_a:
4297 /* Print out the redirection table */
4298 sbuf_cat(buf, "\n");
4299 for (int i = 0; i < reta_size; i++) {
/* First 32 entries live in RETA, the remainder in ERETA */
4301 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4302 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4304 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4305 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4309 // TODO: print more config
4311 error = sbuf_finish(buf);
4313 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* sbuf_delete presumably follows here — missing from this view */
4318 } /* ixgbe_sysctl_print_rss_config */
4319 #endif /* IXGBE_DEBUG */
4321 /************************************************************************
4322 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4324 * For X552/X557-AT devices using an external PHY
4325 ************************************************************************/
4327 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
/*
 * Read the external PHY's current-temperature register and report it
 * via a 16-bit sysctl. Only supported on X552/X557-AT
 * (IXGBE_DEV_ID_X550EM_X_10G_T); other devices get an error.
 *
 * NOTE(review): the "®" on the read_reg line is mojibake for "&reg"
 * (HTML-entity decoding damage); the u16 reg declaration, braces,
 * error returns, and the "reg >> 8" shift line are also missing from
 * this extracted view — restore from the full source.
 */
4329 struct adapter *adapter = (struct adapter *)arg1;
4330 struct ixgbe_hw *hw = &adapter->hw;
4333 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4334 device_printf(iflib_get_dev(adapter->ctx),
4335 "Device has no supported external thermal sensor.\n");
4339 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4340 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4341 device_printf(iflib_get_dev(adapter->ctx),
4342 "Error reading from PHY's current temperature register\n");
4346 /* Shift temp for output */
4349 return (sysctl_handle_16(oidp, NULL, reg, req));
4350 } /* ixgbe_sysctl_phy_temp */
4352 /************************************************************************
4353 * ixgbe_sysctl_phy_overtemp_occurred
4355 * Reports (directly from the PHY) whether the current PHY
4356 * temperature is over the overtemp threshold.
4357 ************************************************************************/
4359 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
/*
 * Report (directly from the external PHY's status register) whether
 * the over-temperature threshold has been exceeded; exposes the
 * occurrence bit (0x4000) as 0/1 via a 16-bit sysctl. Same device
 * restriction as ixgbe_sysctl_phy_temp.
 *
 * NOTE(review): "®" on the read_reg line is mojibake for "&reg";
 * the u16 reg declaration, braces, and error returns are missing
 * from this extracted view.
 */
4361 struct adapter *adapter = (struct adapter *)arg1;
4362 struct ixgbe_hw *hw = &adapter->hw;
4365 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4366 device_printf(iflib_get_dev(adapter->ctx),
4367 "Device has no supported external thermal sensor.\n");
4371 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4372 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4373 device_printf(iflib_get_dev(adapter->ctx),
4374 "Error reading from PHY's temperature status register\n");
4378 /* Get occurrence bit */
4379 reg = !!(reg & 0x4000);
4381 return (sysctl_handle_16(oidp, 0, reg, req));
4382 } /* ixgbe_sysctl_phy_overtemp_occurred */
4384 /************************************************************************
4385 * ixgbe_sysctl_eee_state
4387 * Sysctl to set EEE power saving feature
4391 * (none) - get current device EEE state
4392 ************************************************************************/
4394 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl to enable/disable Energy Efficient Ethernet. Rejects the
 * change when EEE is not in feat_cap or the value is outside 0/1,
 * programs the MAC via setup_eee(), restarts autonegotiation, and
 * caches the result in adapter->feat_en.
 *
 * NOTE(review): return type, braces, the retval declaration, the
 * error returns, and the if/else framing around the feat_en update
 * are missing from this extracted view.
 */
4396 struct adapter *adapter = (struct adapter *)arg1;
4397 device_t dev = adapter->dev;
4398 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4399 int curr_eee, new_eee, error = 0;
/* Current state is derived from the enabled-features bitmask */
4402 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4404 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4405 if ((error) || (req->newptr == NULL))
4409 if (new_eee == curr_eee)
4413 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4416 /* Bounds checking */
4417 if ((new_eee < 0) || (new_eee > 1))
4420 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4422 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4426 /* Restart auto-neg */
4429 device_printf(dev, "New EEE state: %d\n", new_eee);
4431 /* Cache new value */
4433 adapter->feat_en |= IXGBE_FEATURE_EEE;
4435 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4438 } /* ixgbe_sysctl_eee_state */
4440 /************************************************************************
4441 * ixgbe_init_device_features
4442 ************************************************************************/
4444 ixgbe_init_device_features(struct adapter *adapter)
/*
 * Populate adapter->feat_cap (what the hardware can do, keyed off MAC
 * type and device id) and adapter->feat_en (what is actually turned
 * on: defaults, global-sysctl opt-ins, then dependency pruning —
 * e.g. RSS and SR-IOV both require MSI-X).
 *
 * NOTE(review): this extracted view is missing the per-case "break;"
 * statements in the switch, the default case, and several braces;
 * the cases below do NOT fall through in the full source — confirm
 * against the original before editing.
 */
4446 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4449 | IXGBE_FEATURE_MSIX
4450 | IXGBE_FEATURE_LEGACY_IRQ;
4452 /* Set capabilities first... */
4453 switch (adapter->hw.mac.type) {
4454 case ixgbe_mac_82598EB:
4455 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4456 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4458 case ixgbe_mac_X540:
4459 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4460 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4461 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4462 (adapter->hw.bus.func == 0))
4463 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4465 case ixgbe_mac_X550:
4466 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4467 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4468 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4470 case ixgbe_mac_X550EM_x:
4471 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4472 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4473 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4474 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4476 case ixgbe_mac_X550EM_a:
4477 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4478 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a does not support legacy (INTx) interrupts */
4479 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4480 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4481 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4482 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4483 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4486 case ixgbe_mac_82599EB:
4487 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4488 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4489 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4490 (adapter->hw.bus.func == 0))
4491 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4492 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4493 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4499 /* Enabled by default... */
4500 /* Fan failure detection */
4501 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4502 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4504 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4505 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4507 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4508 adapter->feat_en |= IXGBE_FEATURE_EEE;
4509 /* Thermal Sensor */
4510 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4511 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4513 /* Enabled via global sysctl... */
4515 if (ixgbe_enable_fdir) {
4516 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4517 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4519 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4522 * Message Signal Interrupts - Extended (MSI-X)
4523 * Normal MSI is only enabled if MSI-X calls fail.
4525 if (!ixgbe_enable_msix)
4526 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4527 /* Receive-Side Scaling (RSS) */
4528 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4529 adapter->feat_en |= IXGBE_FEATURE_RSS;
4531 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors */
4533 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4534 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4535 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4536 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4537 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4539 } /* ixgbe_init_device_features */
4541 /************************************************************************
4542 * ixgbe_check_fan_failure
4543 ************************************************************************/
4545 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
/*
 * Check the fan-failure indication bit in 'reg' and log a critical
 * message if set. The bit position differs between the interrupt
 * cause register (EICR, via GPI_SDP1_BY_MAC) and the non-interrupt
 * path (the alternate mask operand is missing from this extracted
 * view, as are the mask declaration and the reg&mask test line).
 */
4549 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4553 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4554 } /* ixgbe_check_fan_failure */