1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
41 #include "ixgbe_sriov.h"
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
47 /************************************************************************
49 ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
53 /************************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixgbe_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 ************************************************************************/
/*
 * PCI ID match table consumed by iflib_device_probe().
 * Each entry: { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String }.
 */
static pci_vendor_info_t ixgbe_vendor_info_array[] =
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* required last entry */
	/* NOTE(review): the terminating all-zeros PVID_END sentinel that iflib
	 * requires is not visible in this chunk -- confirm it is present. */
/*
 * iflib device-interface (IFDI) entry points; these are wired into the
 * ixgbe_if_methods[] table below.
 */
static void *ixgbe_register(device_t dev);
static int ixgbe_if_attach_pre(if_ctx_t ctx);
static int ixgbe_if_attach_post(if_ctx_t ctx);
static int ixgbe_if_detach(if_ctx_t ctx);
static int ixgbe_if_shutdown(if_ctx_t ctx);
static int ixgbe_if_suspend(if_ctx_t ctx);
static int ixgbe_if_resume(if_ctx_t ctx);
static void ixgbe_if_stop(if_ctx_t ctx);
/* non-static: also called directly from the SR-IOV support code */
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static void ixgbe_link_intr_enable(if_ctx_t ctx);
static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int ixgbe_if_media_change(if_ctx_t ctx);
static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
/* non-static: legacy (INTx/MSI) interrupt handler shared with iflib */
int ixgbe_intr(void *arg);
144 /************************************************************************
145 * Function prototypes
146 ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 static int ixgbe_msix_link(void *arg);
167 static int ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 static int ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int ixgbe_set_flowcntl(struct adapter *, int);
185 static int ixgbe_set_advertise(struct adapter *, int);
186 static int ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
191 /* Sysctl handlers */
192 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
199 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
202 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
215 /************************************************************************
216 * FreeBSD Device Interface Entry Points
217 ************************************************************************/
218 static device_method_t ix_methods[] = {
219 /* Device interface */
220 DEVMETHOD(device_register, ixgbe_register),
221 DEVMETHOD(device_probe, iflib_device_probe),
222 DEVMETHOD(device_attach, iflib_device_attach),
223 DEVMETHOD(device_detach, iflib_device_detach),
224 DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 DEVMETHOD(device_suspend, iflib_device_suspend),
226 DEVMETHOD(device_resume, iflib_device_resume),
228 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
235 static driver_t ix_driver = {
236 "ix", ix_methods, sizeof(struct adapter),
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
/* IFDI method table: maps iflib callbacks onto this driver's handlers */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	/*
	 * NOTE(review): the TX-queue interrupt-enable slot is wired to the
	 * RX handler; this appears deliberate (the handler enables a vector
	 * by queue id, which serves both directions) -- confirm against
	 * ixgbe_if_rx_queue_intr_enable().
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	/* SR-IOV IFDI hooks */
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
/*
 * TUNEABLE PARAMETERS:
 * Loader tunables / sysctls under hw.ix controlling driver defaults.
 */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");

/* iflib driver descriptor handed to iflib via ixgbe_sctx_init below */
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable it.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
/* TX/RX fast-path function table, defined in the ix_txrx source file */
extern struct if_txrx ixgbe_txrx;

/*
 * Shared-context template describing this driver's DMA alignment,
 * segment limits and descriptor-ring bounds to iflib.
 */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	/* one admin (link/mailbox) interrupt in addition to the queue vectors */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
395 /************************************************************************
396 * ixgbe_if_tx_queues_alloc
397 ************************************************************************/
399 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
400 int ntxqs, int ntxqsets)
402 struct adapter *adapter = iflib_get_softc(ctx);
403 if_softc_ctx_t scctx = adapter->shared;
404 struct ix_tx_queue *que;
407 MPASS(adapter->num_tx_queues > 0);
408 MPASS(adapter->num_tx_queues == ntxqsets);
411 /* Allocate queue structure memory */
413 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
414 M_IXGBE, M_NOWAIT | M_ZERO);
415 if (!adapter->tx_queues) {
416 device_printf(iflib_get_dev(ctx),
417 "Unable to allocate TX ring memory\n");
421 for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
422 struct tx_ring *txr = &que->txr;
424 /* In case SR-IOV is enabled, align the index properly */
425 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
428 txr->adapter = que->adapter = adapter;
430 /* Allocate report status array */
431 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
432 if (txr->tx_rsq == NULL) {
436 for (j = 0; j < scctx->isc_ntxd[0]; j++)
437 txr->tx_rsq[j] = QIDX_INVALID;
438 /* get the virtual and physical address of the hardware queues */
439 txr->tail = IXGBE_TDT(txr->me);
440 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
441 txr->tx_paddr = paddrs[i];
444 txr->total_packets = 0;
446 /* Set the rate at which we sample packets */
447 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
448 txr->atr_sample = atr_sample_rate;
452 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
453 adapter->num_tx_queues);
458 ixgbe_if_queues_free(ctx);
461 } /* ixgbe_if_tx_queues_alloc */
463 /************************************************************************
464 * ixgbe_if_rx_queues_alloc
465 ************************************************************************/
467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468 int nrxqs, int nrxqsets)
470 struct adapter *adapter = iflib_get_softc(ctx);
471 struct ix_rx_queue *que;
474 MPASS(adapter->num_rx_queues > 0);
475 MPASS(adapter->num_rx_queues == nrxqsets);
478 /* Allocate queue structure memory */
480 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
481 M_IXGBE, M_NOWAIT | M_ZERO);
482 if (!adapter->rx_queues) {
483 device_printf(iflib_get_dev(ctx),
484 "Unable to allocate TX ring memory\n");
488 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
489 struct rx_ring *rxr = &que->rxr;
491 /* In case SR-IOV is enabled, align the index properly */
492 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
495 rxr->adapter = que->adapter = adapter;
497 /* get the virtual and physical address of the hw queues */
498 rxr->tail = IXGBE_RDT(rxr->me);
499 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
500 rxr->rx_paddr = paddrs[i];
505 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
506 adapter->num_rx_queues);
509 } /* ixgbe_if_rx_queues_alloc */
511 /************************************************************************
512 * ixgbe_if_queues_free
513 ************************************************************************/
515 ixgbe_if_queues_free(if_ctx_t ctx)
517 struct adapter *adapter = iflib_get_softc(ctx);
518 struct ix_tx_queue *tx_que = adapter->tx_queues;
519 struct ix_rx_queue *rx_que = adapter->rx_queues;
522 if (tx_que != NULL) {
523 for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
524 struct tx_ring *txr = &tx_que->txr;
525 if (txr->tx_rsq == NULL)
528 free(txr->tx_rsq, M_IXGBE);
532 free(adapter->tx_queues, M_IXGBE);
533 adapter->tx_queues = NULL;
535 if (rx_que != NULL) {
536 free(adapter->rx_queues, M_IXGBE);
537 adapter->rx_queues = NULL;
539 } /* ixgbe_if_queues_free */
541 /************************************************************************
542 * ixgbe_initialize_rss_mapping
543 ************************************************************************/
545 ixgbe_initialize_rss_mapping(struct adapter *adapter)
547 struct ixgbe_hw *hw = &adapter->hw;
548 u32 reta = 0, mrqc, rss_key[10];
549 int queue_id, table_size, index_mult;
553 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
554 /* Fetch the configured RSS key */
555 rss_getkey((uint8_t *)&rss_key);
557 /* set up random bits */
558 arc4rand(&rss_key, sizeof(rss_key), 0);
561 /* Set multiplier for RETA setup and table size based on MAC */
564 switch (adapter->hw.mac.type) {
565 case ixgbe_mac_82598EB:
569 case ixgbe_mac_X550EM_x:
570 case ixgbe_mac_X550EM_a:
577 /* Set up the redirection table */
578 for (i = 0, j = 0; i < table_size; i++, j++) {
579 if (j == adapter->num_rx_queues)
582 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
584 * Fetch the RSS bucket id for the given indirection
585 * entry. Cap it at the number of configured buckets
586 * (which is num_rx_queues.)
588 queue_id = rss_get_indirection_to_bucket(i);
589 queue_id = queue_id % adapter->num_rx_queues;
591 queue_id = (j * index_mult);
594 * The low 8 bits are for hash value (n+0);
595 * The next 8 bits are for hash value (n+1), etc.
598 reta = reta | (((uint32_t)queue_id) << 24);
601 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
603 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
609 /* Now fill our hash function seeds */
610 for (i = 0; i < 10; i++)
611 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
613 /* Perform hash on these packet types */
614 if (adapter->feat_en & IXGBE_FEATURE_RSS)
615 rss_hash_config = rss_gethashconfig();
618 * Disable UDP - IP fragments aren't currently being handled
619 * and so we end up with a mix of 2-tuple and 4-tuple
622 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
623 | RSS_HASHTYPE_RSS_TCP_IPV4
624 | RSS_HASHTYPE_RSS_IPV6
625 | RSS_HASHTYPE_RSS_TCP_IPV6
626 | RSS_HASHTYPE_RSS_IPV6_EX
627 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
630 mrqc = IXGBE_MRQC_RSSEN;
631 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
632 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
633 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
634 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
635 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
636 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
637 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
638 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
639 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
640 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
641 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
642 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
643 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
644 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
645 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
646 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
647 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
648 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
649 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
650 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
651 } /* ixgbe_initialize_rss_mapping */
653 /************************************************************************
654 * ixgbe_initialize_receive_units - Setup receive registers and features.
655 ************************************************************************/
656 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
659 ixgbe_initialize_receive_units(if_ctx_t ctx)
661 struct adapter *adapter = iflib_get_softc(ctx);
662 if_softc_ctx_t scctx = adapter->shared;
663 struct ixgbe_hw *hw = &adapter->hw;
664 struct ifnet *ifp = iflib_get_ifp(ctx);
665 struct ix_rx_queue *que;
667 u32 bufsz, fctrl, srrctl, rxcsum;
671 * Make sure receives are disabled while
672 * setting up the descriptor ring
674 ixgbe_disable_rx(hw);
676 /* Enable broadcasts */
677 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
678 fctrl |= IXGBE_FCTRL_BAM;
679 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
680 fctrl |= IXGBE_FCTRL_DPF;
681 fctrl |= IXGBE_FCTRL_PMCF;
683 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
685 /* Set for Jumbo Frames? */
686 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
687 if (ifp->if_mtu > ETHERMTU)
688 hlreg |= IXGBE_HLREG0_JUMBOEN;
690 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
691 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
693 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
694 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
696 /* Setup the Base and Length of the Rx Descriptor Ring */
697 for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
698 struct rx_ring *rxr = &que->rxr;
699 u64 rdba = rxr->rx_paddr;
703 /* Setup the Base and Length of the Rx Descriptor Ring */
704 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
705 (rdba & 0x00000000ffffffffULL));
706 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
707 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
708 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
710 /* Set up the SRRCTL register */
711 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
712 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
713 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
715 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
718 * Set DROP_EN iff we have no flow control and >1 queue.
719 * Note that srrctl was cleared shortly before during reset,
720 * so we do not need to clear the bit, but do it just in case
721 * this code is moved elsewhere.
723 if (adapter->num_rx_queues > 1 &&
724 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
725 srrctl |= IXGBE_SRRCTL_DROP_EN;
727 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
730 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
732 /* Setup the HW Rx Head and Tail Descriptor Pointers */
733 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
734 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
736 /* Set the driver rx tail address */
737 rxr->tail = IXGBE_RDT(rxr->me);
740 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
741 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
742 | IXGBE_PSRTYPE_UDPHDR
743 | IXGBE_PSRTYPE_IPV4HDR
744 | IXGBE_PSRTYPE_IPV6HDR;
745 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
748 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
750 ixgbe_initialize_rss_mapping(adapter);
752 if (adapter->num_rx_queues > 1) {
753 /* RSS and RX IPP Checksum are mutually exclusive */
754 rxcsum |= IXGBE_RXCSUM_PCSD;
757 if (ifp->if_capenable & IFCAP_RXCSUM)
758 rxcsum |= IXGBE_RXCSUM_PCSD;
760 /* This is useful for calculating UDP/IP fragment checksums */
761 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
762 rxcsum |= IXGBE_RXCSUM_IPPCSE;
764 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
766 } /* ixgbe_initialize_receive_units */
768 /************************************************************************
769 * ixgbe_initialize_transmit_units - Enable transmit units.
770 ************************************************************************/
772 ixgbe_initialize_transmit_units(if_ctx_t ctx)
774 struct adapter *adapter = iflib_get_softc(ctx);
775 struct ixgbe_hw *hw = &adapter->hw;
776 if_softc_ctx_t scctx = adapter->shared;
777 struct ix_tx_queue *que;
780 /* Setup the Base and Length of the Tx Descriptor Ring */
781 for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
783 struct tx_ring *txr = &que->txr;
784 u64 tdba = txr->tx_paddr;
788 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
789 (tdba & 0x00000000ffffffffULL));
790 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
791 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
792 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
794 /* Setup the HW Tx Head and Tail descriptor pointers */
795 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
796 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
798 /* Cache the tail address */
799 txr->tail = IXGBE_TDT(txr->me);
801 txr->tx_rs_cidx = txr->tx_rs_pidx;
802 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
803 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
804 txr->tx_rsq[k] = QIDX_INVALID;
806 /* Disable Head Writeback */
808 * Note: for X550 series devices, these registers are actually
809 * prefixed with TPH_ isntead of DCA_, but the addresses and
810 * fields remain the same.
812 switch (hw->mac.type) {
813 case ixgbe_mac_82598EB:
814 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
817 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
820 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
821 switch (hw->mac.type) {
822 case ixgbe_mac_82598EB:
823 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
826 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
832 if (hw->mac.type != ixgbe_mac_82598EB) {
833 u32 dmatxctl, rttdcs;
835 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
836 dmatxctl |= IXGBE_DMATXCTL_TE;
837 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
838 /* Disable arbiter to set MTQC */
839 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
840 rttdcs |= IXGBE_RTTDCS_ARBDIS;
841 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
842 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
843 ixgbe_get_mtqc(adapter->iov_mode));
844 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
845 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
848 } /* ixgbe_initialize_transmit_units */
850 /************************************************************************
852 ************************************************************************/
854 ixgbe_register(device_t dev)
857 } /* ixgbe_register */
859 /************************************************************************
860 * ixgbe_if_attach_pre - Device initialization routine, part 1
862 * Called when the driver is being loaded.
863 * Identifies the type of hardware, initializes the hardware,
864 * and initializes iflib structures.
866 * return 0 on success, positive on failure
867 ************************************************************************/
869 ixgbe_if_attach_pre(if_ctx_t ctx)
871 struct adapter *adapter;
873 if_softc_ctx_t scctx;
878 INIT_DEBUGOUT("ixgbe_attach: begin");
880 /* Allocate, clear, and link in our adapter structure */
881 dev = iflib_get_dev(ctx);
882 adapter = iflib_get_softc(ctx);
883 adapter->hw.back = adapter;
886 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
887 adapter->media = iflib_get_media(ctx);
890 /* Determine hardware revision */
891 hw->vendor_id = pci_get_vendor(dev);
892 hw->device_id = pci_get_device(dev);
893 hw->revision_id = pci_get_revid(dev);
894 hw->subsystem_vendor_id = pci_get_subvendor(dev);
895 hw->subsystem_device_id = pci_get_subdevice(dev);
897 /* Do base PCI setup - map BAR0 */
898 if (ixgbe_allocate_pci_resources(ctx)) {
899 device_printf(dev, "Allocation of PCI resources failed\n");
903 /* let hardware know driver is loaded */
904 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
905 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
906 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
909 * Initialize the shared code
911 if (ixgbe_init_shared_code(hw) != 0) {
912 device_printf(dev, "Unable to initialize the shared code\n");
917 if (hw->mbx.ops.init_params)
918 hw->mbx.ops.init_params(hw);
920 hw->allow_unsupported_sfp = allow_unsupported_sfp;
922 if (hw->mac.type != ixgbe_mac_82598EB)
923 hw->phy.smart_speed = ixgbe_smart_speed;
925 ixgbe_init_device_features(adapter);
927 /* Enable WoL (if supported) */
928 ixgbe_check_wol_support(adapter);
930 /* Verify adapter fan is still functional (if applicable) */
931 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
932 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
933 ixgbe_check_fan_failure(adapter, esdp, FALSE);
936 /* Ensure SW/FW semaphore is free */
937 ixgbe_init_swfw_semaphore(hw);
939 /* Set an initial default flow control value */
940 hw->fc.requested_mode = ixgbe_flow_control;
942 hw->phy.reset_if_overtemp = TRUE;
943 error = ixgbe_reset_hw(hw);
944 hw->phy.reset_if_overtemp = FALSE;
945 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
947 * No optics in this port, set up
948 * so the timer routine will probe
949 * for later insertion.
951 adapter->sfp_probe = TRUE;
953 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
954 device_printf(dev, "Unsupported SFP+ module detected!\n");
958 device_printf(dev, "Hardware initialization failed\n");
963 /* Make sure we have a good EEPROM before we read from it */
964 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
965 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
970 error = ixgbe_start_hw(hw);
972 case IXGBE_ERR_EEPROM_VERSION:
973 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
975 case IXGBE_ERR_SFP_NOT_SUPPORTED:
976 device_printf(dev, "Unsupported SFP+ Module\n");
979 case IXGBE_ERR_SFP_NOT_PRESENT:
980 device_printf(dev, "No SFP+ Module found\n");
986 /* Most of the iflib initialization... */
988 iflib_set_mac(ctx, hw->mac.addr);
989 switch (adapter->hw.mac.type) {
991 case ixgbe_mac_X550EM_x:
992 case ixgbe_mac_X550EM_a:
993 scctx->isc_rss_table_size = 512;
994 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
997 scctx->isc_rss_table_size = 128;
998 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1001 /* Allow legacy interrupts */
1002 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1004 scctx->isc_txqsizes[0] =
1005 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1006 sizeof(u32), DBA_ALIGN),
1007 scctx->isc_rxqsizes[0] =
1008 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1012 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1013 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1014 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1015 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1017 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1018 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1021 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1023 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1024 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1025 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1027 scctx->isc_txrx = &ixgbe_txrx;
1029 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1034 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1035 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1036 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1037 ixgbe_free_pci_resources(ctx);
1040 } /* ixgbe_if_attach_pre */
1042 /*********************************************************************
1043 * ixgbe_if_attach_post - Device initialization routine, part 2
1045 * Called during driver load, but after interrupts and
1046 * resources have been allocated and configured.
1047 * Sets up some data structures not relevant to iflib.
1049 * return 0 on success, positive on failure
1050 *********************************************************************/
1052 ixgbe_if_attach_post(if_ctx_t ctx)
1055 struct adapter *adapter;
1056 struct ixgbe_hw *hw;
1059 dev = iflib_get_dev(ctx);
1060 adapter = iflib_get_softc(ctx);
1064 if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1065 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1066 device_printf(dev, "Device does not support legacy interrupts");
1071 /* Allocate multicast array memory. */
1072 adapter->mta = malloc(sizeof(*adapter->mta) *
1073 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1074 if (adapter->mta == NULL) {
1075 device_printf(dev, "Can not allocate multicast setup array\n");
1080 /* hw.ix defaults init */
1081 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1083 /* Enable the optics for 82599 SFP+ fiber */
1084 ixgbe_enable_tx_laser(hw);
1086 /* Enable power to the phy. */
1087 ixgbe_set_phy_power(hw, TRUE);
1089 ixgbe_initialize_iov(adapter);
1091 error = ixgbe_setup_interface(ctx);
1093 device_printf(dev, "Interface setup failed: %d\n", error);
1097 ixgbe_if_update_admin_status(ctx);
1099 /* Initialize statistics */
1100 ixgbe_update_stats_counters(adapter);
1101 ixgbe_add_hw_stats(adapter);
1103 /* Check PCIE slot type/speed/width */
1104 ixgbe_get_slot_info(adapter);
1107 * Do time init and sysctl init here, but
1108 * only on the first port of a bypass adapter.
1110 ixgbe_bypass_init(adapter);
1112 /* Set an initial dmac value */
1114 /* Set initial advertised speeds (if applicable) */
1115 adapter->advertise = ixgbe_get_advertise(adapter);
1117 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1118 ixgbe_define_iov_schemas(dev, &error);
1121 ixgbe_add_device_sysctls(ctx);
1126 } /* ixgbe_if_attach_post */
1128 /************************************************************************
1129 * ixgbe_check_wol_support
1131 * Checks whether the adapter's ports are capable of
1132 * Wake On LAN by reading the adapter's NVM.
1134 * Sets each port's hw->wol_enabled value depending
1135 * on the value read here.
1136 ************************************************************************/
1138 ixgbe_check_wol_support(struct adapter *adapter)
1140 struct ixgbe_hw *hw = &adapter->hw;
1143 /* Find out WoL support for port */
1144 adapter->wol_support = hw->wol_enabled = 0;
1145 ixgbe_get_device_caps(hw, &dev_caps);
1146 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1147 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1149 adapter->wol_support = hw->wol_enabled = 1;
1151 /* Save initial wake up filter configuration */
1152 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1155 } /* ixgbe_check_wol_support */
1157 /************************************************************************
1158 * ixgbe_setup_interface
1160 * Setup networking device structure and register an interface.
1161 ************************************************************************/
1163 ixgbe_setup_interface(if_ctx_t ctx)
1165 struct ifnet *ifp = iflib_get_ifp(ctx);
1166 struct adapter *adapter = iflib_get_softc(ctx);
1168 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1170 if_setbaudrate(ifp, IF_Gbps(10));
1172 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1174 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1176 ixgbe_add_media_types(ctx);
1178 /* Autoselect media by default */
1179 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1182 } /* ixgbe_setup_interface */
1184 /************************************************************************
1185 * ixgbe_if_get_counter
1186 ************************************************************************/
1188 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1190 struct adapter *adapter = iflib_get_softc(ctx);
1191 if_t ifp = iflib_get_ifp(ctx);
1194 case IFCOUNTER_IPACKETS:
1195 return (adapter->ipackets);
1196 case IFCOUNTER_OPACKETS:
1197 return (adapter->opackets);
1198 case IFCOUNTER_IBYTES:
1199 return (adapter->ibytes);
1200 case IFCOUNTER_OBYTES:
1201 return (adapter->obytes);
1202 case IFCOUNTER_IMCASTS:
1203 return (adapter->imcasts);
1204 case IFCOUNTER_OMCASTS:
1205 return (adapter->omcasts);
1206 case IFCOUNTER_COLLISIONS:
1208 case IFCOUNTER_IQDROPS:
1209 return (adapter->iqdrops);
1210 case IFCOUNTER_OQDROPS:
1212 case IFCOUNTER_IERRORS:
1213 return (adapter->ierrors);
1215 return (if_get_counter_default(ifp, cnt));
1217 } /* ixgbe_if_get_counter */
1219 /************************************************************************
1221 ************************************************************************/
1223 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1225 struct adapter *adapter = iflib_get_softc(ctx);
1226 struct ixgbe_hw *hw = &adapter->hw;
1230 if (hw->phy.ops.read_i2c_byte == NULL)
1232 for (i = 0; i < req->len; i++)
1233 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1234 req->dev_addr, &req->data[i]);
1236 } /* ixgbe_if_i2c_req */
1238 /************************************************************************
1239 * ixgbe_add_media_types
1240 ************************************************************************/
1242 ixgbe_add_media_types(if_ctx_t ctx)
1244 struct adapter *adapter = iflib_get_softc(ctx);
1245 struct ixgbe_hw *hw = &adapter->hw;
1246 device_t dev = iflib_get_dev(ctx);
1249 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1251 /* Media types with matching FreeBSD media defines */
1252 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1253 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1254 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1255 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1256 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1257 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1258 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1259 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1261 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1262 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1263 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1266 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1267 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1268 if (hw->phy.multispeed_fiber)
1269 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1272 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1273 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1274 if (hw->phy.multispeed_fiber)
1275 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1277 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1278 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1279 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1280 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1282 #ifdef IFM_ETH_XTYPE
1283 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1284 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1285 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1286 ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1287 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1288 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1289 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1290 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1292 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1293 device_printf(dev, "Media supported: 10GbaseKR\n");
1294 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1295 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1297 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1298 device_printf(dev, "Media supported: 10GbaseKX4\n");
1299 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1300 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1302 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1303 device_printf(dev, "Media supported: 1000baseKX\n");
1304 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1305 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1307 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1308 device_printf(dev, "Media supported: 2500baseKX\n");
1309 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1310 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1313 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1314 device_printf(dev, "Media supported: 1000baseBX\n");
1316 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1317 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1319 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1322 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1323 } /* ixgbe_add_media_types */
1325 /************************************************************************
1327 ************************************************************************/
1329 ixgbe_is_sfp(struct ixgbe_hw *hw)
1331 switch (hw->mac.type) {
1332 case ixgbe_mac_82598EB:
1333 if (hw->phy.type == ixgbe_phy_nl)
1336 case ixgbe_mac_82599EB:
1337 switch (hw->mac.ops.get_media_type(hw)) {
1338 case ixgbe_media_type_fiber:
1339 case ixgbe_media_type_fiber_qsfp:
1344 case ixgbe_mac_X550EM_x:
1345 case ixgbe_mac_X550EM_a:
1346 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1352 } /* ixgbe_is_sfp */
1354 /************************************************************************
1356 ************************************************************************/
1358 ixgbe_config_link(if_ctx_t ctx)
1360 struct adapter *adapter = iflib_get_softc(ctx);
1361 struct ixgbe_hw *hw = &adapter->hw;
1362 u32 autoneg, err = 0;
1363 bool sfp, negotiate;
1365 sfp = ixgbe_is_sfp(hw);
1368 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1369 iflib_admin_intr_deferred(ctx);
1371 if (hw->mac.ops.check_link)
1372 err = ixgbe_check_link(hw, &adapter->link_speed,
1373 &adapter->link_up, FALSE);
1376 autoneg = hw->phy.autoneg_advertised;
1377 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1378 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1382 if (hw->mac.ops.setup_link)
1383 err = hw->mac.ops.setup_link(hw, autoneg,
1386 } /* ixgbe_config_link */
1388 /************************************************************************
1389 * ixgbe_update_stats_counters - Update board statistics counters.
1390 ************************************************************************/
1392 ixgbe_update_stats_counters(struct adapter *adapter)
1394 struct ixgbe_hw *hw = &adapter->hw;
1395 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1396 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1398 u64 total_missed_rx = 0;
1400 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1401 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1402 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1403 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1404 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1406 for (int i = 0; i < 16; i++) {
1407 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1408 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1409 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1411 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1412 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1413 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1415 /* Hardware workaround, gprc counts missed packets */
1416 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1417 stats->gprc -= missed_rx;
1419 if (hw->mac.type != ixgbe_mac_82598EB) {
1420 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1421 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1422 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1423 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1424 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1425 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1426 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1427 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1428 stats->lxoffrxc += lxoffrxc;
1430 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1431 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1432 stats->lxoffrxc += lxoffrxc;
1433 /* 82598 only has a counter in the high register */
1434 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1435 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1436 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1440 * For watchdog management we need to know if we have been paused
1441 * during the last interval, so capture that here.
1444 adapter->shared->isc_pause_frames = 1;
1447 * Workaround: mprc hardware is incorrectly counting
1448 * broadcasts, so for now we subtract those.
1450 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1451 stats->bprc += bprc;
1452 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1453 if (hw->mac.type == ixgbe_mac_82598EB)
1454 stats->mprc -= bprc;
1456 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1457 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1458 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1459 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1460 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1461 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1463 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1464 stats->lxontxc += lxon;
1465 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1466 stats->lxofftxc += lxoff;
1467 total = lxon + lxoff;
1469 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1470 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1471 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1472 stats->gptc -= total;
1473 stats->mptc -= total;
1474 stats->ptc64 -= total;
1475 stats->gotc -= total * ETHER_MIN_LEN;
1477 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1478 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1479 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1480 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1481 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1482 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1483 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1484 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1485 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1486 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1487 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1488 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1489 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1490 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1491 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1492 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1493 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1494 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1495 /* Only read FCOE on 82599 */
1496 if (hw->mac.type != ixgbe_mac_82598EB) {
1497 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1498 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1499 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1500 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1501 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1504 /* Fill out the OS statistics structure */
1505 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1506 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1507 IXGBE_SET_IBYTES(adapter, stats->gorc);
1508 IXGBE_SET_OBYTES(adapter, stats->gotc);
1509 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1510 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1511 IXGBE_SET_COLLISIONS(adapter, 0);
1512 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1513 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1514 } /* ixgbe_update_stats_counters */
1516 /************************************************************************
1517 * ixgbe_add_hw_stats
1519 * Add sysctl variables, one per statistic, to the system.
1520 ************************************************************************/
1522 ixgbe_add_hw_stats(struct adapter *adapter)
1524 device_t dev = iflib_get_dev(adapter->ctx);
1525 struct ix_rx_queue *rx_que;
1526 struct ix_tx_queue *tx_que;
1527 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1528 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1529 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1530 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1531 struct sysctl_oid *stat_node, *queue_node;
1532 struct sysctl_oid_list *stat_list, *queue_list;
1535 #define QUEUE_NAME_LEN 32
1536 char namebuf[QUEUE_NAME_LEN];
1538 /* Driver Statistics */
1539 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1540 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1541 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1542 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1543 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1544 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1546 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1547 struct tx_ring *txr = &tx_que->txr;
1548 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1549 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1550 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1551 queue_list = SYSCTL_CHILDREN(queue_node);
1553 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1554 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1555 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1556 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1557 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1558 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1559 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1560 CTLFLAG_RD, &txr->tso_tx, "TSO");
1561 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1562 CTLFLAG_RD, &txr->total_packets,
1563 "Queue Packets Transmitted");
1566 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1567 struct rx_ring *rxr = &rx_que->rxr;
1568 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1569 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1570 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1571 queue_list = SYSCTL_CHILDREN(queue_node);
1573 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1574 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1575 &adapter->rx_queues[i], 0,
1576 ixgbe_sysctl_interrupt_rate_handler, "IU",
1578 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1579 CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1580 "irqs on this queue");
1581 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1582 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1583 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1584 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1585 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1586 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1587 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1588 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1589 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1590 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1591 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1592 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1593 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1594 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1597 /* MAC stats get their own sub node */
1599 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1600 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1601 stat_list = SYSCTL_CHILDREN(stat_node);
1603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1604 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1605 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1606 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1607 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1608 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1610 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1612 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1613 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1614 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1616 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1618 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1620 /* Flow Control stats */
1621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1622 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1624 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1625 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1626 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1628 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1630 /* Packet Reception Stats */
1631 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1632 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1634 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1636 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1638 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1640 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1642 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1644 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1646 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1648 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1650 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1652 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1654 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1656 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1657 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1658 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1660 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1662 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1664 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1665 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1666 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1667 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1668 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1670 /* Packet Transmission Stats */
1671 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1672 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1673 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1674 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1675 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1676 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1677 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1678 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1679 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1680 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1681 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1682 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1683 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1684 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1685 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1686 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1687 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1688 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1689 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1690 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1691 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1692 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1693 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1694 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1695 } /* ixgbe_add_hw_stats */
1697 /************************************************************************
1698 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1700 * Retrieves the TDH value from the hardware
1701 ************************************************************************/
1703 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1705 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1712 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1713 error = sysctl_handle_int(oidp, &val, 0, req);
1714 if (error || !req->newptr)
1718 } /* ixgbe_sysctl_tdh_handler */
1720 /************************************************************************
1721 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1723 * Retrieves the TDT value from the hardware
1724 ************************************************************************/
1726 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1728 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1735 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1736 error = sysctl_handle_int(oidp, &val, 0, req);
1737 if (error || !req->newptr)
1741 } /* ixgbe_sysctl_tdt_handler */
1743 /************************************************************************
1744 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1746 * Retrieves the RDH value from the hardware
1747 ************************************************************************/
1749 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1751 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1758 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1759 error = sysctl_handle_int(oidp, &val, 0, req);
1760 if (error || !req->newptr)
1764 } /* ixgbe_sysctl_rdh_handler */
1766 /************************************************************************
1767 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1769 * Retrieves the RDT value from the hardware
1770 ************************************************************************/
/* Read-only sysctl: reports the current Receive Descriptor Tail register
 * for one RX ring.  Same pattern as the TDH/TDT/RDH handlers above. */
1772 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1774 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
/* RDT is per-queue; rxr->me selects the ring's register instance. */
1781 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1782 error = sysctl_handle_int(oidp, &val, 0, req);
1783 if (error || !req->newptr)
1787 } /* ixgbe_sysctl_rdt_handler */
1789 /************************************************************************
1790 * ixgbe_if_vlan_register
1792 * Run via vlan config EVENT, it enables us to use the
1793 * HW Filter table since we can get the vlan id. This
1794 * just creates the entry in the soft version of the
1795 * VFTA, init will repopulate the real table.
1796 ************************************************************************/
1798 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1800 struct adapter *adapter = iflib_get_softc(ctx);
/* shadow_vfta is an array of 32-bit words: the upper bits of the VLAN tag
 * select the word.  NOTE(review): the line computing 'bit' (presumably
 * vtag & 0x1F) is elided from this excerpt — confirm in full source. */
1803 index = (vtag >> 5) & 0x7F;
1805 adapter->shadow_vfta[index] |= (1 << bit);
/* Track how many VLANs are registered so HW setup can early-out at zero. */
1806 ++adapter->num_vlans;
/* Push the updated soft table into hardware. */
1807 ixgbe_setup_vlan_hw_support(ctx);
1808 } /* ixgbe_if_vlan_register */
1810 /************************************************************************
1811 * ixgbe_if_vlan_unregister
1813 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1814 ************************************************************************/
1816 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1818 struct adapter *adapter = iflib_get_softc(ctx);
/* Same word/bit mapping as ixgbe_if_vlan_register; the 'bit' computation
 * line is elided from this excerpt. */
1821 index = (vtag >> 5) & 0x7F;
1823 adapter->shadow_vfta[index] &= ~(1 << bit);
1824 --adapter->num_vlans;
1825 /* Re-init to load the changes */
1826 ixgbe_setup_vlan_hw_support(ctx);
1827 } /* ixgbe_if_vlan_unregister */
1829 /************************************************************************
1830 * ixgbe_setup_vlan_hw_support
/*
 * Program hardware VLAN tag stripping and the VLAN filter table (VFTA)
 * to match the soft state (shadow_vfta) and the interface capabilities.
 */
1831 ************************************************************************/
1833 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1835 struct ifnet *ifp = iflib_get_ifp(ctx);
1836 struct adapter *adapter = iflib_get_softc(ctx);
1837 struct ixgbe_hw *hw = &adapter->hw;
1838 struct rx_ring *rxr;
1844 * We get here thru init_locked, meaning
1845 * a soft reset, this has already cleared
1846 * the VFTA and other state, so if there
1847 * have been no vlan's registered do nothing.
1849 if (adapter->num_vlans == 0)
1852 /* Setup the queues for vlans */
1853 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1854 for (i = 0; i < adapter->num_rx_queues; i++) {
1855 rxr = &adapter->rx_queues[i].rxr;
1856 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1857 if (hw->mac.type != ixgbe_mac_82598EB) {
1858 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1859 ctrl |= IXGBE_RXDCTL_VME;
1860 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Remember per-ring that the NIC strips the tag for us. */
1862 rxr->vtag_strip = TRUE;
/* Without HW filtering enabled there is nothing more to program. */
1866 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1869 * A soft reset zero's out the VFTA, so
1870 * we need to repopulate it now.
1872 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1873 if (adapter->shadow_vfta[i] != 0)
1874 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1875 adapter->shadow_vfta[i]);
1877 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1878 /* Enable the Filter Table if enabled */
1879 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1880 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1881 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 the VLAN strip enable is global (VLNCTRL), not per-queue. */
1883 if (hw->mac.type == ixgbe_mac_82598EB)
1884 ctrl |= IXGBE_VLNCTRL_VME;
1885 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1886 } /* ixgbe_setup_vlan_hw_support */
1888 /************************************************************************
1889 * ixgbe_get_slot_info
1891 * Get the width and transaction speed of
1892 * the slot this adapter is plugged into.
1893 ************************************************************************/
1895 ixgbe_get_slot_info(struct adapter *adapter)
1897 device_t dev = iflib_get_dev(adapter->ctx);
1898 struct ixgbe_hw *hw = &adapter->hw;
1899 int bus_info_valid = TRUE;
1903 /* Some devices are behind an internal bridge */
1904 switch (hw->device_id) {
1905 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1906 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
/* Bridged devices must walk up the PCI tree for real slot info. */
1907 goto get_parent_info;
1912 ixgbe_get_bus_info(hw);
1915 * Some devices don't use PCI-E, but there is no need
1916 * to display "Unknown" for bus speed and width.
1918 switch (hw->mac.type) {
1919 case ixgbe_mac_X550EM_x:
1920 case ixgbe_mac_X550EM_a:
1928 * For the Quad port adapter we need to parse back
1929 * up the PCI tree to find the speed of the expansion
1930 * slot into which this adapter is plugged. A bit more work.
/* First hop: the internal bridge ("parent pcib"). */
1932 dev = device_get_parent(device_get_parent(dev));
1934 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1935 pci_get_slot(dev), pci_get_function(dev));
/* Second hop: the slot-side bridge ("slot pcib"). */
1937 dev = device_get_parent(device_get_parent(dev));
1939 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1940 pci_get_slot(dev), pci_get_function(dev));
1942 /* Now get the PCI Express Capabilities offset */
1943 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1945 * Hmm...can't get PCI-Express capabilities.
1946 * Falling back to default method.
1948 bus_info_valid = FALSE;
1949 ixgbe_get_bus_info(hw);
1952 /* ...and read the Link Status Register */
1953 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1954 ixgbe_set_pci_config_data_generic(hw, link);
1957 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1958 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1959 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1960 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1962 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1963 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1964 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the slot cannot feed the adapter at full rate. */
1967 if (bus_info_valid) {
1968 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1969 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1970 (hw->bus.speed == ixgbe_bus_speed_2500))) {
1971 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1972 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
/* The quad-port SFP part needs a full x8 Gen3 slot. */
1974 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1975 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1976 (hw->bus.speed < ixgbe_bus_speed_8000))) {
1977 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1978 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1981 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1984 } /* ixgbe_get_slot_info */
1986 /************************************************************************
1987 * ixgbe_if_msix_intr_assign
1989 * Setup MSI-X Interrupt resources and handlers
1990 ************************************************************************/
1992 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1994 struct adapter *adapter = iflib_get_softc(ctx);
1995 struct ix_rx_queue *rx_que = adapter->rx_queues;
1996 struct ix_tx_queue *tx_que;
1997 int error, rid, vector = 0;
2001 /* Admin Que is vector 0*/
/* One MSI-X vector per RX queue, starting after the admin vector. */
2003 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2006 snprintf(buf, sizeof(buf), "rxq%d", i);
2007 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2008 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2011 device_printf(iflib_get_dev(ctx),
2012 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count so the cleanup loop below frees only the
 * IRQs actually allocated.  NOTE(review): i + 1 includes the queue
 * whose allocation just failed — confirm intent in full source. */
2013 adapter->num_rx_queues = i + 1;
2017 rx_que->msix = vector;
2018 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2020 * The queue ID is used as the RSS layer bucket ID.
2021 * We look up the queue ID -> RSS CPU ID and select
2024 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2027 * Bind the MSI-X vector, and thus the
2028 * rings to the corresponding cpu.
2030 * This just happens to match the default RSS
2031 * round-robin bucket -> queue -> CPU allocation.
2033 if (adapter->num_rx_queues > 1)
/* TX queues share the RX queues' vectors (softirq on the paired RX IRQ). */
2038 for (int i = 0; i < adapter->num_tx_queues; i++) {
2039 snprintf(buf, sizeof(buf), "txq%d", i);
2040 tx_que = &adapter->tx_queues[i];
2041 tx_que->msix = i % adapter->num_rx_queues;
2042 iflib_softirq_alloc_generic(ctx,
2043 &adapter->rx_queues[tx_que->msix].que_irq,
2044 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Finally the admin/link vector ("aq"), serviced by ixgbe_msix_link. */
2047 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2048 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2050 device_printf(iflib_get_dev(ctx),
2051 "Failed to register admin handler");
2055 adapter->vector = vector;
/* Error path: release everything allocated so far. */
2059 iflib_irq_free(ctx, &adapter->irq);
2060 rx_que = adapter->rx_queues;
2061 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2062 iflib_irq_free(ctx, &rx_que->que_irq);
2065 } /* ixgbe_if_msix_intr_assign */
2067 /*********************************************************************
2068 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
/*
 * Fast interrupt filter for one RX queue: masks the queue's interrupt
 * and defers the real work to the iflib task thread.
 */
2069 **********************************************************************/
2071 ixgbe_msix_que(void *arg)
2073 struct ix_rx_queue *que = arg;
2074 struct adapter *adapter = que->adapter;
2075 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx);
2077 /* Protect against spurious interrupts */
2078 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2079 return (FILTER_HANDLED);
/* Mask this queue until the deferred handler re-enables it. */
2081 ixgbe_disable_queue(adapter, que->msix);
2084 return (FILTER_SCHEDULE_THREAD);
2085 } /* ixgbe_msix_que */
2087 /************************************************************************
2088 * ixgbe_media_status - Media Ioctl callback
2090 * Called whenever the user queries the status of
2091 * the interface using ifconfig.
/*
 * Maps the adapter's physical-layer capability bits plus the negotiated
 * link speed onto an IFM_* media word.  NOTE(review): this excerpt elides
 * the 'break' lines between switch cases — confirm in full source.
 */
2092 ************************************************************************/
2094 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2096 struct adapter *adapter = iflib_get_softc(ctx);
2097 struct ixgbe_hw *hw = &adapter->hw;
2100 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2102 ifmr->ifm_status = IFM_AVALID;
2103 ifmr->ifm_active = IFM_ETHER;
/* No link: report only AVALID/ETHER. */
2105 if (!adapter->link_active)
2108 ifmr->ifm_status |= IFM_ACTIVE;
2109 layer = adapter->phy_layer;
/* Copper (BASE-T) layers. */
2111 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2112 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2113 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2114 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2115 switch (adapter->link_speed) {
2116 case IXGBE_LINK_SPEED_10GB_FULL:
2117 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2119 case IXGBE_LINK_SPEED_1GB_FULL:
2120 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2122 case IXGBE_LINK_SPEED_100_FULL:
2123 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2125 case IXGBE_LINK_SPEED_10_FULL:
2126 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
/* Direct-attach copper (TwinAx). */
2129 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2130 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2131 switch (adapter->link_speed) {
2132 case IXGBE_LINK_SPEED_10GB_FULL:
2133 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
/* Long-reach fiber. */
2136 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2137 switch (adapter->link_speed) {
2138 case IXGBE_LINK_SPEED_10GB_FULL:
2139 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2141 case IXGBE_LINK_SPEED_1GB_FULL:
2142 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2145 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2146 switch (adapter->link_speed) {
2147 case IXGBE_LINK_SPEED_10GB_FULL:
2148 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2150 case IXGBE_LINK_SPEED_1GB_FULL:
2151 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
/* Short-reach fiber. */
2154 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2155 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2156 switch (adapter->link_speed) {
2157 case IXGBE_LINK_SPEED_10GB_FULL:
2158 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2160 case IXGBE_LINK_SPEED_1GB_FULL:
2161 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2164 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2165 switch (adapter->link_speed) {
2166 case IXGBE_LINK_SPEED_10GB_FULL:
2167 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2171 * XXX: These need to use the proper media types once
/* Backplane (KR/KX4/KX) media: older ifmedia headers lack the KR/KX
 * subtypes, so approximate with SR/CX4/CX when IFM_ETH_XTYPE is absent. */
2174 #ifndef IFM_ETH_XTYPE
2175 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2176 switch (adapter->link_speed) {
2177 case IXGBE_LINK_SPEED_10GB_FULL:
2178 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2180 case IXGBE_LINK_SPEED_2_5GB_FULL:
2181 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2183 case IXGBE_LINK_SPEED_1GB_FULL:
2184 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2187 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2188 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2189 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2190 switch (adapter->link_speed) {
2191 case IXGBE_LINK_SPEED_10GB_FULL:
2192 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2194 case IXGBE_LINK_SPEED_2_5GB_FULL:
2195 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2197 case IXGBE_LINK_SPEED_1GB_FULL:
2198 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
/* Proper KR/KX subtypes are available — use them directly. */
2202 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2203 switch (adapter->link_speed) {
2204 case IXGBE_LINK_SPEED_10GB_FULL:
2205 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2207 case IXGBE_LINK_SPEED_2_5GB_FULL:
2208 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2210 case IXGBE_LINK_SPEED_1GB_FULL:
2211 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2214 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2215 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2216 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2217 switch (adapter->link_speed) {
2218 case IXGBE_LINK_SPEED_10GB_FULL:
2219 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2221 case IXGBE_LINK_SPEED_2_5GB_FULL:
2222 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2224 case IXGBE_LINK_SPEED_1GB_FULL:
2225 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2230 /* If nothing is recognized... */
2231 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2232 ifmr->ifm_active |= IFM_UNKNOWN;
2234 /* Display current flow control setting used on link */
2235 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2236 hw->fc.current_mode == ixgbe_fc_full)
2237 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2238 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2239 hw->fc.current_mode == ixgbe_fc_full)
2240 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2241 } /* ixgbe_media_status */
2243 /************************************************************************
2244 * ixgbe_media_change - Media Ioctl callback
2246 * Called when the user changes speed/duplex using
2247 * media/mediopt option with ifconfig.
2248 ************************************************************************/
2250 ixgbe_if_media_change(if_ctx_t ctx)
2252 struct adapter *adapter = iflib_get_softc(ctx);
2253 struct ifmedia *ifm = iflib_get_media(ctx);
2254 struct ixgbe_hw *hw = &adapter->hw;
2255 ixgbe_link_speed speed = 0;
2257 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2259 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Backplane media cannot be changed from software. */
2262 if (hw->phy.media_type == ixgbe_media_type_backplane)
2266 * We don't actually need to check against the supported
2267 * media types of the adapter; ifmedia will take care of
/* Translate the requested IFM_* subtype into an ixgbe_link_speed mask.
 * NOTE(review): case labels such as IFM_AUTO are elided in this excerpt. */
2270 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2273 speed |= IXGBE_LINK_SPEED_100_FULL;
2274 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2275 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2279 #ifndef IFM_ETH_XTYPE
2280 case IFM_10G_SR: /* KR, too */
2281 case IFM_10G_CX4: /* KX4 */
2286 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2287 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2289 #ifndef IFM_ETH_XTYPE
2290 case IFM_1000_CX: /* KX */
2296 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2299 speed |= IXGBE_LINK_SPEED_100_FULL;
2300 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2302 case IFM_10G_TWINAX:
2303 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2306 speed |= IXGBE_LINK_SPEED_100_FULL;
2309 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Program the PHY and remember the advertised set in sysctl encoding
 * (bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M). */
2315 hw->mac.autotry_restart = TRUE;
2316 hw->mac.ops.setup_link(hw, speed, TRUE);
2317 adapter->advertise =
2318 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2319 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2320 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2321 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2326 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2329 } /* ixgbe_if_media_change */
2331 /************************************************************************
/*
 * ixgbe_if_promisc_set - program unicast/multicast promiscuous mode
 * (FCTRL.UPE / FCTRL.MPE) from the interface flags.
 */
2333 ************************************************************************/
2335 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2337 struct adapter *adapter = iflib_get_softc(ctx);
2338 struct ifnet *ifp = iflib_get_ifp(ctx);
/* Start from current FCTRL with unicast-promisc cleared. */
2342 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2343 rctl &= (~IXGBE_FCTRL_UPE);
2344 if (ifp->if_flags & IFF_ALLMULTI)
2345 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2347 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
/* If the multicast list fits in the HW filter, MPE isn't needed. */
2349 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2350 rctl &= (~IXGBE_FCTRL_MPE);
2351 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2353 if (ifp->if_flags & IFF_PROMISC) {
2354 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2355 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2356 } else if (ifp->if_flags & IFF_ALLMULTI) {
2357 rctl |= IXGBE_FCTRL_MPE;
2358 rctl &= ~IXGBE_FCTRL_UPE;
2359 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2362 } /* ixgbe_if_promisc_set */
2364 /************************************************************************
2365 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
/*
 * Admin-vector filter: decodes EICR causes (link, FDIR, ECC, over-temp,
 * SR-IOV mailbox, SFP module/MSF, fan, external PHY) into task_requests
 * bits and schedules the admin task when any work is pending.
 */
2366 ************************************************************************/
2368 ixgbe_msix_link(void *arg)
2370 struct adapter *adapter = arg;
2371 struct ixgbe_hw *hw = &adapter->hw;
2372 u32 eicr, eicr_mask;
2375 ++adapter->link_irq;
2377 /* Pause other interrupts */
2378 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2380 /* First get the cause */
2381 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2382 /* Be sure the queue bits are not cleared */
2383 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2384 /* Clear interrupt with write */
2385 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2387 /* Link status change */
2388 if (eicr & IXGBE_EICR_LSC) {
2389 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2390 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
/* The following causes exist only on 82599 and newer MACs. */
2393 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2394 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2395 (eicr & IXGBE_EICR_FLOW_DIR)) {
2396 /* This is probably overkill :) */
2397 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2398 return (FILTER_HANDLED);
2399 /* Disable the interrupt */
2400 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2401 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2403 if (eicr & IXGBE_EICR_ECC) {
2404 device_printf(iflib_get_dev(adapter->ctx),
2405 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2406 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2409 /* Check for over temp condition */
2410 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2411 switch (adapter->hw.mac.type) {
/* X550EM_a signals over-temp via GPI SDP0 rather than EICR_TS. */
2412 case ixgbe_mac_X550EM_a:
2413 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2415 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2416 IXGBE_EICR_GPI_SDP0_X550EM_a);
2417 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2418 IXGBE_EICR_GPI_SDP0_X550EM_a);
2419 retval = hw->phy.ops.check_overtemp(hw);
2420 if (retval != IXGBE_ERR_OVERTEMP)
2422 device_printf(iflib_get_dev(adapter->ctx),
2423 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2424 device_printf(iflib_get_dev(adapter->ctx),
2425 "System shutdown required!\n");
/* Other temp-sensor MACs use the EICR_TS thermal-sensor bit. */
2428 if (!(eicr & IXGBE_EICR_TS))
2430 retval = hw->phy.ops.check_overtemp(hw);
2431 if (retval != IXGBE_ERR_OVERTEMP)
2433 device_printf(iflib_get_dev(adapter->ctx),
2434 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2435 device_printf(iflib_get_dev(adapter->ctx),
2436 "System shutdown required!\n");
2437 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2442 /* Check for VF message */
2443 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2444 (eicr & IXGBE_EICR_MAILBOX))
2445 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2448 if (ixgbe_is_sfp(hw)) {
2449 /* Pluggable optics-related interrupt */
2450 if (hw->mac.type >= ixgbe_mac_X540)
2451 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2453 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
/* Module insertion/removal: schedule the MOD task. */
2455 if (eicr & eicr_mask) {
2456 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2457 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
/* 82599 multi-speed fiber: schedule the MSF task. */
2460 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2461 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2462 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2463 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2464 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2468 /* Check for fan failure */
2469 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2470 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2471 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2474 /* External PHY interrupt */
2475 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2476 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2477 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2478 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
/* Only wake the admin task if some cause was actually recorded. */
2481 return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2482 } /* ixgbe_msix_link */
2484 /************************************************************************
2485 * ixgbe_sysctl_interrupt_rate_handler
/*
 * Read/write sysctl for a queue's interrupt rate, backed by the EITR
 * (interrupt throttle) register.  Reading converts the EITR interval to
 * interrupts/second; writing converts a rate back into an interval.
 */
2486 ************************************************************************/
2488 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2490 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2492 unsigned int reg, usec, rate;
2494 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
/* EITR interval lives in bits 3..11, in units the divide below assumes. */
2495 usec = ((reg & 0x0FF8) >> 3);
2497 rate = 500000 / usec;
2500 error = sysctl_handle_int(oidp, &rate, 0, req);
2501 if (error || !req->newptr)
2503 reg &= ~0xfff; /* default, no limitation */
2504 ixgbe_max_interrupt_rate = 0;
2505 if (rate > 0 && rate < 500000) {
2508 ixgbe_max_interrupt_rate = rate;
/* NOTE(review): write path scales with 4000000/rate while the read path
 * uses 500000/usec — confirm the unit conventions in the full source. */
2509 reg |= ((4000000/rate) & 0xff8);
2511 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2514 } /* ixgbe_sysctl_interrupt_rate_handler */
2516 /************************************************************************
2517 * ixgbe_add_device_sysctls
/*
 * Register the per-device sysctl tree: common knobs for all parts, then
 * feature-gated nodes (DMA coalescing, WoL, external PHY, EEE).
 */
2518 ************************************************************************/
2520 ixgbe_add_device_sysctls(if_ctx_t ctx)
2522 struct adapter *adapter = iflib_get_softc(ctx);
2523 device_t dev = iflib_get_dev(ctx);
2524 struct ixgbe_hw *hw = &adapter->hw;
2525 struct sysctl_oid_list *child;
2526 struct sysctl_ctx_list *ctx_list;
2528 ctx_list = device_get_sysctl_ctx(dev);
2529 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2531 /* Sysctls for all devices */
2532 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2533 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2534 adapter, 0, ixgbe_sysctl_flowcntl, "I",
2535 IXGBE_SYSCTL_DESC_SET_FC);
2537 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2538 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2539 adapter, 0, ixgbe_sysctl_advertise, "I",
2540 IXGBE_SYSCTL_DESC_ADV_SPEED);
2543 /* testing sysctls (for all devices) */
2544 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2545 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2546 adapter, 0, ixgbe_sysctl_power_state,
2547 "I", "PCI Power State");
2549 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2550 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2551 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2553 /* for X550 series devices */
2554 if (hw->mac.type >= ixgbe_mac_X550)
2555 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2556 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2557 adapter, 0, ixgbe_sysctl_dmac,
2558 "I", "DMA Coalesce");
2560 /* for WoL-capable devices */
2561 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2562 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2563 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2564 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2566 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2567 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2568 adapter, 0, ixgbe_sysctl_wufc,
2569 "I", "Enable/Disable Wake Up Filters");
2572 /* for X552/X557-AT devices */
2573 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2574 struct sysctl_oid *phy_node;
2575 struct sysctl_oid_list *phy_list;
/* Group external-PHY sensors under a "phy" sub-node. */
2577 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2578 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2579 phy_list = SYSCTL_CHILDREN(phy_node);
2581 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2582 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2583 adapter, 0, ixgbe_sysctl_phy_temp,
2584 "I", "Current External PHY Temperature (Celsius)");
2586 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2587 "overtemp_occurred",
2588 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2589 ixgbe_sysctl_phy_overtemp_occurred, "I",
2590 "External PHY High Temperature Event Occurred");
2593 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2594 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2595 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2596 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2598 } /* ixgbe_add_device_sysctls */
2600 /************************************************************************
2601 * ixgbe_allocate_pci_resources
/*
 * Map the device's memory BAR and record the bus-space handles that the
 * IXGBE_READ_REG/IXGBE_WRITE_REG macros use.
 */
2602 ************************************************************************/
2604 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2606 struct adapter *adapter = iflib_get_softc(ctx);
2607 device_t dev = iflib_get_dev(ctx);
2611 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2614 if (!(adapter->pci_mem)) {
2615 device_printf(dev, "Unable to allocate bus resource: memory\n");
2619 /* Save bus_space values for READ/WRITE_REG macros */
2620 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2621 adapter->osdep.mem_bus_space_handle =
2622 rman_get_bushandle(adapter->pci_mem);
2623 /* Set hw values for shared code */
/* Shared code treats hw_addr as an opaque base; here it points at the
 * stored bus-space handle rather than a direct virtual mapping. */
2624 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2627 } /* ixgbe_allocate_pci_resources */
2629 /************************************************************************
2630 * ixgbe_detach - Device removal routine
2632 * Called when the driver is being removed.
2633 * Stops the adapter and deallocates all the resources
2634 * that were allocated for driver operation.
2636 * return 0 on success, positive on failure
2637 ************************************************************************/
2639 ixgbe_if_detach(if_ctx_t ctx)
2641 struct adapter *adapter = iflib_get_softc(ctx);
2642 device_t dev = iflib_get_dev(ctx);
2645 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Refuse to detach while VFs are still attached via SR-IOV. */
2647 if (ixgbe_pci_iov_detach(dev) != 0) {
2648 device_printf(dev, "SR-IOV in use; detach first.\n");
/* Park the port in low-power/WoL state before tearing down. */
2652 ixgbe_setup_low_power_mode(ctx);
2654 /* let hardware know driver is unloading */
2655 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2656 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2659 ixgbe_free_pci_resources(ctx);
/* Multicast table array allocated at attach time. */
2660 free(adapter->mta, M_IXGBE);
2663 } /* ixgbe_if_detach */
2665 /************************************************************************
2666 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2668 * Prepare the adapter/port for LPLU and/or WoL
2669 ************************************************************************/
2671 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2673 struct adapter *adapter = iflib_get_softc(ctx);
2674 struct ixgbe_hw *hw = &adapter->hw;
2675 device_t dev = iflib_get_dev(ctx);
/* No WoL configured: simply power the PHY down. */
2678 if (!hw->wol_enabled)
2679 ixgbe_set_phy_power(hw, FALSE);
2681 /* Limit power management flow to X550EM baseT */
2682 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2683 hw->phy.ops.enter_lplu) {
2684 /* Turn off support for APM wakeup. (Using ACPI instead) */
2685 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2686 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2689 * Clear Wake Up Status register to prevent any previous wakeup
2690 * events from waking us up immediately after we suspend.
2692 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2695 * Program the Wakeup Filter Control register with user filter
2698 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2700 /* Enable wakeups and power management in Wakeup Control */
2701 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2702 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2704 /* X550EM baseT adapters need a special LPLU flow */
/* Hold off PHY resets while entering Low Power Link Up. */
2705 hw->phy.reset_disable = TRUE;
2707 error = hw->phy.ops.enter_lplu(hw);
2709 device_printf(dev, "Error entering LPLU: %d\n", error);
2710 hw->phy.reset_disable = FALSE;
2712 /* Just stop for other adapters */
2717 } /* ixgbe_setup_low_power_mode */
2719 /************************************************************************
2720 * ixgbe_shutdown - Shutdown entry point
/* Thin wrapper: system shutdown reuses the low-power/WoL preparation. */
2721 ************************************************************************/
2723 ixgbe_if_shutdown(if_ctx_t ctx)
2727 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2729 error = ixgbe_setup_low_power_mode(ctx);
2732 } /* ixgbe_if_shutdown */
2734 /************************************************************************
/* ixgbe_if_suspend - suspend entry point; identical flow to shutdown:
 * put the port into its low-power/WoL state. */
2738 ************************************************************************/
2740 ixgbe_if_suspend(if_ctx_t ctx)
2744 INIT_DEBUGOUT("ixgbe_suspend: begin");
2746 error = ixgbe_setup_low_power_mode(ctx);
2749 } /* ixgbe_if_suspend */
2751 /************************************************************************
/* ixgbe_if_resume - resume entry point: report/clear wakeup status and
 * restore link configuration after the D3->D0 transition. */
2755 ************************************************************************/
2757 ixgbe_if_resume(if_ctx_t ctx)
2759 struct adapter *adapter = iflib_get_softc(ctx);
2760 device_t dev = iflib_get_dev(ctx);
2761 struct ifnet *ifp = iflib_get_ifp(ctx);
2762 struct ixgbe_hw *hw = &adapter->hw;
2765 INIT_DEBUGOUT("ixgbe_resume: begin");
2767 /* Read & clear WUS register */
2768 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2770 device_printf(dev, "Woken up by (WUS): %#010x\n",
2771 IXGBE_READ_REG(hw, IXGBE_WUS));
/* WUS is write-1-to-clear: clear all recorded wake causes. */
2772 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2773 /* And clear WUFC until next low-power transition */
2774 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2777 * Required after D3->D0 transition;
2778 * will re-advertise all previous advertised speeds
2780 if (ifp->if_flags & IFF_UP)
2784 } /* ixgbe_if_resume */
2786 /************************************************************************
2787 * ixgbe_if_mtu_set - Ioctl mtu entry point
2789 * Return 0 on success, EINVAL on failure
2790 ************************************************************************/
2792 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2794 struct adapter *adapter = iflib_get_softc(ctx);
2797 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
/* Reject MTUs beyond the hardware's jumbo-frame limit. */
2799 if (mtu > IXGBE_MAX_MTU) {
/* max_frame_size includes the Ethernet header overhead (IXGBE_MTU_HDR). */
2802 adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2806 } /* ixgbe_if_mtu_set */
2808 /************************************************************************
2809 * ixgbe_if_crcstrip_set
/* Toggle RX CRC stripping in both HLREG0 and RDRXCTL; used by netmap,
 * which may prefer keeping the CRC for faster receive. */
2810 ************************************************************************/
2812 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2814 struct adapter *sc = iflib_get_softc(ctx);
2815 struct ixgbe_hw *hw = &sc->hw;
2816 /* crc stripping is set in two places:
2817 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2818 * IXGBE_RDRXCTL (set by the original driver in
2819 * ixgbe_setup_hw_rsc() called in init_locked.
2820 * We disable the setting when netmap is compiled in).
2821 * We update the values here, but also in ixgbe.c because
2822 * init_locked sometimes is called outside our control.
2826 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2827 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2830 D("%s read HLREG 0x%x rxc 0x%x",
2831 onoff ? "enter" : "exit", hl, rxc);
2833 /* hw requirements ... */
2834 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2835 rxc |= IXGBE_RDRXCTL_RSCACKC;
2836 if (onoff && !crcstrip) {
2837 /* keep the crc. Fast rx */
2838 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2839 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2841 /* reset default mode */
2842 hl |= IXGBE_HLREG0_RXCRCSTRP;
2843 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2847 D("%s write HLREG 0x%x rxc 0x%x",
2848 onoff ? "enter" : "exit", hl, rxc);
2850 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2851 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2852 } /* ixgbe_if_crcstrip_set */
2854 /*********************************************************************
2855 * ixgbe_if_init - Init entry point
2857 * Used in two ways: It is used by the stack as an init
2858 * entry point in network interface structure. It is also
2859 * used by the driver as a hw/sw initialization routine to
2860 * get to a consistent state.
2862 * Return 0 on success, positive on failure
2863 **********************************************************************/
/*
 * Full (re)initialization sequence: program the MAC address, set up
 * IOV/TX/multicast/RX, configure interrupt routing (GPIE/IVARs), enable
 * all TX and RX queue rings, kick-start any SFP optics, configure
 * link/flow-control/VLAN/DMAC, and finally enable interrupts.
 * NOTE(review): many lines are elided in this excerpt (return type,
 * braces, declarations of i/j/mhadd/txdctl/rxdctl/rxctrl/ctrl_ext/err,
 * several closing braces and break statements) -- verify against the
 * complete file.
 */
2865 ixgbe_if_init(if_ctx_t ctx)
2867 struct adapter *adapter = iflib_get_softc(ctx);
2868 struct ifnet *ifp = iflib_get_ifp(ctx);
2869 device_t dev = iflib_get_dev(ctx);
2870 struct ixgbe_hw *hw = &adapter->hw;
2871 struct ix_rx_queue *rx_que;
2872 struct ix_tx_queue *tx_que;
2879 INIT_DEBUGOUT("ixgbe_if_init: begin");
2881 /* Queue indices may change with IOV mode */
2882 ixgbe_align_all_queue_indices(adapter);
2884 /* reprogram the RAR[0] in case user changed it. */
2885 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2887 /* Get the latest mac address, User can use a LAA */
2888 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2889 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2890 hw->addr_ctrl.rar_used_count = 1;
2894 ixgbe_initialize_iov(adapter);
2896 ixgbe_initialize_transmit_units(ctx);
2898 /* Setup Multicast table */
2899 ixgbe_if_multi_set(ctx);
2901 /* Determine the correct mbuf pool, based on frame size */
2902 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2904 /* Configure RX settings */
2905 ixgbe_initialize_receive_units(ctx);
2908 * Initialize variable holding task enqueue requests
2909 * from MSI-X interrupts
2911 adapter->task_requests = 0;
2913 /* Enable SDP & MSI-X interrupts based on adapter */
2914 ixgbe_config_gpie(adapter);
/* Program the max frame size register when using jumbo frames. */
2917 if (ifp->if_mtu > ETHERMTU) {
2918 /* aka IXGBE_MAXFRS on 82599 and newer */
2919 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2925 /* Now enable all the queues */
2926 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2927 struct tx_ring *txr = &tx_que->txr;
2929 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2930 txdctl |= IXGBE_TXDCTL_ENABLE;
2931 /* Set WTHRESH to 8, burst writeback */
2932 txdctl |= (8 << 16);
2934 * When the internal queue falls below PTHRESH (32),
2935 * start prefetching as long as there are at least
2936 * HTHRESH (1) buffers ready. The values are taken
2937 * from the Intel linux driver 3.8.21.
2938 * Prefetching enables tx line rate even with 1 queue.
2940 txdctl |= (32 << 0) | (1 << 8);
2941 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
/* Enable each RX ring and poll (up to 10 iterations) until the
 * hardware reports the ring enabled. */
2944 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2945 struct rx_ring *rxr = &rx_que->rxr;
2947 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2948 if (hw->mac.type == ixgbe_mac_82598EB) {
2954 rxdctl &= ~0x3FFFFF;
2957 rxdctl |= IXGBE_RXDCTL_ENABLE;
2958 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2959 for (j = 0; j < 10; j++) {
2960 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2961 IXGBE_RXDCTL_ENABLE)
2969 /* Enable Receive engine */
2970 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2971 if (hw->mac.type == ixgbe_mac_82598EB)
2972 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2973 rxctrl |= IXGBE_RXCTRL_RXEN;
2974 ixgbe_enable_rx_dma(hw, rxctrl);
2976 /* Set up MSI/MSI-X routing */
2977 if (ixgbe_enable_msix) {
2978 ixgbe_configure_ivars(adapter);
2979 /* Set up auto-mask */
2980 if (hw->mac.type == ixgbe_mac_82598EB)
2981 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2983 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2984 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2986 } else { /* Simple settings for Legacy/MSI */
2987 ixgbe_set_ivar(adapter, 0, 0, 0);
2988 ixgbe_set_ivar(adapter, 0, 0, 1);
2989 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2992 ixgbe_init_fdir(adapter);
2995 * Check on any SFP devices that
2996 * need to be kick-started
2998 if (hw->phy.type == ixgbe_phy_none) {
2999 err = hw->phy.ops.identify(hw);
3000 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3002 "Unsupported SFP+ module type was detected.\n");
3007 /* Set moderation on the Link interrupt */
3008 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3010 /* Enable power to the phy. */
3011 ixgbe_set_phy_power(hw, TRUE);
3013 /* Config/Enable Link */
3014 ixgbe_config_link(ctx);
3016 /* Hardware Packet Buffer & Flow Control setup */
3017 ixgbe_config_delay_values(adapter);
3019 /* Initialize the FC settings */
3022 /* Set up VLAN support and filter */
3023 ixgbe_setup_vlan_hw_support(ctx);
3025 /* Setup DMA Coalescing */
3026 ixgbe_config_dmac(adapter);
3028 /* And now turn on interrupts */
3029 ixgbe_if_enable_intr(ctx);
3031 /* Enable the use of the MBX by the VF's */
3032 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3033 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3034 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3035 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3038 } /* ixgbe_init_locked */
3040 /************************************************************************
3043 * Setup the correct IVAR register for a particular MSI-X interrupt
3044 * (yes this is all very magic and confusing :)
3045 * - entry is the register array entry
3046 * - vector is the MSI-X vector for this queue
3047 * - type is RX/TX/MISC
3048 ************************************************************************/
/*
 * Program one IVAR (Interrupt Vector Allocation Register) slot.  The
 * register layout differs by MAC generation: 82598 uses one 8-bit slot
 * per entry across a 32-register array; 82599 and later pack RX/TX
 * pairs per entry with a separate IVAR_MISC register for type == -1.
 * NOTE(review): lines appear elided (return type, braces, declarations
 * of 'index'/'ivar', break statements) -- confirm against the full file.
 */
3050 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3052 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the vector slot valid before writing it into the register. */
3055 vector |= IXGBE_IVAR_ALLOC_VAL;
3057 switch (hw->mac.type) {
3058 case ixgbe_mac_82598EB:
3060 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3062 entry += (type * 64);
3063 index = (entry >> 2) & 0x1F;
3064 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
/* Replace only the 8-bit byte lane for this entry. */
3065 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3066 ivar |= (vector << (8 * (entry & 0x3)));
3067 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3069 case ixgbe_mac_82599EB:
3070 case ixgbe_mac_X540:
3071 case ixgbe_mac_X550:
3072 case ixgbe_mac_X550EM_x:
3073 case ixgbe_mac_X550EM_a:
3074 if (type == -1) { /* MISC IVAR */
3075 index = (entry & 1) * 8;
3076 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3077 ivar &= ~(0xFF << index);
3078 ivar |= (vector << index);
3079 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3080 } else { /* RX/TX IVARS */
3081 index = (16 * (entry & 1)) + (8 * type);
3082 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3083 ivar &= ~(0xFF << index);
3084 ivar |= (vector << index);
3085 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3090 } /* ixgbe_set_ivar */
3092 /************************************************************************
3093 * ixgbe_configure_ivars
3094 ************************************************************************/
/*
 * Route every RX and TX queue's interrupt, plus the link interrupt, to
 * its MSI-X vector via ixgbe_set_ivar(), and program an initial EITR
 * moderation value per RX vector derived from ixgbe_max_interrupt_rate.
 * NOTE(review): lines appear elided (return type, braces, the 'newitr'
 * declaration/else branch) -- confirm against the full file.
 */
3096 ixgbe_configure_ivars(struct adapter *adapter)
3098 struct ix_rx_queue *rx_que = adapter->rx_queues;
3099 struct ix_tx_queue *tx_que = adapter->tx_queues;
/* Convert interrupts/sec into EITR units (register granularity). */
3102 if (ixgbe_max_interrupt_rate > 0)
3103 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3106 * Disable DMA coalescing if interrupt moderation is
3113 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3114 struct rx_ring *rxr = &rx_que->rxr;
3116 /* First the RX queue entry */
3117 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3119 /* Set an Initial EITR value */
3120 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3122 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3123 struct tx_ring *txr = &tx_que->txr;
3125 /* ... and the TX */
3126 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3128 /* For the Link interrupt */
3129 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3130 } /* ixgbe_configure_ivars */
3132 /************************************************************************
3134 ************************************************************************/
/*
 * Program the GPIE register: enable enhanced MSI-X mode when running
 * with MSI-X, and enable the SDP (software-definable pin) interrupts
 * used for fan failure, thermal sensor and link/module detection,
 * depending on enabled features and MAC type.
 * NOTE(review): lines appear elided (return type, braces, some GPIE
 * mode bits and break statements) -- confirm against the full file.
 */
3136 ixgbe_config_gpie(struct adapter *adapter)
3138 struct ixgbe_hw *hw = &adapter->hw;
3141 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3143 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3144 /* Enable Enhanced MSI-X mode */
3145 gpie |= IXGBE_GPIE_MSIX_MODE
3147 | IXGBE_GPIE_PBA_SUPPORT
3151 /* Fan Failure Interrupt */
3152 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3153 gpie |= IXGBE_SDP1_GPIEN;
3155 /* Thermal Sensor Interrupt */
3156 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3157 gpie |= IXGBE_SDP0_GPIEN_X540;
3159 /* Link detection */
3160 switch (hw->mac.type) {
3161 case ixgbe_mac_82599EB:
3162 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3164 case ixgbe_mac_X550EM_x:
3165 case ixgbe_mac_X550EM_a:
3166 gpie |= IXGBE_SDP0_GPIEN_X540;
3172 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3174 } /* ixgbe_config_gpie */
3176 /************************************************************************
3177 * ixgbe_config_delay_values
3179 * Requires adapter->max_frame_size to be set.
3180 ************************************************************************/
/*
 * Compute and store flow-control watermarks.  The high water mark is
 * the RX packet buffer size minus the delay value (DV) for the current
 * max frame size; the low water mark comes from the "low DV" formula.
 * X540-class and newer MACs use the *_X540 delay macros.  Also sets
 * the pause time and enables XON transmission.
 * NOTE(review): lines appear elided (return type, braces, break
 * statements in both switches) -- confirm against the full file.
 */
3182 ixgbe_config_delay_values(struct adapter *adapter)
3184 struct ixgbe_hw *hw = &adapter->hw;
3185 u32 rxpb, frame, size, tmp;
3187 frame = adapter->max_frame_size;
3189 /* Calculate High Water */
3190 switch (hw->mac.type) {
3191 case ixgbe_mac_X540:
3192 case ixgbe_mac_X550:
3193 case ixgbe_mac_X550EM_x:
3194 case ixgbe_mac_X550EM_a:
3195 tmp = IXGBE_DV_X540(frame, frame);
3198 tmp = IXGBE_DV(frame, frame);
/* Convert bit-times to KB and subtract from the RX packet buffer
 * size (RXPBSIZE is in bytes; >> 10 converts to KB). */
3201 size = IXGBE_BT2KB(tmp);
3202 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3203 hw->fc.high_water[0] = rxpb - size;
3205 /* Now calculate Low Water */
3206 switch (hw->mac.type) {
3207 case ixgbe_mac_X540:
3208 case ixgbe_mac_X550:
3209 case ixgbe_mac_X550EM_x:
3210 case ixgbe_mac_X550EM_a:
3211 tmp = IXGBE_LOW_DV_X540(frame);
3214 tmp = IXGBE_LOW_DV(frame);
3217 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3219 hw->fc.pause_time = IXGBE_FC_PAUSE;
3220 hw->fc.send_xon = TRUE;
3221 } /* ixgbe_config_delay_values */
3223 /************************************************************************
3224 * ixgbe_set_multi - Multicast Update
3226 * Called whenever multicast address list is updated.
3227 ************************************************************************/
/*
 * if_foreach_llmaddr() callback: copy one link-layer multicast address
 * into the adapter's MTA (multicast table array) at slot 'count' and
 * tag it with the adapter's VMDq pool.  Stops accepting entries once
 * MAX_NUM_MULTICAST_ADDRESSES is reached.
 * NOTE(review): lines appear elided (return type, braces, the return
 * statements for both paths) -- confirm against the full file.
 */
3229 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3231 struct adapter *adapter = arg;
3232 struct ixgbe_mc_addr *mta = adapter->mta;
3234 if (count == MAX_NUM_MULTICAST_ADDRESSES)
3236 bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3237 mta[count].vmdq = adapter->pool;
3240 } /* ixgbe_mc_filter_apply */
/*
 * iflib multicast-update entry point.  Rebuilds the MTA from the
 * interface's link-layer multicast list, then programs FCTRL promisc/
 * allmulti bits from the interface flags, and finally pushes the MTA
 * to hardware when it fits in the filter table.
 * NOTE(review): lines appear elided (return type, braces, declarations
 * of mcnt/fctrl/update_ptr, the mta assignment) -- confirm against the
 * full file.
 */
3243 ixgbe_if_multi_set(if_ctx_t ctx)
3245 struct adapter *adapter = iflib_get_softc(ctx);
3246 struct ixgbe_mc_addr *mta;
3247 struct ifnet *ifp = iflib_get_ifp(ctx);
3252 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
/* Clear the table, then refill it via the per-address callback. */
3255 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3257 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3260 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3261 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3262 if (ifp->if_flags & IFF_PROMISC)
3263 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3264 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3265 ifp->if_flags & IFF_ALLMULTI) {
3266 fctrl |= IXGBE_FCTRL_MPE;
3267 fctrl &= ~IXGBE_FCTRL_UPE;
3269 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3271 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program exact-match filters when the list fits. */
3273 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3274 update_ptr = (u8 *)mta;
3275 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3276 ixgbe_mc_array_itr, TRUE);
3279 } /* ixgbe_if_multi_set */
3281 /************************************************************************
3282 * ixgbe_mc_array_itr
3284 * An iterator function needed by the multicast shared code.
3285 * It feeds the shared code routine the addresses in the
3286 * array of ixgbe_set_multi() one by one.
3287 ************************************************************************/
/*
 * Advance *update_ptr to the next struct ixgbe_mc_addr element on each
 * call, so the shared code can walk the MTA one address at a time.
 * NOTE(review): lines appear elided (return type, braces, the *vmdq
 * assignment and return of the address pointer) -- confirm against the
 * full file.
 */
3289 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3291 struct ixgbe_mc_addr *mta;
3293 mta = (struct ixgbe_mc_addr *)*update_ptr;
3296 *update_ptr = (u8*)(mta + 1);
3299 } /* ixgbe_mc_array_itr */
3301 /************************************************************************
3302 * ixgbe_local_timer - Timer routine
3304 * Checks for link status, updates statistics,
3305 * and runs the watchdog check.
3306 ************************************************************************/
/*
 * iflib periodic timer callback.  Only acts on the qid-0 invocation
 * (per the usual iflib convention -- the early-return guard is elided
 * in this excerpt): probes for newly inserted SFP optics, refreshes the
 * cached link state, and defers admin work to the admin task.
 */
3308 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3310 struct adapter *adapter = iflib_get_softc(ctx);
3315 /* Check for pluggable optics */
3316 if (adapter->sfp_probe)
3317 if (!ixgbe_sfp_probe(ctx))
3318 return; /* Nothing to do */
/* Refresh adapter->link_speed / link_up from the hardware. */
3320 ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3321 &adapter->link_up, 0);
3323 /* Fire off the adminq task */
3324 iflib_admin_intr_deferred(ctx);
3326 } /* ixgbe_if_timer */
3328 /************************************************************************
3331 * Determine if a port had optics inserted.
3332 ************************************************************************/
/*
 * Check whether an SFP module has been plugged into a previously-empty
 * cage (only for the ixgbe_phy_nl PHY type).  On detection the PHY is
 * reset and sfp_probe is cleared; unsupported modules are reported.
 * Returns TRUE when supported optics are now present, FALSE otherwise.
 * NOTE(review): lines appear elided (return type, braces, the identify
 * result check and 'result = TRUE' path) -- confirm against the full
 * file.
 */
3334 ixgbe_sfp_probe(if_ctx_t ctx)
3336 struct adapter *adapter = iflib_get_softc(ctx);
3337 struct ixgbe_hw *hw = &adapter->hw;
3338 device_t dev = iflib_get_dev(ctx);
3339 bool result = FALSE;
3341 if ((hw->phy.type == ixgbe_phy_nl) &&
3342 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3343 s32 ret = hw->phy.ops.identify_sfp(hw);
3346 ret = hw->phy.ops.reset(hw);
3347 adapter->sfp_probe = FALSE;
3348 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3349 device_printf(dev, "Unsupported SFP+ module detected!");
3351 "Reload driver with supported module.\n");
3354 device_printf(dev, "SFP+ module detected!\n");
3355 /* We now have supported optics */
3361 } /* ixgbe_sfp_probe */
3363 /************************************************************************
3364 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3365 ************************************************************************/
/*
 * Handle an SFP module insertion/removal event: optionally apply the
 * crosstalk workaround by reading the cage-presence bit from ESDP,
 * identify the new module, (re)configure the PHY or SFP setup, and
 * request a follow-up MSF (multispeed fiber) task on success.  On any
 * failure the MSF request is cleared instead.
 * NOTE(review): lines appear elided (return type, braces, break
 * statements, the 'handle_mod_out' label, cage-empty check) -- confirm
 * against the full file.
 */
3367 ixgbe_handle_mod(void *context)
3369 if_ctx_t ctx = context;
3370 struct adapter *adapter = iflib_get_softc(ctx);
3371 struct ixgbe_hw *hw = &adapter->hw;
3372 device_t dev = iflib_get_dev(ctx);
3373 u32 err, cage_full = 0;
3375 if (adapter->hw.need_crosstalk_fix) {
3376 switch (hw->mac.type) {
3377 case ixgbe_mac_82599EB:
3378 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3381 case ixgbe_mac_X550EM_x:
3382 case ixgbe_mac_X550EM_a:
3383 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3391 goto handle_mod_out;
3394 err = hw->phy.ops.identify_sfp(hw);
3395 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3397 "Unsupported SFP+ module type was detected.\n");
3398 goto handle_mod_out;
/* 82598 resets the PHY directly; newer MACs run the SFP setup op. */
3401 if (hw->mac.type == ixgbe_mac_82598EB)
3402 err = hw->phy.ops.reset(hw);
3404 err = hw->mac.ops.setup_sfp(hw);
3406 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3408 "Setup failure - unsupported SFP+ module type.\n");
3409 goto handle_mod_out;
3411 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3415 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3416 } /* ixgbe_handle_mod */
3419 /************************************************************************
3420 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3421 ************************************************************************/
/*
 * Re-negotiate link after a multispeed-fiber event: refresh the
 * supported PHY layer, determine the advertised speeds (falling back
 * to the MAC's capabilities when none are configured), set up the
 * link, and rebuild the ifmedia list so ifconfig reflects the new
 * module's media types.
 * NOTE(review): lines appear elided (return type, braces, declarations
 * of autoneg/negotiate) -- confirm against the full file.
 */
3423 ixgbe_handle_msf(void *context)
3425 if_ctx_t ctx = context;
3426 struct adapter *adapter = iflib_get_softc(ctx);
3427 struct ixgbe_hw *hw = &adapter->hw;
3431 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3432 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3434 autoneg = hw->phy.autoneg_advertised;
3435 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3436 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3437 if (hw->mac.ops.setup_link)
3438 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3440 /* Adjust media types shown in ifconfig */
3441 ifmedia_removeall(adapter->media);
3442 ixgbe_add_media_types(adapter->ctx);
3443 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3444 } /* ixgbe_handle_msf */
3446 /************************************************************************
3447 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3448 ************************************************************************/
/*
 * Service a LASI (Link Alarm Status Interrupt) from the external PHY.
 * An over-temperature condition is reported as critical; any other
 * non-success result from handle_lasi is logged as an error.
 * NOTE(review): lines appear elided (return type, braces, the 'error'
 * declaration and else-if line) -- confirm against the full file.
 */
3450 ixgbe_handle_phy(void *context)
3452 if_ctx_t ctx = context;
3453 struct adapter *adapter = iflib_get_softc(ctx);
3454 struct ixgbe_hw *hw = &adapter->hw;
3457 error = hw->phy.ops.handle_lasi(hw);
3458 if (error == IXGBE_ERR_OVERTEMP)
3459 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3461 device_printf(adapter->dev,
3462 "Error handling LASI interrupt: %d\n", error);
3463 } /* ixgbe_handle_phy */
3465 /************************************************************************
3466 * ixgbe_if_stop - Stop the hardware
3468 * Disables all traffic on the adapter by issuing a
3469 * global reset on the MAC and deallocates TX/RX buffers.
3470 ************************************************************************/
/*
 * iflib stop entry point: quiesce the MAC via ixgbe_stop_adapter(),
 * stop the 82599 MAC link for D3 if applicable, turn off the TX laser,
 * push the link-down state to the stack, and restore RAR[0].
 * NOTE(review): lines appear elided (return type, braces) -- confirm
 * against the full file.
 */
3472 ixgbe_if_stop(if_ctx_t ctx)
3474 struct adapter *adapter = iflib_get_softc(ctx);
3475 struct ixgbe_hw *hw = &adapter->hw;
3477 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
/* Clear the stopped flag first so ixgbe_stop_adapter() runs fully. */
3480 hw->adapter_stopped = FALSE;
3481 ixgbe_stop_adapter(hw);
3482 if (hw->mac.type == ixgbe_mac_82599EB)
3483 ixgbe_stop_mac_link_on_d3_82599(hw);
3484 /* Turn off the laser - noop with no optics */
3485 ixgbe_disable_tx_laser(hw);
3487 /* Update the stack */
3488 adapter->link_up = FALSE;
3489 ixgbe_if_update_admin_status(ctx);
3491 /* reprogram the RAR[0] in case user changed it. */
3492 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3495 } /* ixgbe_if_stop */
3497 /************************************************************************
3498 * ixgbe_update_link_status - Update OS on link state
3500 * Note: Only updates the OS on the cached link state.
3501 * The real check of the hardware only happens with
3503 ************************************************************************/
/*
 * Admin task: propagate the cached link state (adapter->link_up, set
 * elsewhere by ixgbe_check_link) to the stack on transitions, pinging
 * VFs under SR-IOV; then drain and dispatch any deferred task requests
 * (MOD/MSF/MBX/FDIR/PHY) accumulated by the interrupt handlers, and
 * refresh the statistics counters.
 * NOTE(review): lines appear elided (return type, braces, a bootverbose
 * guard and the Full/Half Duplex string argument) -- confirm against
 * the full file.
 */
3505 ixgbe_if_update_admin_status(if_ctx_t ctx)
3507 struct adapter *adapter = iflib_get_softc(ctx);
3508 device_t dev = iflib_get_dev(ctx);
3510 if (adapter->link_up) {
3511 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the IXGBE_LINK_SPEED_10GB_FULL flag value. */
3513 device_printf(dev, "Link is up %d Gbps %s \n",
3514 ((adapter->link_speed == 128) ? 10 : 1),
3516 adapter->link_active = TRUE;
3517 /* Update any Flow Control changes */
3518 ixgbe_fc_enable(&adapter->hw);
3519 /* Update DMA coalescing config */
3520 ixgbe_config_dmac(adapter);
3521 /* should actually be negotiated value */
3522 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3524 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3525 ixgbe_ping_all_vfs(adapter);
3527 } else { /* Link down */
3528 if (adapter->link_active == TRUE) {
3530 device_printf(dev, "Link is Down\n");
3531 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3532 adapter->link_active = FALSE;
3533 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3534 ixgbe_ping_all_vfs(adapter);
3538 /* Handle task requests from msix_link() */
3539 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3540 ixgbe_handle_mod(ctx);
3541 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3542 ixgbe_handle_msf(ctx);
3543 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3544 ixgbe_handle_mbx(ctx);
3545 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3546 ixgbe_reinit_fdir(ctx);
3547 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3548 ixgbe_handle_phy(ctx);
3549 adapter->task_requests = 0;
3551 ixgbe_update_stats_counters(adapter);
3552 } /* ixgbe_if_update_admin_status */
3554 /************************************************************************
3555 * ixgbe_config_dmac - Configure DMA Coalescing
3556 ************************************************************************/
/*
 * Push the driver's DMA-coalescing watchdog setting into the shared
 * code's dmac_config and apply it via the MAC op.  Skipped entirely on
 * pre-X550 MACs or when the dmac_config op is absent.  Reprogramming
 * only happens when the watchdog timer or link speed has changed.
 * NOTE(review): lines appear elided (return type, braces, the early
 * return) -- confirm against the full file.
 */
3558 ixgbe_config_dmac(struct adapter *adapter)
3560 struct ixgbe_hw *hw = &adapter->hw;
3561 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3563 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
/* XOR detects any difference between cached and current values. */
3566 if (dcfg->watchdog_timer ^ adapter->dmac ||
3567 dcfg->link_speed ^ adapter->link_speed) {
3568 dcfg->watchdog_timer = adapter->dmac;
3569 dcfg->fcoe_en = FALSE;
3570 dcfg->link_speed = adapter->link_speed;
3573 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3574 dcfg->watchdog_timer, dcfg->link_speed);
3576 hw->mac.ops.dmac_config(hw);
3578 } /* ixgbe_config_dmac */
3580 /************************************************************************
3581 * ixgbe_if_enable_intr
3582 ************************************************************************/
/*
 * Enable all adapter interrupts: build the EIMS "other causes" mask
 * per MAC type and enabled features (thermal/fan/SFP/mailbox/FDIR),
 * program auto-clear (EIAC) for MSI-X while keeping link causes
 * manually cleared, then enable each RX queue vector individually.
 * NOTE(review): lines appear elided (return type, braces, declarations
 * of mask/fwsm, break statements, and the fallthrough structure of the
 * switch) -- confirm against the full file.
 */
3584 ixgbe_if_enable_intr(if_ctx_t ctx)
3586 struct adapter *adapter = iflib_get_softc(ctx);
3587 struct ixgbe_hw *hw = &adapter->hw;
3588 struct ix_rx_queue *que = adapter->rx_queues;
3591 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3593 switch (adapter->hw.mac.type) {
3594 case ixgbe_mac_82599EB:
3595 mask |= IXGBE_EIMS_ECC;
3596 /* Temperature sensor on some adapters */
3597 mask |= IXGBE_EIMS_GPI_SDP0;
3598 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3599 mask |= IXGBE_EIMS_GPI_SDP1;
3600 mask |= IXGBE_EIMS_GPI_SDP2;
3602 case ixgbe_mac_X540:
3603 /* Detect if Thermal Sensor is enabled */
3604 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3605 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3606 mask |= IXGBE_EIMS_TS;
3607 mask |= IXGBE_EIMS_ECC;
3609 case ixgbe_mac_X550:
3610 /* MAC thermal sensor is automatically enabled */
3611 mask |= IXGBE_EIMS_TS;
3612 mask |= IXGBE_EIMS_ECC;
3614 case ixgbe_mac_X550EM_x:
3615 case ixgbe_mac_X550EM_a:
3616 /* Some devices use SDP0 for important information */
3617 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3618 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3619 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3620 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3621 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3622 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3623 mask |= IXGBE_EICR_GPI_SDP0_X540;
3624 mask |= IXGBE_EIMS_ECC;
3630 /* Enable Fan Failure detection */
3631 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3632 mask |= IXGBE_EIMS_GPI_SDP1;
/* Enable SR-IOV mailbox interrupt when VFs are active. */
3634 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3635 mask |= IXGBE_EIMS_MAILBOX;
3636 /* Enable Flow Director */
3637 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3638 mask |= IXGBE_EIMS_FLOW_DIR;
3640 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3642 /* With MSI-X we use auto clear */
3643 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3644 mask = IXGBE_EIMS_ENABLE_MASK;
3645 /* Don't autoclear Link */
3646 mask &= ~IXGBE_EIMS_OTHER;
3647 mask &= ~IXGBE_EIMS_LSC;
3648 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3649 mask &= ~IXGBE_EIMS_MAILBOX;
3650 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3654 * Now enable all queues, this is done separately to
3655 * allow for handling the extended (beyond 32) MSI-X
3656 * vectors that can be used by 82599
3658 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3659 ixgbe_enable_queue(adapter, que->msix);
3661 IXGBE_WRITE_FLUSH(hw);
3663 } /* ixgbe_if_enable_intr */
3665 /************************************************************************
3666 * ixgbe_disable_intr
3667 ************************************************************************/
/*
 * Mask all adapter interrupts: clear auto-clear (EIAC) under MSI-X,
 * then write the interrupt-mask-clear registers.  82598 has a single
 * 32-bit EIMC; newer MACs also need the two EIMC_EX extension
 * registers for vectors beyond 16.
 * NOTE(review): lines appear elided (return type, braces, the else
 * line) -- confirm against the full file.
 */
3669 ixgbe_if_disable_intr(if_ctx_t ctx)
3671 struct adapter *adapter = iflib_get_softc(ctx);
3673 if (adapter->intr_type == IFLIB_INTR_MSIX)
3674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3675 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3676 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3679 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3680 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3682 IXGBE_WRITE_FLUSH(&adapter->hw);
3684 } /* ixgbe_if_disable_intr */
3686 /************************************************************************
3687 * ixgbe_link_intr_enable
3688 ************************************************************************/
/*
 * Re-arm the link-related interrupt causes (OTHER + LSC) in EIMS after
 * the admin/link handler has run.  Queue vectors are re-enabled
 * separately via ixgbe_if_rx_queue_intr_enable().
 */
3690 ixgbe_link_intr_enable(if_ctx_t ctx)
3692 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3694 /* Re-enable other interrupts */
3695 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3696 } /* ixgbe_link_intr_enable */
3698 /************************************************************************
3699 * ixgbe_if_rx_queue_intr_enable
3700 ************************************************************************/
/*
 * iflib per-queue interrupt re-enable: unmask the MSI-X vector bound
 * to RX queue 'rxqid'.  Return value line is elided in this excerpt.
 */
3702 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3704 struct adapter *adapter = iflib_get_softc(ctx);
3705 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3707 ixgbe_enable_queue(adapter, que->msix);
3710 } /* ixgbe_if_rx_queue_intr_enable */
3712 /************************************************************************
3713 * ixgbe_enable_queue
3714 ************************************************************************/
/*
 * Unmask the interrupt for one queue vector.  82598 uses the single
 * EIMS register; newer MACs split the 64-bit queue bitmap across
 * EIMS_EX(0) (low 32 vectors) and EIMS_EX(1) (high 32 vectors).
 * NOTE(review): lines appear elided (return type, braces, the 'mask'
 * declaration, the mask != 0 guards, the else line) -- confirm against
 * the full file.
 */
3716 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3718 struct ixgbe_hw *hw = &adapter->hw;
3719 u64 queue = 1ULL << vector;
3722 if (hw->mac.type == ixgbe_mac_82598EB) {
3723 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3724 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3726 mask = (queue & 0xFFFFFFFF);
3728 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3729 mask = (queue >> 32);
3731 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3733 } /* ixgbe_enable_queue */
3735 /************************************************************************
3736 * ixgbe_disable_queue
3737 ************************************************************************/
/*
 * Mask the interrupt for one queue vector -- mirror image of
 * ixgbe_enable_queue(), writing the interrupt-mask-clear registers
 * (EIMC / EIMC_EX) instead of the set registers.
 * NOTE(review): lines appear elided (return type, braces, the 'mask'
 * declaration, the mask != 0 guards, the else line) -- confirm against
 * the full file.
 */
3739 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3741 struct ixgbe_hw *hw = &adapter->hw;
3742 u64 queue = 1ULL << vector;
3745 if (hw->mac.type == ixgbe_mac_82598EB) {
3746 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3747 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3749 mask = (queue & 0xFFFFFFFF);
3751 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3752 mask = (queue >> 32);
3754 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3756 } /* ixgbe_disable_queue */
3758 /************************************************************************
3759 * ixgbe_intr - Legacy Interrupt Service Routine
3760 ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt filter.  Reads and classifies EICR:
 * spurious interrupts re-enable and return FILTER_HANDLED; otherwise
 * fan-failure, link-state-change, SFP module/MSF and external-PHY
 * causes are recorded as task requests and FILTER_SCHEDULE_THREAD is
 * returned so the deferred handlers run.
 * NOTE(review): lines appear elided (return type, braces, the
 * 'eicr == 0' spurious-interrupt test, the disable-queue call) --
 * confirm against the full file.
 */
3762 ixgbe_intr(void *arg)
3764 struct adapter *adapter = arg;
3765 struct ix_rx_queue *que = adapter->rx_queues;
3766 struct ixgbe_hw *hw = &adapter->hw;
3767 if_ctx_t ctx = adapter->ctx;
3768 u32 eicr, eicr_mask;
/* Reading EICR acknowledges (clears) the pending causes. */
3770 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3774 ixgbe_if_enable_intr(ctx);
3775 return (FILTER_HANDLED);
3778 /* Check for fan failure */
3779 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3780 (eicr & IXGBE_EICR_GPI_SDP1)) {
3781 device_printf(adapter->dev,
3782 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3783 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3786 /* Link status change */
3787 if (eicr & IXGBE_EICR_LSC) {
3788 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3789 iflib_admin_intr_deferred(ctx);
3792 if (ixgbe_is_sfp(hw)) {
3793 /* Pluggable optics-related interrupt */
3794 if (hw->mac.type >= ixgbe_mac_X540)
3795 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3797 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3799 if (eicr & eicr_mask) {
3800 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3801 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3804 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3805 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3806 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3807 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3808 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3812 /* External PHY interrupt */
3813 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3814 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3815 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3817 return (FILTER_SCHEDULE_THREAD);
3820 /************************************************************************
3821 * ixgbe_free_pci_resources
3822 ************************************************************************/
/*
 * Release PCI resources acquired during attach: the admin/link IRQ
 * (MSI-X only), each per-RX-queue IRQ, and the memory-mapped BAR.
 * NOTE(review): lines appear elided (return type, braces) -- confirm
 * against the full file.
 */
3824 ixgbe_free_pci_resources(if_ctx_t ctx)
3826 struct adapter *adapter = iflib_get_softc(ctx);
3827 struct ix_rx_queue *que = adapter->rx_queues;
3828 device_t dev = iflib_get_dev(ctx);
3830 /* Release all MSI-X queue resources */
3831 if (adapter->intr_type == IFLIB_INTR_MSIX)
3832 iflib_irq_free(ctx, &adapter->irq);
3835 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3836 iflib_irq_free(ctx, &que->que_irq);
/* Release the register BAR last, after all IRQs are gone. */
3840 if (adapter->pci_mem != NULL)
3841 bus_release_resource(dev, SYS_RES_MEMORY,
3842 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3843 } /* ixgbe_free_pci_resources */
3845 /************************************************************************
3846 * ixgbe_sysctl_flowcntl
3848 * SYSCTL wrapper around setting Flow Control
3849 ************************************************************************/
/*
 * Sysctl handler: expose the current flow-control mode, and on write
 * delegate validation and programming to ixgbe_set_flowcntl().
 * Unchanged values are a no-op.
 * NOTE(review): lines appear elided (return type, braces, the 'fc' and
 * 'error' declarations, early returns) -- confirm against the full
 * file.
 */
3851 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3853 struct adapter *adapter;
3856 adapter = (struct adapter *)arg1;
3857 fc = adapter->hw.fc.current_mode;
3859 error = sysctl_handle_int(oidp, &fc, 0, req);
/* newptr == NULL means this was a read, not a write. */
3860 if ((error) || (req->newptr == NULL))
3863 /* Don't bother if it's not changed */
3864 if (fc == adapter->hw.fc.current_mode)
3867 return ixgbe_set_flowcntl(adapter, fc);
3868 } /* ixgbe_sysctl_flowcntl */
3870 /************************************************************************
3871 * ixgbe_set_flowcntl - Set flow control
3873 * Flow control values:
3878 ************************************************************************/
/*
 * Apply a requested flow-control mode.  When pause frames are enabled
 * with multiple RX queues, per-queue drop is disabled (a stalled queue
 * must back-pressure, not drop); with flow control off, drop is
 * enabled so one full queue cannot stall the whole RX engine.
 * Autonegotiation of FC is disabled since the value is being forced.
 * NOTE(review): lines appear elided (return type, braces, the switch
 * statement line, 'case ixgbe_fc_full', default/invalid handling,
 * 'adapter->fc = fc' bookkeeping) -- confirm against the full file.
 */
3880 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3883 case ixgbe_fc_rx_pause:
3884 case ixgbe_fc_tx_pause:
3886 adapter->hw.fc.requested_mode = fc;
3887 if (adapter->num_rx_queues > 1)
3888 ixgbe_disable_rx_drop(adapter);
3891 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3892 if (adapter->num_rx_queues > 1)
3893 ixgbe_enable_rx_drop(adapter);
3899 /* Don't autoneg if forcing a value */
3900 adapter->hw.fc.disable_fc_autoneg = TRUE;
3901 ixgbe_fc_enable(&adapter->hw);
3904 } /* ixgbe_set_flowcntl */
3906 /************************************************************************
3907 * ixgbe_enable_rx_drop
3909 * Enable the hardware to drop packets when the buffer is
3910 * full. This is useful with multiqueue, so that no single
3911 * queue being full stalls the entire RX engine. We only
3912 * enable this when Multiqueue is enabled AND Flow Control
3914 ************************************************************************/
/*
 * Set SRRCTL_DROP_EN on every PF RX ring and the QDE drop bit for
 * every VF, so a full ring drops rather than stalling the RX engine.
 * NOTE(review): lines appear elided (return type, braces, the 'srrctl'
 * declaration, the QDE enable bit in the VF write) -- confirm against
 * the full file.
 */
3916 ixgbe_enable_rx_drop(struct adapter *adapter)
3918 struct ixgbe_hw *hw = &adapter->hw;
3919 struct rx_ring *rxr;
3922 for (int i = 0; i < adapter->num_rx_queues; i++) {
3923 rxr = &adapter->rx_queues[i].rxr;
3924 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3925 srrctl |= IXGBE_SRRCTL_DROP_EN;
3926 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3929 /* enable drop for each vf */
3930 for (int i = 0; i < adapter->num_vfs; i++) {
3931 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3932 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3935 } /* ixgbe_enable_rx_drop */
3937 /************************************************************************
3938 * ixgbe_disable_rx_drop
3939 ************************************************************************/
/*
 * Clear SRRCTL_DROP_EN on every PF RX ring and clear the QDE drop bit
 * for every VF -- inverse of ixgbe_enable_rx_drop(); used when flow
 * control is active so full rings back-pressure instead of dropping.
 * NOTE(review): lines appear elided (return type, braces, the 'srrctl'
 * declaration) -- confirm against the full file.
 */
3941 ixgbe_disable_rx_drop(struct adapter *adapter)
3943 struct ixgbe_hw *hw = &adapter->hw;
3944 struct rx_ring *rxr;
3947 for (int i = 0; i < adapter->num_rx_queues; i++) {
3948 rxr = &adapter->rx_queues[i].rxr;
3949 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3950 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3951 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3954 /* disable drop for each vf */
3955 for (int i = 0; i < adapter->num_vfs; i++) {
3956 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3957 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3959 } /* ixgbe_disable_rx_drop */
3961 /************************************************************************
3962 * ixgbe_sysctl_advertise
3964 * SYSCTL wrapper around setting advertised speed
3965 ************************************************************************/
/*
 * Sysctl handler: expose the current advertised-speed bitmask and on
 * write delegate validation and programming to ixgbe_set_advertise().
 * NOTE(review): lines appear elided (return type, braces, the early
 * return after sysctl_handle_int) -- confirm against the full file.
 */
3967 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3969 struct adapter *adapter;
3970 int error, advertise;
3972 adapter = (struct adapter *)arg1;
3973 advertise = adapter->advertise;
3975 error = sysctl_handle_int(oidp, &advertise, 0, req);
/* newptr == NULL means this was a read, not a write. */
3976 if ((error) || (req->newptr == NULL))
3979 return ixgbe_set_advertise(adapter, advertise);
3980 } /* ixgbe_sysctl_advertise */
3982 /************************************************************************
3983 * ixgbe_set_advertise - Control advertised link speed
3986 * 0x1 - advertise 100 Mb
3987 * 0x2 - advertise 1G
3988 * 0x4 - advertise 10G
3989 * 0x8 - advertise 10 Mb (yes, Mb)
3990 ************************************************************************/
/*
 * ixgbe_set_advertise
 *
 * Validate and program a new advertised link-speed bitmask:
 *   0x1 = 100 Mb, 0x2 = 1 Gb, 0x4 = 10 Gb, 0x8 = 10 Mb.
 * Rejected outright for backplane media and for anything that is not
 * copper or multispeed fiber; each requested bit is also checked
 * against the PHY's reported link capabilities before setup_link() is
 * called with the accumulated speed mask.
 *
 * NOTE(review): this excerpt elides the return type, the 'hw'
 * assignment, and the error-return statements inside the validation
 * branches — confirm return values against the full source.
 */
3992 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3994 device_t dev = iflib_get_dev(adapter->ctx);
3995 struct ixgbe_hw *hw;
3996 ixgbe_link_speed speed = 0;
3997 ixgbe_link_speed link_caps = 0;
3998 s32 err = IXGBE_NOT_IMPLEMENTED;
3999 bool negotiate = FALSE;
4001 /* Checks to validate new value */
4002 if (adapter->advertise == advertise) /* no change */
4007 /* No speed changes for backplane media */
4008 if (hw->phy.media_type == ixgbe_media_type_backplane)
4011 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4012 (hw->phy.multispeed_fiber))) {
4013 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n")
4017 if (advertise < 0x1 || advertise > 0xF) {
4018 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n")
/* Ask the MAC layer what the link actually supports. */
4022 if (hw->mac.ops.get_link_capabilities) {
4023 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4025 if (err != IXGBE_SUCCESS) {
4026 device_printf(dev, "Unable to determine supported advertise speeds\n")
4031 /* Set new value and report new advertised mode */
4032 if (advertise & 0x1) {
4033 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4034 device_printf(dev, "Interface does not support 100Mb advertised speed\n")
4037 speed |= IXGBE_LINK_SPEED_100_FULL;
4039 if (advertise & 0x2) {
4040 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4041 device_printf(dev, "Interface does not support 1Gb advertised speed\n")
4044 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4046 if (advertise & 0x4) {
4047 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4048 device_printf(dev, "Interface does not support 10Gb advertised speed\n")
4051 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4053 if (advertise & 0x8) {
4054 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4055 device_printf(dev, "Interface does not support 10Mb advertised speed\n")
4058 speed |= IXGBE_LINK_SPEED_10_FULL;
/* All requested bits validated: restart autoneg with the new mask. */
4061 hw->mac.autotry_restart = TRUE;
4062 hw->mac.ops.setup_link(hw, speed, TRUE);
4063 adapter->advertise = advertise;
4066 } /* ixgbe_set_advertise */
4068 /************************************************************************
4069 * ixgbe_get_advertise - Get current advertised speed settings
4071 * Formatted for sysctl usage.
4073 * 0x1 - advertise 100 Mb
4074 * 0x2 - advertise 1G
4075 * 0x4 - advertise 10G
4076 * 0x8 - advertise 10 Mb (yes, Mb)
4077 ************************************************************************/
/*
 * ixgbe_get_advertise
 *
 * Build the sysctl-format advertised-speed bitmask (0x1 = 100 Mb,
 * 0x2 = 1 Gb, 0x4 = 10 Gb, 0x8 = 10 Mb) from the PHY's reported link
 * capabilities. Returns 0 (visible via the elided early returns) when
 * the media is neither copper nor multispeed fiber, or when the
 * capability query fails.
 *
 * NOTE(review): the return type, the 'speed'/'err' declarations, and
 * the "speed =" assignment head are elided in this excerpt.
 */
4079 ixgbe_get_advertise(struct adapter *adapter)
4081 struct ixgbe_hw *hw = &adapter->hw;
4083 ixgbe_link_speed link_caps = 0;
4085 bool negotiate = FALSE;
4088 * Advertised speed means nothing unless it's copper or
4091 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4092 !(hw->phy.multispeed_fiber))
4095 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4096 if (err != IXGBE_SUCCESS)
/* Translate capability flags into the sysctl bit encoding. */
4100 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4101 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4102 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4103 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4106 } /* ixgbe_get_advertise */
4108 /************************************************************************
4109 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4112 * 0/1 - off / on (use default value of 1000)
4114 * Legal timer values are:
4115 * 50,100,250,500,1000,2000,5000,10000
4117 * Turning off interrupt moderation will also turn this off.
4118 ************************************************************************/
/*
 * ixgbe_sysctl_dmac
 *
 * sysctl(9) handler for DMA Coalescing: 0 disables, 1 selects the
 * default timer (1000), and the discrete legal timer values
 * (50..10000, per the banner above) are stored as-is. The hardware is
 * re-initialized afterwards if the interface is running.
 *
 * NOTE(review): the switch statement around the elided lines (the
 * "case" labels, the disable path, and the default/error return) is
 * not visible in this excerpt.
 */
4120 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4122 struct adapter *adapter = (struct adapter *)arg1;
4123 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4127 newval = adapter->dmac;
4128 error = sysctl_handle_16(oidp, &newval, 0, req);
4129 if ((error) || (req->newptr == NULL))
4138 /* Enable and use default */
4139 adapter->dmac = 1000;
4149 /* Legal values - allow */
4150 adapter->dmac = newval;
4153 /* Do nothing, illegal value */
4157 /* Re-initialize hardware if it's already running */
4158 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4162 } /* ixgbe_sysctl_dmac */
4165 /************************************************************************
4166 * ixgbe_sysctl_power_state
4168 * Sysctl to test power states
4170 * 0 - set device to D0
4171 * 3 - set device to D3
4172 * (none) - get current device power state
4173 ************************************************************************/
/*
 * ixgbe_sysctl_power_state
 *
 * sysctl(9) handler for testing PCI power states: reading reports the
 * current state; writing 3 from D0 suspends the device, writing 0 from
 * D3 resumes it. Other transitions fall through to the elided branch
 * (presumably an error). Prints the resulting state on success.
 *
 * NOTE(review): return type, the invalid-transition branch, and the
 * final return are elided in this excerpt.
 */
4175 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4177 struct adapter *adapter = (struct adapter *)arg1;
4178 device_t dev = adapter->dev;
4179 int curr_ps, new_ps, error = 0;
4181 curr_ps = new_ps = pci_get_powerstate(dev);
4183 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4184 if ((error) || (req->newptr == NULL))
4187 if (new_ps == curr_ps)
/* Only the D0<->D3 transitions are supported. */
4190 if (new_ps == 3 && curr_ps == 0)
4191 error = DEVICE_SUSPEND(dev);
4192 else if (new_ps == 0 && curr_ps == 3)
4193 error = DEVICE_RESUME(dev);
4197 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4200 } /* ixgbe_sysctl_power_state */
4203 /************************************************************************
4204 * ixgbe_sysctl_wol_enable
4206 * Sysctl to enable/disable the WoL capability,
4207 * if supported by the adapter.
4212 ************************************************************************/
/*
 * ixgbe_sysctl_wol_enable
 *
 * sysctl(9) handler to toggle Wake-on-LAN. The written value is
 * normalized to 0/1; enabling is refused (elided error return) when
 * the adapter lacks WoL support.
 *
 * NOTE(review): the 'error' declaration, the early returns, and the
 * final return are elided in this excerpt.
 */
4214 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4216 struct adapter *adapter = (struct adapter *)arg1;
4217 struct ixgbe_hw *hw = &adapter->hw;
4218 int new_wol_enabled;
4221 new_wol_enabled = hw->wol_enabled;
4222 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4223 if ((error) || (req->newptr == NULL))
/* Collapse any nonzero write to 1. */
4225 new_wol_enabled = !!(new_wol_enabled);
4226 if (new_wol_enabled == hw->wol_enabled)
4229 if (new_wol_enabled > 0 && !adapter->wol_support)
4232 hw->wol_enabled = new_wol_enabled;
4235 } /* ixgbe_sysctl_wol_enable */
4237 /************************************************************************
4238 * ixgbe_sysctl_wufc - Wake Up Filter Control
4240 * Sysctl to enable/disable the types of packets that the
4241 * adapter will wake up on upon receipt.
4243 * 0x1 - Link Status Change
4244 * 0x2 - Magic Packet
4245 * 0x4 - Direct Exact
4246 * 0x8 - Directed Multicast
4248 * 0x20 - ARP/IPv4 Request Packet
4249 * 0x40 - Direct IPv4 Packet
4250 * 0x80 - Direct IPv6 Packet
4252 * Settings not listed above will cause the sysctl to return an error.
4253 ************************************************************************/
/*
 * ixgbe_sysctl_wufc
 *
 * sysctl(9) handler for the Wake-Up Filter Control bitmask (see the
 * banner above for bit meanings). Writes with any bit above 0xff set
 * are rejected (elided error return); the accepted low byte is merged
 * with the preserved upper bits of the existing wufc value.
 *
 * NOTE(review): the 'new_wufc' declaration, early returns, and final
 * return are elided in this excerpt.
 */
4255 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4257 struct adapter *adapter = (struct adapter *)arg1;
4261 new_wufc = adapter->wufc;
4263 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4264 if ((error) || (req->newptr == NULL))
4266 if (new_wufc == adapter->wufc)
/* Only the low byte holds user-settable filter bits. */
4269 if (new_wufc & 0xffffff00)
4273 new_wufc |= (0xffffff & adapter->wufc);
4274 adapter->wufc = new_wufc;
4277 } /* ixgbe_sysctl_wufc */
4280 /************************************************************************
4281 * ixgbe_sysctl_print_rss_config
4282 ************************************************************************/
/*
 * ixgbe_sysctl_print_rss_config (debug-only; inside #ifdef IXGBE_DEBUG)
 *
 * sysctl(9) handler that dumps the RSS redirection table (RETA, plus
 * ERETA entries beyond index 31 on X550-class MACs) into an sbuf for
 * the caller to read.
 *
 * NOTE(review): this excerpt elides the 'buf'/'reg' declarations, the
 * reta_size assignments in the MAC-type switch, the i<32/else split in
 * the print loop, sbuf_delete(), and the final return.
 */
4284 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4286 struct adapter *adapter = (struct adapter *)arg1;
4287 struct ixgbe_hw *hw = &adapter->hw;
4288 device_t dev = adapter->dev;
4290 int error = 0, reta_size;
4293 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4295 device_printf(dev, "Could not allocate sbuf for output.\n")
4299 // TODO: use sbufs to make a string to print out
4300 /* Set multiplier for RETA setup and table size based on MAC */
4301 switch (adapter->hw.mac.type) {
4302 case ixgbe_mac_X550:
4303 case ixgbe_mac_X550EM_x:
4304 case ixgbe_mac_X550EM_a:
4312 /* Print out the redirection table */
4313 sbuf_cat(buf, "\n")
4314 for (int i = 0; i < reta_size; i++) {
/* Entries 0-31 live in RETA; 32+ live in ERETA (X550 family). */
4316 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4317 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4319 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4320 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4324 // TODO: print more config
4326 error = sbuf_finish(buf);
4328 device_printf(dev, "Error finishing sbuf: %d\n", error);
4333 } /* ixgbe_sysctl_print_rss_config */
4334 #endif /* IXGBE_DEBUG */
4336 /************************************************************************
4337 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4339 * For X552/X557-AT devices using an external PHY
4340 ************************************************************************/
/*
 * ixgbe_sysctl_phy_temp
 *
 * sysctl(9) handler reporting the external PHY temperature on
 * X552/X557-AT (device id X550EM_X_10G_T only); other devices get an
 * error. Reads IXGBE_PHY_CURRENT_TEMP over MDIO and hands the (elided
 * shift-adjusted) value to sysctl_handle_16() read-only.
 *
 * NOTE(review): the 'reg' declaration, error returns, and the shift
 * line are elided. Also, "®)" on the read_reg call below looks like a
 * mis-encoded "&reg)" (HTML-entity corruption) — fix in the full
 * source, not here.
 */
4342 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4344 struct adapter *adapter = (struct adapter *)arg1;
4345 struct ixgbe_hw *hw = &adapter->hw;
4348 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4349 device_printf(iflib_get_dev(adapter->ctx),
4350 "Device has no supported external thermal sensor.\n")
4354 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4355 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4356 device_printf(iflib_get_dev(adapter->ctx),
4357 "Error reading from PHY's current temperature register\n")
4361 /* Shift temp for output */
4364 return (sysctl_handle_16(oidp, NULL, reg, req));
4365 } /* ixgbe_sysctl_phy_temp */
4367 /************************************************************************
4368 * ixgbe_sysctl_phy_overtemp_occurred
4370 * Reports (directly from the PHY) whether the current PHY
4371 * temperature is over the overtemp threshold.
4372 ************************************************************************/
/*
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * sysctl(9) handler reporting, directly from the external PHY on
 * X552/X557-AT, whether the overtemp threshold has been crossed:
 * reads IXGBE_PHY_OVERTEMP_STATUS and returns bit 0x4000 as 0/1.
 *
 * NOTE(review): the 'reg' declaration and error returns are elided.
 * As in ixgbe_sysctl_phy_temp, "®)" below appears to be mis-encoded
 * "&reg)" — fix in the full source, not here.
 */
4374 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4376 struct adapter *adapter = (struct adapter *)arg1;
4377 struct ixgbe_hw *hw = &adapter->hw;
4380 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4381 device_printf(iflib_get_dev(adapter->ctx),
4382 "Device has no supported external thermal sensor.\n")
4386 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4387 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4388 device_printf(iflib_get_dev(adapter->ctx),
4389 "Error reading from PHY's temperature status register\n")
4393 /* Get occurrence bit */
4394 reg = !!(reg & 0x4000);
4396 return (sysctl_handle_16(oidp, 0, reg, req));
4397 } /* ixgbe_sysctl_phy_overtemp_occurred */
4399 /************************************************************************
4400 * ixgbe_sysctl_eee_state
4402 * Sysctl to set EEE power saving feature
4406 * (none) - get current device EEE state
4407 ************************************************************************/
/*
 * ixgbe_sysctl_eee_state
 *
 * sysctl(9) handler to toggle Energy Efficient Ethernet. Validates
 * that the feature is supported (feat_cap) and the value is 0/1,
 * programs the hardware via ixgbe_setup_eee(), restarts autoneg
 * (elided), and finally caches the new state into feat_en.
 *
 * NOTE(review): 'retval' declaration, early returns, the autoneg
 * restart call, the if/else heads around the feat_en update, and the
 * final return are elided in this excerpt.
 */
4409 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4411 struct adapter *adapter = (struct adapter *)arg1;
4412 device_t dev = adapter->dev;
4413 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4414 int curr_eee, new_eee, error = 0;
/* Current state is derived from the EEE bit in feat_en. */
4417 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4419 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4420 if ((error) || (req->newptr == NULL))
4424 if (new_eee == curr_eee)
4428 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4431 /* Bounds checking */
4432 if ((new_eee < 0) || (new_eee > 1))
4435 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4437 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4441 /* Restart auto-neg */
4444 device_printf(dev, "New EEE state: %d\n", new_eee);
4446 /* Cache new value */
4448 adapter->feat_en |= IXGBE_FEATURE_EEE;
4450 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4453 } /* ixgbe_sysctl_eee_state */
4455 /************************************************************************
4456 * ixgbe_init_device_features
4457 ************************************************************************/
/*
 * ixgbe_init_device_features
 *
 * Populate adapter->feat_cap (what the hardware can do, keyed off MAC
 * type and device id) and adapter->feat_en (what is actually enabled:
 * defaults, global-sysctl opt-ins, then dependency pruning — features
 * requiring MSI-X are stripped when MSI-X is unavailable).
 *
 * NOTE(review): this excerpt elides the return type, several 'break'
 * statements and fall-through markers in the switch, the default
 * case, and some brace lines — do not infer fallthrough behavior from
 * what is visible here.
 */
4459 ixgbe_init_device_features(struct adapter *adapter)
/* Baseline capabilities common to every supported MAC. */
4461 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4464 | IXGBE_FEATURE_MSIX
4465 | IXGBE_FEATURE_LEGACY_IRQ;
4467 /* Set capabilities first... */
4468 switch (adapter->hw.mac.type) {
4469 case ixgbe_mac_82598EB:
4470 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4471 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4473 case ixgbe_mac_X540:
4474 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4475 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4476 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4477 (adapter->hw.bus.func == 0))
4478 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4480 case ixgbe_mac_X550:
4481 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4482 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4485 case ixgbe_mac_X550EM_x:
4486 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4487 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4489 case ixgbe_mac_X550EM_a:
4490 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4491 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a has no INTx support; only the 1G_T variants add sensors/EEE. */
4492 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4493 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4494 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4495 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4496 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4499 case ixgbe_mac_82599EB:
4500 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4501 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4502 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4503 (adapter->hw.bus.func == 0))
4504 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4505 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4506 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4512 /* Enabled by default... */
4513 /* Fan failure detection */
4514 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4515 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4517 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4518 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4520 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4521 adapter->feat_en |= IXGBE_FEATURE_EEE;
4522 /* Thermal Sensor */
4523 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4524 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4526 /* Enabled via global sysctl... */
4528 if (ixgbe_enable_fdir) {
4529 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4530 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4532 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.")
4535 * Message Signal Interrupts - Extended (MSI-X)
4536 * Normal MSI is only enabled if MSI-X calls fail.
4538 if (!ixgbe_enable_msix)
4539 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4540 /* Receive-Side Scaling (RSS) */
4541 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4542 adapter->feat_en |= IXGBE_FEATURE_RSS;
4544 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X; strip them from cap and en. */
4546 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4547 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4548 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4549 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4550 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4552 } /* ixgbe_init_device_features */
4554 /************************************************************************
4555 * ixgbe_check_fan_failure
4556 ************************************************************************/
/*
 * ixgbe_check_fan_failure
 *
 * Test 'reg' against the fan-failure GPI bit — chosen per-MAC via
 * IXGBE_EICR_GPI_SDP1_BY_MAC() in interrupt context, or the (elided)
 * alternate mask otherwise — and log a critical message if set.
 *
 * NOTE(review): the return type, the 'mask' declaration, the
 * non-interrupt mask operand, and the enclosing if are elided in this
 * excerpt.
 */
4558 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4562 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4566 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n")
4567 } /* ixgbe_check_fan_failure */