]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixgbe/if_ix.c
ixgbe: workaround errata about UDP frames with zero checksum
[FreeBSD/FreeBSD.git] / sys / dev / ixgbe / if_ix.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_rss.h"
38
39 #include "ixgbe.h"
40 #include "ixgbe_sriov.h"
41 #include "ifdi_if.h"
42
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
45
46 /************************************************************************
47  * Driver version
48  ************************************************************************/
49 char ixgbe_driver_version[] = "4.0.1-k";
50
51 /************************************************************************
52  * PCI Device ID Table
53  *
54  *   Used by probe to select devices to load on
55  *   Last field stores an index into ixgbe_strings
56  *   Last entry must be all 0s
57  *
58  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59  ************************************************************************/
60 static pci_vendor_info_t ixgbe_vendor_info_array[] =
61 {
62   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) 82598EB AF (Dual Fiber)"),
63   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) 82598EB AF (Fiber)"),
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) 82598EB AT (CX4)"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) 82598EB AT"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) 82598EB AT2"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) 82598"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) 82598EB AF DA (Dual Fiber)"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) 82598EB AT (Dual CX4)"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) 82598EB AF (Dual Fiber LR)"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) 82598EB AF (Dual Fiber SR)"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) 82598EB LOM"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) X520 82599 (KX4)"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) X520 82599 (KX4 Mezzanine)"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) X520 82599ES (SFI/SFP+)"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) X520 82599 (XAUI/BX4)"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) X520 82599 (Dual CX4)"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) X520-T 82599 LOM"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) X520 82599 (Combined Backplane)"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) X520 82599 (Backplane w/FCoE)"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) X520 82599 (Dual SFP+)"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) X520-1 82599EN (SFP+)"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) X520-4 82599 (Quad SFP+)"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) X520-Q1 82599 (QSFP+)"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) X540-AT2"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) X540-T1"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) X550-T2"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) X552 (KR Backplane)"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) X552 (KX4 Backplane)"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) X552/X557-AT (10GBASE-T)"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) X552 (1000BASE-T)"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
106         /* required last entry */
107   PVID_END
108 };
109
110 static void *ixgbe_register(device_t);
111 static int  ixgbe_if_attach_pre(if_ctx_t);
112 static int  ixgbe_if_attach_post(if_ctx_t);
113 static int  ixgbe_if_detach(if_ctx_t);
114 static int  ixgbe_if_shutdown(if_ctx_t);
115 static int  ixgbe_if_suspend(if_ctx_t);
116 static int  ixgbe_if_resume(if_ctx_t);
117
118 static void ixgbe_if_stop(if_ctx_t);
119 void ixgbe_if_enable_intr(if_ctx_t);
120 static void ixgbe_if_disable_intr(if_ctx_t);
121 static void ixgbe_link_intr_enable(if_ctx_t);
122 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
124 static int  ixgbe_if_media_change(if_ctx_t);
125 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
128 static void ixgbe_if_multi_set(if_ctx_t);
129 static int  ixgbe_if_promisc_set(if_ctx_t, int);
130 static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
131 static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
132 static void ixgbe_if_queues_free(if_ctx_t);
133 static void ixgbe_if_timer(if_ctx_t, uint16_t);
134 static void ixgbe_if_update_admin_status(if_ctx_t);
135 static void ixgbe_if_vlan_register(if_ctx_t, u16);
136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
137 static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
139 int ixgbe_intr(void *);
140
141 /************************************************************************
142  * Function prototypes
143  ************************************************************************/
144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
145
146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
148 static void ixgbe_add_device_sysctls(if_ctx_t);
149 static int  ixgbe_allocate_pci_resources(if_ctx_t);
150 static int  ixgbe_setup_low_power_mode(if_ctx_t);
151
152 static void ixgbe_config_dmac(struct ixgbe_softc *);
153 static void ixgbe_configure_ivars(struct ixgbe_softc *);
154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
155 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
156 static bool ixgbe_sfp_probe(if_ctx_t);
157
158 static void ixgbe_free_pci_resources(if_ctx_t);
159
160 static int  ixgbe_msix_link(void *);
161 static int  ixgbe_msix_que(void *);
162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
163 static void ixgbe_initialize_receive_units(if_ctx_t);
164 static void ixgbe_initialize_transmit_units(if_ctx_t);
165
166 static int  ixgbe_setup_interface(if_ctx_t);
167 static void ixgbe_init_device_features(struct ixgbe_softc *);
168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
170 static void ixgbe_print_fw_version(if_ctx_t);
171 static void ixgbe_add_media_types(if_ctx_t);
172 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
173 static void ixgbe_config_link(if_ctx_t);
174 static void ixgbe_get_slot_info(struct ixgbe_softc *);
175 static void ixgbe_check_wol_support(struct ixgbe_softc *);
176 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
177 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
178
179 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
180 static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
181 static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
182 static int  ixgbe_get_advertise(struct ixgbe_softc *);
183 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
184 static void ixgbe_config_gpie(struct ixgbe_softc *);
185 static void ixgbe_config_delay_values(struct ixgbe_softc *);
186
187 /* Sysctl handlers */
188 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
189 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
190 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
191 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
195 #ifdef IXGBE_DEBUG
196 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
198 #endif
199 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
206
207 /* Deferred interrupt tasklets */
208 static void ixgbe_handle_msf(void *);
209 static void ixgbe_handle_mod(void *);
210 static void ixgbe_handle_phy(void *);
211
212 /************************************************************************
213  *  FreeBSD Device Interface Entry Points
214  ************************************************************************/
215 static device_method_t ix_methods[] = {
216         /* Device interface */
217         DEVMETHOD(device_register, ixgbe_register),
218         DEVMETHOD(device_probe, iflib_device_probe),
219         DEVMETHOD(device_attach, iflib_device_attach),
220         DEVMETHOD(device_detach, iflib_device_detach),
221         DEVMETHOD(device_shutdown, iflib_device_shutdown),
222         DEVMETHOD(device_suspend, iflib_device_suspend),
223         DEVMETHOD(device_resume, iflib_device_resume),
224 #ifdef PCI_IOV
225         DEVMETHOD(pci_iov_init, iflib_device_iov_init),
226         DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
227         DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
228 #endif /* PCI_IOV */
229         DEVMETHOD_END
230 };
231
232 static driver_t ix_driver = {
233         "ix", ix_methods, sizeof(struct ixgbe_softc),
234 };
235
236 devclass_t ix_devclass;
237 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
238 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
239 MODULE_DEPEND(ix, pci, 1, 1, 1);
240 MODULE_DEPEND(ix, ether, 1, 1, 1);
241 MODULE_DEPEND(ix, iflib, 1, 1, 1);
242
243 static device_method_t ixgbe_if_methods[] = {
244         DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
245         DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
246         DEVMETHOD(ifdi_detach, ixgbe_if_detach),
247         DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
248         DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
249         DEVMETHOD(ifdi_resume, ixgbe_if_resume),
250         DEVMETHOD(ifdi_init, ixgbe_if_init),
251         DEVMETHOD(ifdi_stop, ixgbe_if_stop),
252         DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
253         DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
254         DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
255         DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
256         DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257         DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258         DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
259         DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
260         DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
261         DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
262         DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
263         DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
264         DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
265         DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
266         DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
267         DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
268         DEVMETHOD(ifdi_timer, ixgbe_if_timer),
269         DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
270         DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
271         DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
272         DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
273         DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
274 #ifdef PCI_IOV
275         DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276         DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277         DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
278 #endif /* PCI_IOV */
279         DEVMETHOD_END
280 };
281
282 /*
283  * TUNEABLE PARAMETERS:
284  */
285
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
287 static driver_t ixgbe_if_driver = {
288   "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
289 };
290
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
294
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
299
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
304
305 /*
306  * Smart speed setting, default to on
307  * this only works as a compile option
308  * right now as its during attach, set
309  * this to 'ixgbe_smart_speed_off' to
310  * disable.
311  */
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
313
314 /*
315  * MSI-X should be the default for best performance,
316  * but this allows it to be forced off for testing.
317  */
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320     "Enable MSI-X interrupts");
321
322 /*
323  * Defining this on will allow the use
324  * of unsupported SFP+ modules, note that
325  * doing so you are on your own :)
326  */
327 static int allow_unsupported_sfp = false;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329     &allow_unsupported_sfp, 0,
330     "Allow unsupported SFP modules...use at your own risk");
331
332 /*
333  * Not sure if Flow Director is fully baked,
334  * so we'll default to turning it off.
335  */
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338     "Enable Flow Director");
339
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343     "Enable Receive-Side Scaling (RSS)");
344
345 #if 0
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
348 #endif
349
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
351
352 /*
353  * For Flow Director: this is the number of TX packets we sample
354  * for the filter pool, this means every 20th packet will be probed.
355  *
356  * This feature can be disabled by setting this to 0.
357  */
358 static int atr_sample_rate = 20;
359
360 extern struct if_txrx ixgbe_txrx;
361
362 static struct if_shared_ctx ixgbe_sctx_init = {
363         .isc_magic = IFLIB_MAGIC,
364         .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
365         .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
366         .isc_tx_maxsegsize = PAGE_SIZE,
367         .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368         .isc_tso_maxsegsize = PAGE_SIZE,
369         .isc_rx_maxsize = PAGE_SIZE*4,
370         .isc_rx_nsegments = 1,
371         .isc_rx_maxsegsize = PAGE_SIZE*4,
372         .isc_nfl = 1,
373         .isc_ntxqs = 1,
374         .isc_nrxqs = 1,
375
376         .isc_admin_intrcnt = 1,
377         .isc_vendor_info = ixgbe_vendor_info_array,
378         .isc_driver_version = ixgbe_driver_version,
379         .isc_driver = &ixgbe_if_driver,
380         .isc_flags = IFLIB_TSO_INIT_IP,
381
382         .isc_nrxd_min = {MIN_RXD},
383         .isc_ntxd_min = {MIN_TXD},
384         .isc_nrxd_max = {MAX_RXD},
385         .isc_ntxd_max = {MAX_TXD},
386         .isc_nrxd_default = {DEFAULT_RXD},
387         .isc_ntxd_default = {DEFAULT_TXD},
388 };
389
390 /************************************************************************
391  * ixgbe_if_tx_queues_alloc
392  ************************************************************************/
393 static int
394 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
395     int ntxqs, int ntxqsets)
396 {
397         struct ixgbe_softc *sc = iflib_get_softc(ctx);
398         if_softc_ctx_t     scctx = sc->shared;
399         struct ix_tx_queue *que;
400         int                i, j, error;
401
402         MPASS(sc->num_tx_queues > 0);
403         MPASS(sc->num_tx_queues == ntxqsets);
404         MPASS(ntxqs == 1);
405
406         /* Allocate queue structure memory */
407         sc->tx_queues =
408             (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
409                                          M_IXGBE, M_NOWAIT | M_ZERO);
410         if (!sc->tx_queues) {
411                 device_printf(iflib_get_dev(ctx),
412                     "Unable to allocate TX ring memory\n");
413                 return (ENOMEM);
414         }
415
416         for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
417                 struct tx_ring *txr = &que->txr;
418
419                 /* In case SR-IOV is enabled, align the index properly */
420                 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
421                     i);
422
423                 txr->sc = que->sc = sc;
424
425                 /* Allocate report status array */
426                 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
427                 if (txr->tx_rsq == NULL) {
428                         error = ENOMEM;
429                         goto fail;
430                 }
431                 for (j = 0; j < scctx->isc_ntxd[0]; j++)
432                         txr->tx_rsq[j] = QIDX_INVALID;
433                 /* get the virtual and physical address of the hardware queues */
434                 txr->tail = IXGBE_TDT(txr->me);
435                 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
436                 txr->tx_paddr = paddrs[i];
437
438                 txr->bytes = 0;
439                 txr->total_packets = 0;
440
441                 /* Set the rate at which we sample packets */
442                 if (sc->feat_en & IXGBE_FEATURE_FDIR)
443                         txr->atr_sample = atr_sample_rate;
444
445         }
446
447         device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
448             sc->num_tx_queues);
449
450         return (0);
451
452 fail:
453         ixgbe_if_queues_free(ctx);
454
455         return (error);
456 } /* ixgbe_if_tx_queues_alloc */
457
458 /************************************************************************
459  * ixgbe_if_rx_queues_alloc
460  ************************************************************************/
461 static int
462 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
463     int nrxqs, int nrxqsets)
464 {
465         struct ixgbe_softc     *sc = iflib_get_softc(ctx);
466         struct ix_rx_queue *que;
467         int                i;
468
469         MPASS(sc->num_rx_queues > 0);
470         MPASS(sc->num_rx_queues == nrxqsets);
471         MPASS(nrxqs == 1);
472
473         /* Allocate queue structure memory */
474         sc->rx_queues =
475             (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
476                                          M_IXGBE, M_NOWAIT | M_ZERO);
477         if (!sc->rx_queues) {
478                 device_printf(iflib_get_dev(ctx),
479                     "Unable to allocate TX ring memory\n");
480                 return (ENOMEM);
481         }
482
483         for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
484                 struct rx_ring *rxr = &que->rxr;
485
486                 /* In case SR-IOV is enabled, align the index properly */
487                 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
488                     i);
489
490                 rxr->sc = que->sc = sc;
491
492                 /* get the virtual and physical address of the hw queues */
493                 rxr->tail = IXGBE_RDT(rxr->me);
494                 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
495                 rxr->rx_paddr = paddrs[i];
496                 rxr->bytes = 0;
497                 rxr->que = que;
498         }
499
500         device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
501             sc->num_rx_queues);
502
503         return (0);
504 } /* ixgbe_if_rx_queues_alloc */
505
506 /************************************************************************
507  * ixgbe_if_queues_free
508  ************************************************************************/
509 static void
510 ixgbe_if_queues_free(if_ctx_t ctx)
511 {
512         struct ixgbe_softc     *sc = iflib_get_softc(ctx);
513         struct ix_tx_queue *tx_que = sc->tx_queues;
514         struct ix_rx_queue *rx_que = sc->rx_queues;
515         int                i;
516
517         if (tx_que != NULL) {
518                 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
519                         struct tx_ring *txr = &tx_que->txr;
520                         if (txr->tx_rsq == NULL)
521                                 break;
522
523                         free(txr->tx_rsq, M_IXGBE);
524                         txr->tx_rsq = NULL;
525                 }
526
527                 free(sc->tx_queues, M_IXGBE);
528                 sc->tx_queues = NULL;
529         }
530         if (rx_que != NULL) {
531                 free(sc->rx_queues, M_IXGBE);
532                 sc->rx_queues = NULL;
533         }
534 } /* ixgbe_if_queues_free */
535
536 /************************************************************************
537  * ixgbe_initialize_rss_mapping
538  ************************************************************************/
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), hash key (RSSRK)
 *   and hash-type selection (MRQC) registers.  When the kernel RSS
 *   option is active the key and bucket mapping come from the stack;
 *   otherwise a random key and a simple round-robin mapping are used.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        u32             reta = 0, mrqc, rss_key[10];
        int             queue_id, table_size, index_mult;
        int             i, j;
        u32             rss_hash_config;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                arc4rand(&rss_key, sizeof(rss_key), 0);
        }

        /* Set multiplier for RETA setup and table size based on MAC */
        index_mult = 0x1;
        table_size = 128;
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82598EB:
                /* 82598 spaces queue indices by 0x11 in its RETA encoding */
                index_mult = 0x11;
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* X550 family has a 512-entry table (128 RETA + 384 ERETA) */
                table_size = 512;
                break;
        default:
                break;
        }

        /* Set up the redirection table */
        for (i = 0, j = 0; i < table_size; i++, j++) {
                /* j cycles round-robin over the RX queues */
                if (j == sc->num_rx_queues)
                        j = 0;

                if (sc->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_rx_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % sc->num_rx_queues;
                } else
                        queue_id = (j * index_mult);

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 * Accumulate four 8-bit entries, then write one 32-bit
                 * register per group of four.
                 */
                reta = reta >> 8;
                reta = reta | (((uint32_t)queue_id) << 24);
                if ((i & 3) == 3) {
                        /* Entries 0-127 go to RETA, 128+ to ERETA */
                        if (i < 128)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                        else
                                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                                    reta);
                        reta = 0;
                }
        }

        /* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

        /* Perform hash on these packet types */
        if (sc->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                                | RSS_HASHTYPE_RSS_TCP_IPV4
                                | RSS_HASHTYPE_RSS_IPV6
                                | RSS_HASHTYPE_RSS_TCP_IPV6
                                | RSS_HASHTYPE_RSS_IPV6_EX
                                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
        }

        /* Translate stack hash-type flags into MRQC field-enable bits */
        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        /* Merge in SR-IOV pool selection bits when VFs are active */
        mrqc |= ixgbe_get_mrqc(sc->iov_mode);
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
647
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round an mbuf buffer size up to the next SRRCTL.BSIZEPKT (1KB) unit */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ixgbe_hw    *hw = &sc->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 additionally discards pause frames and passes
		 * MAC control frames; later MACs handle this elsewhere. */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size expressed in SRRCTL.BSIZEPKT units */
	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		/* Hardware ring index for this software queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split header types (MACs newer than 82598 only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
762
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring     *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		/* Hardware ring index for this software queue */
		int j = txr->me;

		/* Program ring base (low/high) and byte length */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Reset the software report-status (RS bit) bookkeeping */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the transmit DMA engine (MACs newer than 82598) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		/* Re-enable the arbiter now that MTQC is programmed */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */
844
/************************************************************************
 * ixgbe_register
 *
 *   Return the driver's shared iflib context descriptor; iflib calls
 *   this during device registration.  The device argument is unused.
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */
853
854 /************************************************************************
855  * ixgbe_if_attach_pre - Device initialization routine, part 1
856  *
857  *   Called when the driver is being loaded.
858  *   Identifies the type of hardware, initializes the hardware,
859  *   and initializes iflib structures.
860  *
861  *   return 0 on success, positive on failure
862  ************************************************************************/
863 static int
864 ixgbe_if_attach_pre(if_ctx_t ctx)
865 {
866         struct ixgbe_softc  *sc;
867         device_t        dev;
868         if_softc_ctx_t  scctx;
869         struct ixgbe_hw *hw;
870         int             error = 0;
871         u32             ctrl_ext;
872
873         INIT_DEBUGOUT("ixgbe_attach: begin");
874
875         /* Allocate, clear, and link in our adapter structure */
876         dev = iflib_get_dev(ctx);
877         sc = iflib_get_softc(ctx);
878         sc->hw.back = sc;
879         sc->ctx = ctx;
880         sc->dev = dev;
881         scctx = sc->shared = iflib_get_softc_ctx(ctx);
882         sc->media = iflib_get_media(ctx);
883         hw = &sc->hw;
884
885         /* Determine hardware revision */
886         hw->vendor_id = pci_get_vendor(dev);
887         hw->device_id = pci_get_device(dev);
888         hw->revision_id = pci_get_revid(dev);
889         hw->subsystem_vendor_id = pci_get_subvendor(dev);
890         hw->subsystem_device_id = pci_get_subdevice(dev);
891
892         /* Do base PCI setup - map BAR0 */
893         if (ixgbe_allocate_pci_resources(ctx)) {
894                 device_printf(dev, "Allocation of PCI resources failed\n");
895                 return (ENXIO);
896         }
897
898         /* let hardware know driver is loaded */
899         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
900         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
901         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
902
903         /*
904          * Initialize the shared code
905          */
906         if (ixgbe_init_shared_code(hw) != 0) {
907                 device_printf(dev, "Unable to initialize the shared code\n");
908                 error = ENXIO;
909                 goto err_pci;
910         }
911
912         if (hw->mbx.ops.init_params)
913                 hw->mbx.ops.init_params(hw);
914
915         hw->allow_unsupported_sfp = allow_unsupported_sfp;
916
917         if (hw->mac.type != ixgbe_mac_82598EB)
918                 hw->phy.smart_speed = ixgbe_smart_speed;
919
920         ixgbe_init_device_features(sc);
921
922         /* Enable WoL (if supported) */
923         ixgbe_check_wol_support(sc);
924
925         /* Verify adapter fan is still functional (if applicable) */
926         if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
927                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
928                 ixgbe_check_fan_failure(sc, esdp, false);
929         }
930
931         /* Ensure SW/FW semaphore is free */
932         ixgbe_init_swfw_semaphore(hw);
933
934         /* Set an initial default flow control value */
935         hw->fc.requested_mode = ixgbe_flow_control;
936
937         hw->phy.reset_if_overtemp = true;
938         error = ixgbe_reset_hw(hw);
939         hw->phy.reset_if_overtemp = false;
940         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
941                 /*
942                  * No optics in this port, set up
943                  * so the timer routine will probe
944                  * for later insertion.
945                  */
946                 sc->sfp_probe = true;
947                 error = 0;
948         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
949                 device_printf(dev, "Unsupported SFP+ module detected!\n");
950                 error = EIO;
951                 goto err_pci;
952         } else if (error) {
953                 device_printf(dev, "Hardware initialization failed\n");
954                 error = EIO;
955                 goto err_pci;
956         }
957
958         /* Make sure we have a good EEPROM before we read from it */
959         if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
960                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
961                 error = EIO;
962                 goto err_pci;
963         }
964
965         error = ixgbe_start_hw(hw);
966         switch (error) {
967         case IXGBE_ERR_EEPROM_VERSION:
968                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
969                 break;
970         case IXGBE_ERR_SFP_NOT_SUPPORTED:
971                 device_printf(dev, "Unsupported SFP+ Module\n");
972                 error = EIO;
973                 goto err_pci;
974         case IXGBE_ERR_SFP_NOT_PRESENT:
975                 device_printf(dev, "No SFP+ Module found\n");
976                 /* falls thru */
977         default:
978                 break;
979         }
980
981         /* Most of the iflib initialization... */
982
983         iflib_set_mac(ctx, hw->mac.addr);
984         switch (sc->hw.mac.type) {
985         case ixgbe_mac_X550:
986         case ixgbe_mac_X550EM_x:
987         case ixgbe_mac_X550EM_a:
988                 scctx->isc_rss_table_size = 512;
989                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
990                 break;
991         default:
992                 scctx->isc_rss_table_size = 128;
993                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
994         }
995
996         /* Allow legacy interrupts */
997         ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
998
999         scctx->isc_txqsizes[0] =
1000             roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1001             sizeof(u32), DBA_ALIGN),
1002         scctx->isc_rxqsizes[0] =
1003             roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1004             DBA_ALIGN);
1005
1006         /* XXX */
1007         scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1008             CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1009         if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1010                 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1011         } else {
1012                 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1013                 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1014         }
1015
1016         scctx->isc_msix_bar = pci_msix_table_bar(dev);
1017
1018         scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1019         scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1020         scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1021
1022         scctx->isc_txrx = &ixgbe_txrx;
1023
1024         scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1025
1026         return (0);
1027
1028 err_pci:
1029         ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1030         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1031         IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1032         ixgbe_free_pci_resources(ctx);
1033
1034         return (error);
1035 } /* ixgbe_if_attach_pre */
1036
/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc  *sc;
	struct ixgbe_hw *hw;
	int             error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;


	/* Legacy (INTx) interrupts require hardware support for them */
	if (sc->intr_type == IFLIB_INTR_LEGACY &&
		(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
			      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	/* Evaluate initial link state */
	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */
1125
1126 /************************************************************************
1127  * ixgbe_check_wol_support
1128  *
1129  *   Checks whether the adapter's ports are capable of
1130  *   Wake On LAN by reading the adapter's NVM.
1131  *
1132  *   Sets each port's hw->wol_enabled value depending
1133  *   on the value read here.
1134  ************************************************************************/
1135 static void
1136 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1137 {
1138         struct ixgbe_hw *hw = &sc->hw;
1139         u16             dev_caps = 0;
1140
1141         /* Find out WoL support for port */
1142         sc->wol_support = hw->wol_enabled = 0;
1143         ixgbe_get_device_caps(hw, &dev_caps);
1144         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1145             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1146              hw->bus.func == 0))
1147                 sc->wol_support = hw->wol_enabled = 1;
1148
1149         /* Save initial wake up filter configuration */
1150         sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1151
1152         return;
1153 } /* ixgbe_check_wol_support */
1154
1155 /************************************************************************
1156  * ixgbe_setup_interface
1157  *
1158  *   Setup networking device structure and register an interface.
1159  ************************************************************************/
1160 static int
1161 ixgbe_setup_interface(if_ctx_t ctx)
1162 {
1163         struct ifnet   *ifp = iflib_get_ifp(ctx);
1164         struct ixgbe_softc *sc = iflib_get_softc(ctx);
1165
1166         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1167
1168         if_setbaudrate(ifp, IF_Gbps(10));
1169
1170         sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1171
1172         sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1173
1174         ixgbe_add_media_types(ctx);
1175
1176         /* Autoselect media by default */
1177         ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1178
1179         return (0);
1180 } /* ixgbe_setup_interface */
1181
1182 /************************************************************************
1183  * ixgbe_if_get_counter
1184  ************************************************************************/
1185 static uint64_t
1186 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1187 {
1188         struct ixgbe_softc *sc = iflib_get_softc(ctx);
1189         if_t           ifp = iflib_get_ifp(ctx);
1190
1191         switch (cnt) {
1192         case IFCOUNTER_IPACKETS:
1193                 return (sc->ipackets);
1194         case IFCOUNTER_OPACKETS:
1195                 return (sc->opackets);
1196         case IFCOUNTER_IBYTES:
1197                 return (sc->ibytes);
1198         case IFCOUNTER_OBYTES:
1199                 return (sc->obytes);
1200         case IFCOUNTER_IMCASTS:
1201                 return (sc->imcasts);
1202         case IFCOUNTER_OMCASTS:
1203                 return (sc->omcasts);
1204         case IFCOUNTER_COLLISIONS:
1205                 return (0);
1206         case IFCOUNTER_IQDROPS:
1207                 return (sc->iqdrops);
1208         case IFCOUNTER_OQDROPS:
1209                 return (0);
1210         case IFCOUNTER_IERRORS:
1211                 return (sc->ierrors);
1212         default:
1213                 return (if_get_counter_default(ifp, cnt));
1214         }
1215 } /* ixgbe_if_get_counter */
1216
/************************************************************************
 * ixgbe_if_i2c_req
 *
 *   Service an ifi2creq by reading req->len bytes, one at a time,
 *   from the device at req->dev_addr starting at req->offset, via the
 *   PHY's read_i2c_byte op.  Returns ENXIO if no such op exists.
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc              *sc = iflib_get_softc(ctx);
	struct ixgbe_hw         *hw = &sc->hw;
	int                     i;


	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	/*
	 * NOTE(review): per-byte read failures are ignored here; the
	 * request is best-effort and reports success whenever the op
	 * exists, so bytes that fail may be left unmodified.
	 */
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */
1235
1236 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1237  * @ctx: iflib context
1238  * @event: event code to check
1239  *
1240  * Defaults to returning true for unknown events.
1241  *
1242  * @returns true if iflib needs to reinit the interface
1243  */
1244 static bool
1245 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1246 {
1247         switch (event) {
1248         case IFLIB_RESTART_VLAN_CONFIG:
1249                 return (false);
1250         default:
1251                 return (true);
1252         }
1253 }
1254
/************************************************************************
 * ixgbe_add_media_types
 *
 *   Populate the ifmedia list from the PHY's supported physical
 *   layers.  KR/KX backplane types without a FreeBSD media define are
 *   mapped to the nearest equivalent (pre-IFM_ETH_XTYPE kernels only).
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t        dev = iflib_get_dev(ctx);
	u64             layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		/* Multispeed fiber modules can also run at 1G */
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	/* 1000baseBX has no media define; only announce it */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1341
1342 /************************************************************************
1343  * ixgbe_is_sfp
1344  ************************************************************************/
1345 static inline bool
1346 ixgbe_is_sfp(struct ixgbe_hw *hw)
1347 {
1348         switch (hw->mac.type) {
1349         case ixgbe_mac_82598EB:
1350                 if (hw->phy.type == ixgbe_phy_nl)
1351                         return (true);
1352                 return (false);
1353         case ixgbe_mac_82599EB:
1354                 switch (hw->mac.ops.get_media_type(hw)) {
1355                 case ixgbe_media_type_fiber:
1356                 case ixgbe_media_type_fiber_qsfp:
1357                         return (true);
1358                 default:
1359                         return (false);
1360                 }
1361         case ixgbe_mac_X550EM_x:
1362         case ixgbe_mac_X550EM_a:
1363                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1364                         return (true);
1365                 return (false);
1366         default:
1367                 return (false);
1368         }
1369 } /* ixgbe_is_sfp */
1370
/************************************************************************
 * ixgbe_config_link
 *
 *   Bring up the link: SFP media is deferred to the admin task for
 *   module identification; fixed media is set up synchronously via
 *   the MAC's check_link/get_link_capabilities/setup_link ops.
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32             autoneg, err = 0;
	bool            sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/* Schedule the module (MOD) task and poke the admin irq */
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to hardware capabilities */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */
1404
1405 /************************************************************************
1406  * ixgbe_update_stats_counters - Update board statistics counters.
1407  ************************************************************************/
1408 static void
1409 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1410 {
1411         struct ixgbe_hw       *hw = &sc->hw;
1412         struct ixgbe_hw_stats *stats = &sc->stats.pf;
1413         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1414         u32                   lxoffrxc;
1415         u64                   total_missed_rx = 0;
1416
1417         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1418         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1419         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1420         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1421         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1422
1423         for (int i = 0; i < 16; i++) {
1424                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1425                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1426                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1427         }
1428         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1429         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1430         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1431
1432         /* Hardware workaround, gprc counts missed packets */
1433         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1434         stats->gprc -= missed_rx;
1435
1436         if (hw->mac.type != ixgbe_mac_82598EB) {
1437                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1438                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1439                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1440                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1441                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1442                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1443                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1444                 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1445                 stats->lxoffrxc += lxoffrxc;
1446         } else {
1447                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1448                 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1449                 stats->lxoffrxc += lxoffrxc;
1450                 /* 82598 only has a counter in the high register */
1451                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1452                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1453                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1454         }
1455
1456         /*
1457          * For watchdog management we need to know if we have been paused
1458          * during the last interval, so capture that here.
1459         */
1460         if (lxoffrxc)
1461                 sc->shared->isc_pause_frames = 1;
1462
1463         /*
1464          * Workaround: mprc hardware is incorrectly counting
1465          * broadcasts, so for now we subtract those.
1466          */
1467         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1468         stats->bprc += bprc;
1469         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1470         if (hw->mac.type == ixgbe_mac_82598EB)
1471                 stats->mprc -= bprc;
1472
1473         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1474         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1475         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1476         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1477         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1478         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1479
1480         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1481         stats->lxontxc += lxon;
1482         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1483         stats->lxofftxc += lxoff;
1484         total = lxon + lxoff;
1485
1486         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1487         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1488         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1489         stats->gptc -= total;
1490         stats->mptc -= total;
1491         stats->ptc64 -= total;
1492         stats->gotc -= total * ETHER_MIN_LEN;
1493
1494         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1495         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1496         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1497         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1498         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1499         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1500         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1501         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1502         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1503         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1504         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1505         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1506         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1507         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1508         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1509         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1510         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1511         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1512         /* Only read FCOE on 82599 */
1513         if (hw->mac.type != ixgbe_mac_82598EB) {
1514                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1515                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1516                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1517                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1518                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1519         }
1520
1521         /* Fill out the OS statistics structure */
1522         IXGBE_SET_IPACKETS(sc, stats->gprc);
1523         IXGBE_SET_OPACKETS(sc, stats->gptc);
1524         IXGBE_SET_IBYTES(sc, stats->gorc);
1525         IXGBE_SET_OBYTES(sc, stats->gotc);
1526         IXGBE_SET_IMCASTS(sc, stats->mprc);
1527         IXGBE_SET_OMCASTS(sc, stats->mptc);
1528         IXGBE_SET_COLLISIONS(sc, 0);
1529         IXGBE_SET_IQDROPS(sc, total_missed_rx);
1530
1531         /*
1532          * Aggregate following types of errors as RX errors:
1533          * - CRC error count,
1534          * - illegal byte error count,
1535          * - checksum error count,
1536          * - missed packets count,
1537          * - length error count,
1538          * - undersized packets count,
1539          * - fragmented packets count,
1540          * - oversized packets count,
1541          * - jabber count.
1542          *
1543          * Ignore XEC errors for 82599 to workaround errata about
1544          * UDP frames with zero checksum.
1545          */
1546         IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1547             (hw->mac.type != ixgbe_mac_82599EB ? stats->xec : 0) +
1548             stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1549             stats->rjc);
1550 } /* ixgbe_update_stats_counters */
1551
1552 /************************************************************************
1553  * ixgbe_add_hw_stats
1554  *
1555  *   Add sysctl variables, one per statistic, to the system.
1556  ************************************************************************/
1557 static void
1558 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1559 {
1560         device_t               dev = iflib_get_dev(sc->ctx);
1561         struct ix_rx_queue     *rx_que;
1562         struct ix_tx_queue     *tx_que;
1563         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1564         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1565         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1566         struct ixgbe_hw_stats  *stats = &sc->stats.pf;
1567         struct sysctl_oid      *stat_node, *queue_node;
1568         struct sysctl_oid_list *stat_list, *queue_list;
1569         int                    i;
1570
1571 #define QUEUE_NAME_LEN 32
1572         char                   namebuf[QUEUE_NAME_LEN];
1573
1574         /* Driver Statistics */
1575         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1576             CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1577         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1578             CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1579         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1580             CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1581
1582         for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1583                 struct tx_ring *txr = &tx_que->txr;
1584                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1585                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1586                     CTLFLAG_RD, NULL, "Queue Name");
1587                 queue_list = SYSCTL_CHILDREN(queue_node);
1588
1589                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1590                     CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1591                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1592                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1593                     CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1594                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1595                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1596                     CTLFLAG_RD, &txr->tso_tx, "TSO");
1597                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1598                     CTLFLAG_RD, &txr->total_packets,
1599                     "Queue Packets Transmitted");
1600         }
1601
1602         for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1603                 struct rx_ring *rxr = &rx_que->rxr;
1604                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1605                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1606                     CTLFLAG_RD, NULL, "Queue Name");
1607                 queue_list = SYSCTL_CHILDREN(queue_node);
1608
1609                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1610                     CTLTYPE_UINT | CTLFLAG_RW, &sc->rx_queues[i], 0,
1611                     ixgbe_sysctl_interrupt_rate_handler, "IU",
1612                     "Interrupt Rate");
1613                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1614                     CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1615                     "irqs on this queue");
1616                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1617                     CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1618                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1619                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1620                     CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1621                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1622                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1623                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1624                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1625                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1626                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1627                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1628                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1629                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1630         }
1631
1632         /* MAC stats get their own sub node */
1633
1634         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1635             CTLFLAG_RD, NULL, "MAC Statistics");
1636         stat_list = SYSCTL_CHILDREN(stat_node);
1637
1638         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1639             CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1640         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1641             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1642         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1643             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1644         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1645             CTLFLAG_RD, &stats->errbc, "Byte Errors");
1646         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1647             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1648         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1649             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1650         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1651             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1652         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1653             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1654         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1655             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1656
1657         /* Flow Control stats */
1658         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1659             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1660         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1661             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1662         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1663             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1664         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1665             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1666
1667         /* Packet Reception Stats */
1668         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1669             CTLFLAG_RD, &stats->tor, "Total Octets Received");
1670         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1671             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1672         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1673             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1674         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1675             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1676         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1677             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1678         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1679             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1680         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1681             CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1682         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1683             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1684         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1685             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1686         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1687             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1688         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1689             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1690         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1691             CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1692         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1693             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1694         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1695             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1696         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1697             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1698         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1699             CTLFLAG_RD, &stats->rjc, "Received Jabber");
1700         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1701             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1702         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1703             CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1704         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1705             CTLFLAG_RD, &stats->xec, "Checksum Errors");
1706
1707         /* Packet Transmission Stats */
1708         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1709             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1710         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1711             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1712         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1713             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1714         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1715             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1716         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1717             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1718         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1719             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1720         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1721             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1722         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1723             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1724         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1725             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1726         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1727             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1728         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1729             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1730         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1731             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1732 } /* ixgbe_add_hw_stats */
1733
1734 /************************************************************************
1735  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1736  *
1737  *   Retrieves the TDH value from the hardware
1738  ************************************************************************/
1739 static int
1740 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1741 {
1742         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1743         int            error;
1744         unsigned int   val;
1745
1746         if (!txr)
1747                 return (0);
1748
1749         val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1750         error = sysctl_handle_int(oidp, &val, 0, req);
1751         if (error || !req->newptr)
1752                 return error;
1753
1754         return (0);
1755 } /* ixgbe_sysctl_tdh_handler */
1756
1757 /************************************************************************
1758  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1759  *
1760  *   Retrieves the TDT value from the hardware
1761  ************************************************************************/
1762 static int
1763 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1764 {
1765         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1766         int            error;
1767         unsigned int   val;
1768
1769         if (!txr)
1770                 return (0);
1771
1772         val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1773         error = sysctl_handle_int(oidp, &val, 0, req);
1774         if (error || !req->newptr)
1775                 return error;
1776
1777         return (0);
1778 } /* ixgbe_sysctl_tdt_handler */
1779
1780 /************************************************************************
1781  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1782  *
1783  *   Retrieves the RDH value from the hardware
1784  ************************************************************************/
1785 static int
1786 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1787 {
1788         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1789         int            error;
1790         unsigned int   val;
1791
1792         if (!rxr)
1793                 return (0);
1794
1795         val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1796         error = sysctl_handle_int(oidp, &val, 0, req);
1797         if (error || !req->newptr)
1798                 return error;
1799
1800         return (0);
1801 } /* ixgbe_sysctl_rdh_handler */
1802
1803 /************************************************************************
1804  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1805  *
1806  *   Retrieves the RDT value from the hardware
1807  ************************************************************************/
1808 static int
1809 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1810 {
1811         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1812         int            error;
1813         unsigned int   val;
1814
1815         if (!rxr)
1816                 return (0);
1817
1818         val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1819         error = sysctl_handle_int(oidp, &val, 0, req);
1820         if (error || !req->newptr)
1821                 return error;
1822
1823         return (0);
1824 } /* ixgbe_sysctl_rdt_handler */
1825
1826 /************************************************************************
1827  * ixgbe_if_vlan_register
1828  *
1829  *   Run via vlan config EVENT, it enables us to use the
1830  *   HW Filter table since we can get the vlan id. This
1831  *   just creates the entry in the soft version of the
1832  *   VFTA, init will repopulate the real table.
1833  ************************************************************************/
1834 static void
1835 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1836 {
1837         struct ixgbe_softc *sc = iflib_get_softc(ctx);
1838         u16            index, bit;
1839
1840         index = (vtag >> 5) & 0x7F;
1841         bit = vtag & 0x1F;
1842         sc->shadow_vfta[index] |= (1 << bit);
1843         ++sc->num_vlans;
1844         ixgbe_setup_vlan_hw_support(ctx);
1845 } /* ixgbe_if_vlan_register */
1846
1847 /************************************************************************
1848  * ixgbe_if_vlan_unregister
1849  *
1850  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1851  ************************************************************************/
1852 static void
1853 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1854 {
1855         struct ixgbe_softc *sc = iflib_get_softc(ctx);
1856         u16            index, bit;
1857
1858         index = (vtag >> 5) & 0x7F;
1859         bit = vtag & 0x1F;
1860         sc->shadow_vfta[index] &= ~(1 << bit);
1861         --sc->num_vlans;
1862         /* Re-init to load the changes */
1863         ixgbe_setup_vlan_hw_support(ctx);
1864 } /* ixgbe_if_vlan_unregister */
1865
1866 /************************************************************************
1867  * ixgbe_setup_vlan_hw_support
1868  ************************************************************************/
1869 static void
1870 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1871 {
1872         struct ifnet    *ifp = iflib_get_ifp(ctx);
1873         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
1874         struct ixgbe_hw *hw = &sc->hw;
1875         struct rx_ring  *rxr;
1876         int             i;
1877         u32             ctrl;
1878
1879
1880         /*
1881          * We get here thru init_locked, meaning
1882          * a soft reset, this has already cleared
1883          * the VFTA and other state, so if there
1884          * have been no vlan's registered do nothing.
1885          */
1886         if (sc->num_vlans == 0)
1887                 return;
1888
1889         /* Setup the queues for vlans */
1890         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1891                 for (i = 0; i < sc->num_rx_queues; i++) {
1892                         rxr = &sc->rx_queues[i].rxr;
1893                         /* On 82599 the VLAN enable is per/queue in RXDCTL */
1894                         if (hw->mac.type != ixgbe_mac_82598EB) {
1895                                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1896                                 ctrl |= IXGBE_RXDCTL_VME;
1897                                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1898                         }
1899                         rxr->vtag_strip = true;
1900                 }
1901         }
1902
1903         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1904                 return;
1905         /*
1906          * A soft reset zero's out the VFTA, so
1907          * we need to repopulate it now.
1908          */
1909         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1910                 if (sc->shadow_vfta[i] != 0)
1911                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1912                             sc->shadow_vfta[i]);
1913
1914         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1915         /* Enable the Filter Table if enabled */
1916         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1917                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1918                 ctrl |= IXGBE_VLNCTRL_VFE;
1919         }
1920         if (hw->mac.type == ixgbe_mac_82598EB)
1921                 ctrl |= IXGBE_VLNCTRL_VME;
1922         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1923 } /* ixgbe_setup_vlan_hw_support */
1924
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 *
 *   For most devices this comes from ixgbe_get_bus_info(); devices that
 *   sit behind an internal PCIe bridge (the SFP/QSFP QP parts) instead
 *   walk two levels up the PCI tree and read the slot's Link Status
 *   register directly.  Prints a warning if the slot bandwidth looks
 *   insufficient for the adapter.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
        device_t        dev = iflib_get_dev(sc->ctx);
        struct ixgbe_hw *hw = &sc->hw;
        int             bus_info_valid = true;  /* cleared if PCIe caps can't be read */
        u32             offset;
        u16             link;

        /* Some devices are behind an internal bridge */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
                goto get_parent_info;
        default:
                break;
        }

        ixgbe_get_bus_info(hw);

        /*
         * Some devices don't use PCI-E, but there is no need
         * to display "Unknown" for bus speed and width.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                return;
        default:
                goto display;
        }

get_parent_info:
        /*
         * For the Quad port adapter we need to parse back
         * up the PCI tree to find the speed of the expansion
         * slot into which this adapter is plugged. A bit more work.
         */
        /* First hop: up to the internal bridge's parent bus. */
        dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
        device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
            pci_get_slot(dev), pci_get_function(dev));
#endif
        /* Second hop: up to the physical slot's bridge. */
        dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
        device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
            pci_get_slot(dev), pci_get_function(dev));
#endif
        /* Now get the PCI Express Capabilities offset */
        if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
                /*
                 * Hmm...can't get PCI-Express capabilities.
                 * Falling back to default method.
                 */
                bus_info_valid = false;
                ixgbe_get_bus_info(hw);
                goto display;
        }
        /* ...and read the Link Status Register */
        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
        /* Decode speed/width from the link status into hw->bus. */
        ixgbe_set_pci_config_data_generic(hw, link);

display:
        /* NOTE(review): after the bridge walk, dev is the slot bridge,
         * not the adapter, so these messages attach to that device. */
        device_printf(dev, "PCI Express Bus: Speed %s %s\n",
            ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
             (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
             (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
             "Unknown"),
            ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
             "Unknown"));

        if (bus_info_valid) {
                /* Non-QP parts want at least x8, or x4 at Gen2 speed. */
                if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
                    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
                    (hw->bus.speed == ixgbe_bus_speed_2500))) {
                        device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
                        device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
                }
                /* The QP part wants x8 at Gen3 speed. */
                if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
                    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
                    (hw->bus.speed < ixgbe_bus_speed_8000))) {
                        device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
                        device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
                }
        } else
                device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

        return;
} /* ixgbe_get_slot_info */
2022
/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 *   Setup MSI-X Interrupt resources and handlers
 *
 *   Vector 0 (rid 1) onward is assigned to the RX queues; TX queues are
 *   serviced as softirqs on their paired RX queue's interrupt; the last
 *   vector is the admin (link) interrupt.  Returns 0 on success or the
 *   iflib error code after freeing any IRQs already allocated.
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
        struct ixgbe_softc     *sc = iflib_get_softc(ctx);
        struct ix_rx_queue *rx_que = sc->rx_queues;
        struct ix_tx_queue *tx_que;
        int                error, rid, vector = 0;
        int                cpu_id = 0;
        char               buf[16];

        /* Admin Que is vector 0*/
        rid = vector + 1;
        for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
                /* RIDs are 1-based: queue i uses rid i+1. */
                rid = vector + 1;

                snprintf(buf, sizeof(buf), "rxq%d", i);
                error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
                    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);

                if (error) {
                        device_printf(iflib_get_dev(ctx),
                            "Failed to allocate que int %d err: %d", i, error);
                        /*
                         * Shrink the queue count so the fail path below
                         * only walks the queues touched so far.
                         * NOTE(review): i + 1 includes the queue whose
                         * allocation just failed — confirm iflib_irq_free
                         * is safe on an unallocated irq.
                         */
                        sc->num_rx_queues = i + 1;
                        goto fail;
                }

                rx_que->msix = vector;
                if (sc->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * The queue ID is used as the RSS layer bucket ID.
                         * We look up the queue ID -> RSS CPU ID and select
                         * that.
                         */
                        cpu_id = rss_getcpu(i % rss_getnumbuckets());
                } else {
                        /*
                         * Bind the MSI-X vector, and thus the
                         * rings to the corresponding cpu.
                         *
                         * This just happens to match the default RSS
                         * round-robin bucket -> queue -> CPU allocation.
                         */
                        if (sc->num_rx_queues > 1)
                                cpu_id = i;
                }

                /* NOTE(review): cpu_id is computed above but not consumed
                 * anywhere in this function — apparent leftover from an
                 * explicit CPU-binding call; verify against iflib usage. */
        }
        for (int i = 0; i < sc->num_tx_queues; i++) {
                snprintf(buf, sizeof(buf), "txq%d", i);
                tx_que = &sc->tx_queues[i];
                /* Pair each TX queue with an RX vector, round-robin. */
                tx_que->msix = i % sc->num_rx_queues;
                iflib_softirq_alloc_generic(ctx,
                    &sc->rx_queues[tx_que->msix].que_irq,
                    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
        }
        /* Admin/link interrupt takes the next free vector. */
        rid = vector + 1;
        error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
            IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
        if (error) {
                device_printf(iflib_get_dev(ctx),
                    "Failed to register admin handler");
                return (error);
        }

        /* Remember which vector the admin interrupt landed on. */
        sc->vector = vector;

        return (0);
fail:
        /* Undo everything allocated so far, admin irq included. */
        iflib_irq_free(ctx, &sc->irq);
        rx_que = sc->rx_queues;
        for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
                iflib_irq_free(ctx, &rx_que->que_irq);

        return (error);
} /* ixgbe_if_msix_intr_assign */
2103
2104 /*********************************************************************
2105  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2106  **********************************************************************/
2107 static int
2108 ixgbe_msix_que(void *arg)
2109 {
2110         struct ix_rx_queue *que = arg;
2111         struct ixgbe_softc     *sc = que->sc;
2112         struct ifnet       *ifp = iflib_get_ifp(que->sc->ctx);
2113
2114         /* Protect against spurious interrupts */
2115         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2116                 return (FILTER_HANDLED);
2117
2118         ixgbe_disable_queue(sc, que->msix);
2119         ++que->irqs;
2120
2121         return (FILTER_SCHEDULE_THREAD);
2122 } /* ixgbe_msix_que */
2123
2124 /************************************************************************
2125  * ixgbe_media_status - Media Ioctl callback
2126  *
2127  *   Called whenever the user queries the status of
2128  *   the interface using ifconfig.
2129  ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_if_media_status: begin");

	/* Link state is always reported as valid; activity is added below. */
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Without an active link there is no media subtype to report. */
	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = sc->phy_layer;

	/* Copper (BASE-T/TX) layers: map negotiated speed to -T subtypes */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (passive or active) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach optics; 1G on an LR module is reported as 1000BASE-LX */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach optics */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
#ifndef IFM_ETH_XTYPE
	/* Without IFM_ETH_XTYPE, backplane KR/KX4/KX are reported with the
	 * nearest pre-existing subtypes (SR, CX4, CX). */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	/* With IFM_ETH_XTYPE, the dedicated backplane subtypes are used. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_media_status */
2279
2280 /************************************************************************
2281  * ixgbe_media_change - Media Ioctl callback
2282  *
2283  *   Called when the user changes speed/duplex using
2284  *   media/mediopt option with ifconfig.
2285  ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc   *sc = iflib_get_softc(ctx);
	struct ifmedia   *ifm = iflib_get_media(ctx);
	struct ixgbe_hw  *hw = &sc->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested subtype into the set of link speeds to
	 * advertise; multi-speed subtypes advertise every speed they can
	 * negotiate down to. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	/* Record the advertised speeds as the bitmask used by the
	 * "advertise_speed" sysctl: 1=100M, 2=1G, 4=10G, 8=10M. */
	sc->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_if_media_change */
2367
2368 /************************************************************************
2369  * ixgbe_set_promisc
2370  ************************************************************************/
2371 static int
2372 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2373 {
2374         struct ixgbe_softc *sc = iflib_get_softc(ctx);
2375         struct ifnet   *ifp = iflib_get_ifp(ctx);
2376         u32            rctl;
2377         int            mcnt = 0;
2378
2379         rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2380         rctl &= (~IXGBE_FCTRL_UPE);
2381         if (ifp->if_flags & IFF_ALLMULTI)
2382                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2383         else {
2384                 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2385         }
2386         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2387                 rctl &= (~IXGBE_FCTRL_MPE);
2388         IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2389
2390         if (ifp->if_flags & IFF_PROMISC) {
2391                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2392                 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2393         } else if (ifp->if_flags & IFF_ALLMULTI) {
2394                 rctl |= IXGBE_FCTRL_MPE;
2395                 rctl &= ~IXGBE_FCTRL_UPE;
2396                 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2397         }
2398         return (0);
2399 } /* ixgbe_if_promisc_set */
2400
2401 /************************************************************************
2402  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2403  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct ixgbe_softc  *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	++sc->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/* NOTE(review): the cause is read via EICS rather than EICR here —
	 * presumably a read of EICS mirrors EICR without auto-clearing it;
	 * confirm against the controller datasheet. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: mask LSC and defer to the admin task */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
	}

	/* Causes below do not exist on 82598 hardware */
	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			/* Only one Flow Director reinit may be pending at a
			 * time; a lost race simply means one is in flight. */
			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
				return (FILTER_HANDLED);
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
		} else
			if (eicr & IXGBE_EICR_ECC) {
				device_printf(iflib_get_dev(sc->ctx),
				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
			}

		/* Check for over temp condition */
		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (sc->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: schedule the MOD task */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multi-speed fiber: schedule the MSF task */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* Check for fan failure */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(sc, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
	}

	/* Wake the admin thread only if some task was requested above */
	return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */
2520
2521 /************************************************************************
2522  * ixgbe_sysctl_interrupt_rate_handler
2523  ************************************************************************/
2524 static int
2525 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2526 {
2527         struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2528         int                error;
2529         unsigned int       reg, usec, rate;
2530
2531         reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2532         usec = ((reg & 0x0FF8) >> 3);
2533         if (usec > 0)
2534                 rate = 500000 / usec;
2535         else
2536                 rate = 0;
2537         error = sysctl_handle_int(oidp, &rate, 0, req);
2538         if (error || !req->newptr)
2539                 return error;
2540         reg &= ~0xfff; /* default, no limitation */
2541         ixgbe_max_interrupt_rate = 0;
2542         if (rate > 0 && rate < 500000) {
2543                 if (rate < 1000)
2544                         rate = 1000;
2545                 ixgbe_max_interrupt_rate = rate;
2546                 reg |= ((4000000/rate) & 0xff8);
2547         }
2548         IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2549
2550         return (0);
2551 } /* ixgbe_sysctl_interrupt_rate_handler */
2552
2553 /************************************************************************
2554  * ixgbe_add_device_sysctls
2555  ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
	struct ixgbe_softc         *sc = iflib_get_softc(ctx);
	device_t               dev = iflib_get_dev(ctx);
	struct ixgbe_hw        *hw = &sc->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_flowcntl, "I",
	    IXGBE_SYSCTL_DESC_SET_FC);

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	/* NOTE(review): CTLTYPE_U16 paired with format string "I" — other
	 * entries use CTLTYPE_INT with "I"; confirm the intended type. */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
		    CTLTYPE_U16 | CTLFLAG_RW, sc, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
		    CTLTYPE_U32 | CTLFLAG_RW, sc, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	/* Gated on the same device ID as the WoL group above; the PHY
	 * sysctls live under their own "phy" node. */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
		    CTLTYPE_U16 | CTLFLAG_RD, sc, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	if (sc->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
2633
2634 /************************************************************************
2635  * ixgbe_allocate_pci_resources
2636  ************************************************************************/
2637 static int
2638 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2639 {
2640         struct ixgbe_softc *sc = iflib_get_softc(ctx);
2641         device_t        dev = iflib_get_dev(ctx);
2642         int             rid;
2643
2644         rid = PCIR_BAR(0);
2645         sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2646             RF_ACTIVE);
2647
2648         if (!(sc->pci_mem)) {
2649                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2650                 return (ENXIO);
2651         }
2652
2653         /* Save bus_space values for READ/WRITE_REG macros */
2654         sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2655         sc->osdep.mem_bus_space_handle =
2656             rman_get_bushandle(sc->pci_mem);
2657         /* Set hw values for shared code */
2658         sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2659
2660         return (0);
2661 } /* ixgbe_allocate_pci_resources */
2662
2663 /************************************************************************
2664  * ixgbe_detach - Device removal routine
2665  *
2666  *   Called when the driver is being removed.
2667  *   Stops the adapter and deallocates all the resources
2668  *   that were allocated for driver operation.
2669  *
2670  *   return 0 on success, positive on failure
2671  ************************************************************************/
2672 static int
2673 ixgbe_if_detach(if_ctx_t ctx)
2674 {
2675         struct ixgbe_softc *sc = iflib_get_softc(ctx);
2676         device_t       dev = iflib_get_dev(ctx);
2677         u32            ctrl_ext;
2678
2679         INIT_DEBUGOUT("ixgbe_detach: begin");
2680
2681         if (ixgbe_pci_iov_detach(dev) != 0) {
2682                 device_printf(dev, "SR-IOV in use; detach first.\n");
2683                 return (EBUSY);
2684         }
2685
2686         ixgbe_setup_low_power_mode(ctx);
2687
2688         /* let hardware know driver is unloading */
2689         ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2690         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2691         IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2692
2693         ixgbe_free_pci_resources(ctx);
2694         free(sc->mta, M_IXGBE);
2695
2696         return (0);
2697 } /* ixgbe_if_detach */
2698
2699 /************************************************************************
2700  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2701  *
2702  *   Prepare the adapter/port for LPLU and/or WoL
2703  ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t        dev = iflib_get_dev(ctx);
	s32             error = 0;

	/* Power the PHY down when WoL will not need it */
	if (!hw->wol_enabled)
		ixgbe_set_phy_power(hw, false);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* PHY resets are held off so enter_lplu()'s settings stick */
		hw->phy.reset_disable = true;
		ixgbe_if_stop(ctx);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_if_stop(ctx);
	}

	return error;
} /* ixgbe_setup_low_power_mode */
2752
2753 /************************************************************************
2754  * ixgbe_shutdown - Shutdown entry point
2755  ************************************************************************/
2756 static int
2757 ixgbe_if_shutdown(if_ctx_t ctx)
2758 {
2759         int error = 0;
2760
2761         INIT_DEBUGOUT("ixgbe_shutdown: begin");
2762
2763         error = ixgbe_setup_low_power_mode(ctx);
2764
2765         return (error);
2766 } /* ixgbe_if_shutdown */
2767
2768 /************************************************************************
2769  * ixgbe_suspend
2770  *
2771  *   From D0 to D3
2772  ************************************************************************/
2773 static int
2774 ixgbe_if_suspend(if_ctx_t ctx)
2775 {
2776         int error = 0;
2777
2778         INIT_DEBUGOUT("ixgbe_suspend: begin");
2779
2780         error = ixgbe_setup_low_power_mode(ctx);
2781
2782         return (error);
2783 } /* ixgbe_if_suspend */
2784
2785 /************************************************************************
2786  * ixgbe_resume
2787  *
2788  *   From D3 to D0
2789  ************************************************************************/
2790 static int
2791 ixgbe_if_resume(if_ctx_t ctx)
2792 {
2793         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
2794         device_t        dev = iflib_get_dev(ctx);
2795         struct ifnet    *ifp = iflib_get_ifp(ctx);
2796         struct ixgbe_hw *hw = &sc->hw;
2797         u32             wus;
2798
2799         INIT_DEBUGOUT("ixgbe_resume: begin");
2800
2801         /* Read & clear WUS register */
2802         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2803         if (wus)
2804                 device_printf(dev, "Woken up by (WUS): %#010x\n",
2805                     IXGBE_READ_REG(hw, IXGBE_WUS));
2806         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2807         /* And clear WUFC until next low-power transition */
2808         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2809
2810         /*
2811          * Required after D3->D0 transition;
2812          * will re-advertise all previous advertised speeds
2813          */
2814         if (ifp->if_flags & IFF_UP)
2815                 ixgbe_if_init(ctx);
2816
2817         return (0);
2818 } /* ixgbe_if_resume */
2819
2820 /************************************************************************
2821  * ixgbe_if_mtu_set - Ioctl mtu entry point
2822  *
2823  *   Return 0 on success, EINVAL on failure
2824  ************************************************************************/
2825 static int
2826 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2827 {
2828         struct ixgbe_softc *sc = iflib_get_softc(ctx);
2829         int error = 0;
2830
2831         IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2832
2833         if (mtu > IXGBE_MAX_MTU) {
2834                 error = EINVAL;
2835         } else {
2836                 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
2837         }
2838
2839         return error;
2840 } /* ixgbe_if_mtu_set */
2841
2842 /************************************************************************
2843  * ixgbe_if_crcstrip_set
2844  ************************************************************************/
2845 static void
2846 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2847 {
2848         struct ixgbe_softc *sc = iflib_get_softc(ctx);
2849         struct ixgbe_hw *hw = &sc->hw;
2850         /* crc stripping is set in two places:
2851          * IXGBE_HLREG0 (modified on init_locked and hw reset)
2852          * IXGBE_RDRXCTL (set by the original driver in
2853          *      ixgbe_setup_hw_rsc() called in init_locked.
2854          *      We disable the setting when netmap is compiled in).
2855          * We update the values here, but also in ixgbe.c because
2856          * init_locked sometimes is called outside our control.
2857          */
2858         uint32_t hl, rxc;
2859
2860         hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2861         rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2862 #ifdef NETMAP
2863         if (netmap_verbose)
2864                 D("%s read  HLREG 0x%x rxc 0x%x",
2865                         onoff ? "enter" : "exit", hl, rxc);
2866 #endif
2867         /* hw requirements ... */
2868         rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2869         rxc |= IXGBE_RDRXCTL_RSCACKC;
2870         if (onoff && !crcstrip) {
2871                 /* keep the crc. Fast rx */
2872                 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2873                 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2874         } else {
2875                 /* reset default mode */
2876                 hl |= IXGBE_HLREG0_RXCRCSTRP;
2877                 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2878         }
2879 #ifdef NETMAP
2880         if (netmap_verbose)
2881                 D("%s write HLREG 0x%x rxc 0x%x",
2882                         onoff ? "enter" : "exit", hl, rxc);
2883 #endif
2884         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2885         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2886 } /* ixgbe_if_crcstrip_set */
2887
2888 /*********************************************************************
2889  * ixgbe_if_init - Init entry point
2890  *
2891  *   Used in two ways: It is used by the stack as an init
2892  *   entry point in network interface structure. It is also
2893  *   used by the driver as a hw/sw initialization routine to
2894  *   get to a consistent state.
2895  *
2896  *   Return 0 on success, positive on failure
2897  **********************************************************************/
2898 void
2899 ixgbe_if_init(if_ctx_t ctx)
2900 {
2901         struct ixgbe_softc     *sc = iflib_get_softc(ctx);
2902         struct ifnet       *ifp = iflib_get_ifp(ctx);
2903         device_t           dev = iflib_get_dev(ctx);
2904         struct ixgbe_hw *hw = &sc->hw;
2905         struct ix_rx_queue *rx_que;
2906         struct ix_tx_queue *tx_que;
2907         u32             txdctl, mhadd;
2908         u32             rxdctl, rxctrl;
2909         u32             ctrl_ext;
2910
2911         int             i, j, err;
2912
2913         INIT_DEBUGOUT("ixgbe_if_init: begin");
2914
2915         /* Queue indices may change with IOV mode */
2916         ixgbe_align_all_queue_indices(sc);
2917
2918         /* reprogram the RAR[0] in case user changed it. */
2919         ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
2920
2921         /* Get the latest mac address, User can use a LAA */
2922         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2923         ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
2924         hw->addr_ctrl.rar_used_count = 1;
2925
2926         ixgbe_init_hw(hw);
2927
2928         ixgbe_initialize_iov(sc);
2929
2930         ixgbe_initialize_transmit_units(ctx);
2931
2932         /* Setup Multicast table */
2933         ixgbe_if_multi_set(ctx);
2934
2935         /* Determine the correct mbuf pool, based on frame size */
2936         sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2937
2938         /* Configure RX settings */
2939         ixgbe_initialize_receive_units(ctx);
2940
2941         /*
2942          * Initialize variable holding task enqueue requests
2943          * from MSI-X interrupts
2944          */
2945         sc->task_requests = 0;
2946
2947         /* Enable SDP & MSI-X interrupts based on adapter */
2948         ixgbe_config_gpie(sc);
2949
2950         /* Set MTU size */
2951         if (ifp->if_mtu > ETHERMTU) {
2952                 /* aka IXGBE_MAXFRS on 82599 and newer */
2953                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2954                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2955                 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2956                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2957         }
2958
2959         /* Now enable all the queues */
2960         for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
2961                 struct tx_ring *txr = &tx_que->txr;
2962
2963                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2964                 txdctl |= IXGBE_TXDCTL_ENABLE;
2965                 /* Set WTHRESH to 8, burst writeback */
2966                 txdctl |= (8 << 16);
2967                 /*
2968                  * When the internal queue falls below PTHRESH (32),
2969                  * start prefetching as long as there are at least
2970                  * HTHRESH (1) buffers ready. The values are taken
2971                  * from the Intel linux driver 3.8.21.
2972                  * Prefetching enables tx line rate even with 1 queue.
2973                  */
2974                 txdctl |= (32 << 0) | (1 << 8);
2975                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2976         }
2977
2978         for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
2979                 struct rx_ring *rxr = &rx_que->rxr;
2980
2981                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2982                 if (hw->mac.type == ixgbe_mac_82598EB) {
2983                         /*
2984                          * PTHRESH = 21
2985                          * HTHRESH = 4
2986                          * WTHRESH = 8
2987                          */
2988                         rxdctl &= ~0x3FFFFF;
2989                         rxdctl |= 0x080420;
2990                 }
2991                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2992                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2993                 for (j = 0; j < 10; j++) {
2994                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2995                             IXGBE_RXDCTL_ENABLE)
2996                                 break;
2997                         else
2998                                 msec_delay(1);
2999                 }
3000                 wmb();
3001         }
3002
3003         /* Enable Receive engine */
3004         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3005         if (hw->mac.type == ixgbe_mac_82598EB)
3006                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3007         rxctrl |= IXGBE_RXCTRL_RXEN;
3008         ixgbe_enable_rx_dma(hw, rxctrl);
3009
3010         /* Set up MSI/MSI-X routing */
3011         if (ixgbe_enable_msix)  {
3012                 ixgbe_configure_ivars(sc);
3013                 /* Set up auto-mask */
3014                 if (hw->mac.type == ixgbe_mac_82598EB)
3015                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3016                 else {
3017                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3018                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3019                 }
3020         } else {  /* Simple settings for Legacy/MSI */
3021                 ixgbe_set_ivar(sc, 0, 0, 0);
3022                 ixgbe_set_ivar(sc, 0, 0, 1);
3023                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3024         }
3025
3026         ixgbe_init_fdir(sc);
3027
3028         /*
3029          * Check on any SFP devices that
3030          * need to be kick-started
3031          */
3032         if (hw->phy.type == ixgbe_phy_none) {
3033                 err = hw->phy.ops.identify(hw);
3034                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3035                         device_printf(dev,
3036                             "Unsupported SFP+ module type was detected.\n");
3037                         return;
3038                 }
3039         }
3040
3041         /* Set moderation on the Link interrupt */
3042         IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3043
3044         /* Enable power to the phy. */
3045         ixgbe_set_phy_power(hw, true);
3046
3047         /* Config/Enable Link */
3048         ixgbe_config_link(ctx);
3049
3050         /* Hardware Packet Buffer & Flow Control setup */
3051         ixgbe_config_delay_values(sc);
3052
3053         /* Initialize the FC settings */
3054         ixgbe_start_hw(hw);
3055
3056         /* Set up VLAN support and filter */
3057         ixgbe_setup_vlan_hw_support(ctx);
3058
3059         /* Setup DMA Coalescing */
3060         ixgbe_config_dmac(sc);
3061
3062         /* And now turn on interrupts */
3063         ixgbe_if_enable_intr(ctx);
3064
3065         /* Enable the use of the MBX by the VF's */
3066         if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3067                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3068                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3069                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3070         }
3071
3072 } /* ixgbe_init_locked */
3073
3074 /************************************************************************
3075  * ixgbe_set_ivar
3076  *
3077  *   Setup the correct IVAR register for a particular MSI-X interrupt
3078  *     (yes this is all very magic and confusing :)
3079  *    - entry is the register array entry
3080  *    - vector is the MSI-X vector for this queue
3081  *    - type is RX/TX/MISC
3082  ************************************************************************/
3083 static void
3084 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3085 {
3086         struct ixgbe_hw *hw = &sc->hw;
3087         u32 ivar, index;
3088
3089         vector |= IXGBE_IVAR_ALLOC_VAL;
3090
3091         switch (hw->mac.type) {
3092         case ixgbe_mac_82598EB:
3093                 if (type == -1)
3094                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3095                 else
3096                         entry += (type * 64);
3097                 index = (entry >> 2) & 0x1F;
3098                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3099                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3100                 ivar |= (vector << (8 * (entry & 0x3)));
3101                 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3102                 break;
3103         case ixgbe_mac_82599EB:
3104         case ixgbe_mac_X540:
3105         case ixgbe_mac_X550:
3106         case ixgbe_mac_X550EM_x:
3107         case ixgbe_mac_X550EM_a:
3108                 if (type == -1) { /* MISC IVAR */
3109                         index = (entry & 1) * 8;
3110                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3111                         ivar &= ~(0xFF << index);
3112                         ivar |= (vector << index);
3113                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3114                 } else {          /* RX/TX IVARS */
3115                         index = (16 * (entry & 1)) + (8 * type);
3116                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3117                         ivar &= ~(0xFF << index);
3118                         ivar |= (vector << index);
3119                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3120                 }
3121         default:
3122                 break;
3123         }
3124 } /* ixgbe_set_ivar */
3125
3126 /************************************************************************
3127  * ixgbe_configure_ivars
3128  ************************************************************************/
3129 static void
3130 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3131 {
3132         struct ix_rx_queue *rx_que = sc->rx_queues;
3133         struct ix_tx_queue *tx_que = sc->tx_queues;
3134         u32                newitr;
3135
3136         if (ixgbe_max_interrupt_rate > 0)
3137                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3138         else {
3139                 /*
3140                  * Disable DMA coalescing if interrupt moderation is
3141                  * disabled.
3142                  */
3143                 sc->dmac = 0;
3144                 newitr = 0;
3145         }
3146
3147         for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3148                 struct rx_ring *rxr = &rx_que->rxr;
3149
3150                 /* First the RX queue entry */
3151                 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3152
3153                 /* Set an Initial EITR value */
3154                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3155         }
3156         for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3157                 struct tx_ring *txr = &tx_que->txr;
3158
3159                 /* ... and the TX */
3160                 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3161         }
3162         /* For the Link interrupt */
3163         ixgbe_set_ivar(sc, 1, sc->vector, -1);
3164 } /* ixgbe_configure_ivars */
3165
3166 /************************************************************************
3167  * ixgbe_config_gpie
3168  ************************************************************************/
3169 static void
3170 ixgbe_config_gpie(struct ixgbe_softc *sc)
3171 {
3172         struct ixgbe_hw *hw = &sc->hw;
3173         u32             gpie;
3174
3175         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3176
3177         if (sc->intr_type == IFLIB_INTR_MSIX) {
3178                 /* Enable Enhanced MSI-X mode */
3179                 gpie |= IXGBE_GPIE_MSIX_MODE
3180                      |  IXGBE_GPIE_EIAME
3181                      |  IXGBE_GPIE_PBA_SUPPORT
3182                      |  IXGBE_GPIE_OCD;
3183         }
3184
3185         /* Fan Failure Interrupt */
3186         if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3187                 gpie |= IXGBE_SDP1_GPIEN;
3188
3189         /* Thermal Sensor Interrupt */
3190         if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3191                 gpie |= IXGBE_SDP0_GPIEN_X540;
3192
3193         /* Link detection */
3194         switch (hw->mac.type) {
3195         case ixgbe_mac_82599EB:
3196                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3197                 break;
3198         case ixgbe_mac_X550EM_x:
3199         case ixgbe_mac_X550EM_a:
3200                 gpie |= IXGBE_SDP0_GPIEN_X540;
3201                 break;
3202         default:
3203                 break;
3204         }
3205
3206         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3207
3208 } /* ixgbe_config_gpie */
3209
3210 /************************************************************************
3211  * ixgbe_config_delay_values
3212  *
3213  *   Requires sc->max_frame_size to be set.
3214  ************************************************************************/
3215 static void
3216 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3217 {
3218         struct ixgbe_hw *hw = &sc->hw;
3219         u32             rxpb, frame, size, tmp;
3220
3221         frame = sc->max_frame_size;
3222
3223         /* Calculate High Water */
3224         switch (hw->mac.type) {
3225         case ixgbe_mac_X540:
3226         case ixgbe_mac_X550:
3227         case ixgbe_mac_X550EM_x:
3228         case ixgbe_mac_X550EM_a:
3229                 tmp = IXGBE_DV_X540(frame, frame);
3230                 break;
3231         default:
3232                 tmp = IXGBE_DV(frame, frame);
3233                 break;
3234         }
3235         size = IXGBE_BT2KB(tmp);
3236         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3237         hw->fc.high_water[0] = rxpb - size;
3238
3239         /* Now calculate Low Water */
3240         switch (hw->mac.type) {
3241         case ixgbe_mac_X540:
3242         case ixgbe_mac_X550:
3243         case ixgbe_mac_X550EM_x:
3244         case ixgbe_mac_X550EM_a:
3245                 tmp = IXGBE_LOW_DV_X540(frame);
3246                 break;
3247         default:
3248                 tmp = IXGBE_LOW_DV(frame);
3249                 break;
3250         }
3251         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3252
3253         hw->fc.pause_time = IXGBE_FC_PAUSE;
3254         hw->fc.send_xon = true;
3255 } /* ixgbe_config_delay_values */
3256
3257 /************************************************************************
3258  * ixgbe_set_multi - Multicast Update
3259  *
3260  *   Called whenever multicast address list is updated.
3261  ************************************************************************/
3262 static int
3263 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int idx)
3264 {
3265         struct ixgbe_softc *sc = arg;
3266         struct ixgbe_mc_addr *mta = sc->mta;
3267
3268         if (ifma->ifma_addr->sa_family != AF_LINK)
3269                 return (0);
3270         if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3271                 return (0);
3272         bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3273             mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3274         mta[idx].vmdq = sc->pool;
3275
3276         return (1);
3277 } /* ixgbe_mc_filter_apply */
3278
3279 static void
3280 ixgbe_if_multi_set(if_ctx_t ctx)
3281 {
3282         struct ixgbe_softc       *sc = iflib_get_softc(ctx);
3283         struct ixgbe_mc_addr *mta;
3284         struct ifnet         *ifp = iflib_get_ifp(ctx);
3285         u8                   *update_ptr;
3286         int                  mcnt = 0;
3287         u32                  fctrl;
3288
3289         IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3290
3291         mta = sc->mta;
3292         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3293
3294         mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
3295
3296         fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3297
3298         if (ifp->if_flags & IFF_PROMISC)
3299                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3300         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3301             ifp->if_flags & IFF_ALLMULTI) {
3302                 fctrl |= IXGBE_FCTRL_MPE;
3303                 fctrl &= ~IXGBE_FCTRL_UPE;
3304         } else
3305                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3306
3307         IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3308
3309         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3310                 update_ptr = (u8 *)mta;
3311                 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3312                     ixgbe_mc_array_itr, true);
3313         }
3314
3315 } /* ixgbe_if_multi_set */
3316
3317 /************************************************************************
3318  * ixgbe_mc_array_itr
3319  *
3320  *   An iterator function needed by the multicast shared code.
3321  *   It feeds the shared code routine the addresses in the
3322  *   array of ixgbe_set_multi() one by one.
3323  ************************************************************************/
3324 static u8 *
3325 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3326 {
3327         struct ixgbe_mc_addr *mta;
3328
3329         mta = (struct ixgbe_mc_addr *)*update_ptr;
3330         *vmdq = mta->vmdq;
3331
3332         *update_ptr = (u8*)(mta + 1);
3333
3334         return (mta->addr);
3335 } /* ixgbe_mc_array_itr */
3336
3337 /************************************************************************
3338  * ixgbe_local_timer - Timer routine
3339  *
3340  *   Checks for link status, updates statistics,
3341  *   and runs the watchdog check.
3342  ************************************************************************/
3343 static void
3344 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3345 {
3346         struct ixgbe_softc *sc = iflib_get_softc(ctx);
3347
3348         if (qid != 0)
3349                 return;
3350
3351         /* Check for pluggable optics */
3352         if (sc->sfp_probe)
3353                 if (!ixgbe_sfp_probe(ctx))
3354                         return; /* Nothing to do */
3355
3356         ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3357
3358         /* Fire off the adminq task */
3359         iflib_admin_intr_deferred(ctx);
3360
3361 } /* ixgbe_if_timer */
3362
3363 /************************************************************************
3364  * ixgbe_sfp_probe
3365  *
3366  *   Determine if a port had optics inserted.
3367  ************************************************************************/
3368 static bool
3369 ixgbe_sfp_probe(if_ctx_t ctx)
3370 {
3371         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3372         struct ixgbe_hw *hw = &sc->hw;
3373         device_t        dev = iflib_get_dev(ctx);
3374         bool            result = false;
3375
3376         if ((hw->phy.type == ixgbe_phy_nl) &&
3377             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3378                 s32 ret = hw->phy.ops.identify_sfp(hw);
3379                 if (ret)
3380                         goto out;
3381                 ret = hw->phy.ops.reset(hw);
3382                 sc->sfp_probe = false;
3383                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3384                         device_printf(dev, "Unsupported SFP+ module detected!");
3385                         device_printf(dev,
3386                             "Reload driver with supported module.\n");
3387                         goto out;
3388                 } else
3389                         device_printf(dev, "SFP+ module detected!\n");
3390                 /* We now have supported optics */
3391                 result = true;
3392         }
3393 out:
3394
3395         return (result);
3396 } /* ixgbe_sfp_probe */
3397
3398 /************************************************************************
3399  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3400  ************************************************************************/
3401 static void
3402 ixgbe_handle_mod(void *context)
3403 {
3404         if_ctx_t        ctx = context;
3405         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3406         struct ixgbe_hw *hw = &sc->hw;
3407         device_t        dev = iflib_get_dev(ctx);
3408         u32             err, cage_full = 0;
3409
3410         if (sc->hw.need_crosstalk_fix) {
3411                 switch (hw->mac.type) {
3412                 case ixgbe_mac_82599EB:
3413                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3414                             IXGBE_ESDP_SDP2;
3415                         break;
3416                 case ixgbe_mac_X550EM_x:
3417                 case ixgbe_mac_X550EM_a:
3418                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3419                             IXGBE_ESDP_SDP0;
3420                         break;
3421                 default:
3422                         break;
3423                 }
3424
3425                 if (!cage_full)
3426                         goto handle_mod_out;
3427         }
3428
3429         err = hw->phy.ops.identify_sfp(hw);
3430         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3431                 device_printf(dev,
3432                     "Unsupported SFP+ module type was detected.\n");
3433                 goto handle_mod_out;
3434         }
3435
3436         if (hw->mac.type == ixgbe_mac_82598EB)
3437                 err = hw->phy.ops.reset(hw);
3438         else
3439                 err = hw->mac.ops.setup_sfp(hw);
3440
3441         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3442                 device_printf(dev,
3443                     "Setup failure - unsupported SFP+ module type.\n");
3444                 goto handle_mod_out;
3445         }
3446         sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3447         return;
3448
3449 handle_mod_out:
3450         sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3451 } /* ixgbe_handle_mod */
3452
3453
3454 /************************************************************************
3455  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3456  ************************************************************************/
3457 static void
3458 ixgbe_handle_msf(void *context)
3459 {
3460         if_ctx_t        ctx = context;
3461         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3462         struct ixgbe_hw *hw = &sc->hw;
3463         u32             autoneg;
3464         bool            negotiate;
3465
3466         /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3467         sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3468
3469         autoneg = hw->phy.autoneg_advertised;
3470         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3471                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3472         if (hw->mac.ops.setup_link)
3473                 hw->mac.ops.setup_link(hw, autoneg, true);
3474
3475         /* Adjust media types shown in ifconfig */
3476         ifmedia_removeall(sc->media);
3477         ixgbe_add_media_types(sc->ctx);
3478         ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3479 } /* ixgbe_handle_msf */
3480
3481 /************************************************************************
3482  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3483  ************************************************************************/
3484 static void
3485 ixgbe_handle_phy(void *context)
3486 {
3487         if_ctx_t        ctx = context;
3488         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3489         struct ixgbe_hw *hw = &sc->hw;
3490         int             error;
3491
3492         error = hw->phy.ops.handle_lasi(hw);
3493         if (error == IXGBE_ERR_OVERTEMP)
3494                 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3495         else if (error)
3496                 device_printf(sc->dev,
3497                     "Error handling LASI interrupt: %d\n", error);
3498 } /* ixgbe_handle_phy */
3499
3500 /************************************************************************
3501  * ixgbe_if_stop - Stop the hardware
3502  *
3503  *   Disables all traffic on the adapter by issuing a
3504  *   global reset on the MAC and deallocates TX/RX buffers.
3505  ************************************************************************/
3506 static void
3507 ixgbe_if_stop(if_ctx_t ctx)
3508 {
3509         struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3510         struct ixgbe_hw *hw = &sc->hw;
3511
3512         INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3513
3514         ixgbe_reset_hw(hw);
3515         hw->adapter_stopped = false;
3516         ixgbe_stop_adapter(hw);
3517         if (hw->mac.type == ixgbe_mac_82599EB)
3518                 ixgbe_stop_mac_link_on_d3_82599(hw);
3519         /* Turn off the laser - noop with no optics */
3520         ixgbe_disable_tx_laser(hw);
3521
3522         /* Update the stack */
3523         sc->link_up = false;
3524         ixgbe_if_update_admin_status(ctx);
3525
3526         /* reprogram the RAR[0] in case user changed it. */
3527         ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3528
3529         return;
3530 } /* ixgbe_if_stop */
3531
3532 /************************************************************************
3533  * ixgbe_update_link_status - Update OS on link state
3534  *
3535  * Note: Only updates the OS on the cached link state.
3536  *       The real check of the hardware only happens with
3537  *       a link interrupt.
3538  ************************************************************************/
3539 static void
3540 ixgbe_if_update_admin_status(if_ctx_t ctx)
3541 {
3542         struct ixgbe_softc *sc = iflib_get_softc(ctx);
3543         device_t       dev = iflib_get_dev(ctx);
3544
3545         if (sc->link_up) {
3546                 if (sc->link_active == false) {
3547                         if (bootverbose)
3548                                 device_printf(dev, "Link is up %d Gbps %s \n",
3549                                     ((sc->link_speed == 128) ? 10 : 1),
3550                                     "Full Duplex");
3551                         sc->link_active = true;
3552                         /* Update any Flow Control changes */
3553                         ixgbe_fc_enable(&sc->hw);
3554                         /* Update DMA coalescing config */
3555                         ixgbe_config_dmac(sc);
3556                         /* should actually be negotiated value */
3557                         iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3558
3559                         if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3560                                 ixgbe_ping_all_vfs(sc);
3561                 }
3562         } else { /* Link down */
3563                 if (sc->link_active == true) {
3564                         if (bootverbose)
3565                                 device_printf(dev, "Link is Down\n");
3566                         iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3567                         sc->link_active = false;
3568                         if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3569                                 ixgbe_ping_all_vfs(sc);
3570                 }
3571         }
3572
3573         /* Handle task requests from msix_link() */
3574         if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3575                 ixgbe_handle_mod(ctx);
3576         if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3577                 ixgbe_handle_msf(ctx);
3578         if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3579                 ixgbe_handle_mbx(ctx);
3580         if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3581                 ixgbe_reinit_fdir(ctx);
3582         if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3583                 ixgbe_handle_phy(ctx);
3584         sc->task_requests = 0;
3585
3586         ixgbe_update_stats_counters(sc);
3587 } /* ixgbe_if_update_admin_status */
3588
3589 /************************************************************************
3590  * ixgbe_config_dmac - Configure DMA Coalescing
3591  ************************************************************************/
3592 static void
3593 ixgbe_config_dmac(struct ixgbe_softc *sc)
3594 {
3595         struct ixgbe_hw          *hw = &sc->hw;
3596         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3597
3598         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3599                 return;
3600
3601         if (dcfg->watchdog_timer ^ sc->dmac ||
3602             dcfg->link_speed ^ sc->link_speed) {
3603                 dcfg->watchdog_timer = sc->dmac;
3604                 dcfg->fcoe_en = false;
3605                 dcfg->link_speed = sc->link_speed;
3606                 dcfg->num_tcs = 1;
3607
3608                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3609                     dcfg->watchdog_timer, dcfg->link_speed);
3610
3611                 hw->mac.ops.dmac_config(hw);
3612         }
3613 } /* ixgbe_config_dmac */
3614
3615 /************************************************************************
3616  * ixgbe_if_enable_intr
3617  ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32                mask, fwsm;

	/* Start from all enableable causes minus the per-queue bits;
	 * queue vectors are unmasked individually below. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific "other" causes (ECC, thermal sensor, SFP GPIOs). */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some scs */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear (EIAC); link-related and mailbox
	 * causes stay manually cleared so their handlers control re-arm. */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	/* Flush posted register writes before returning. */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */
3699
3700 /************************************************************************
3701  * ixgbe_disable_intr
3702  ************************************************************************/
3703 static void
3704 ixgbe_if_disable_intr(if_ctx_t ctx)
3705 {
3706         struct ixgbe_softc *sc = iflib_get_softc(ctx);
3707
3708         if (sc->intr_type == IFLIB_INTR_MSIX)
3709                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3710         if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3711                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3712         } else {
3713                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3714                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3715                 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3716         }
3717         IXGBE_WRITE_FLUSH(&sc->hw);
3718
3719 } /* ixgbe_if_disable_intr */
3720
3721 /************************************************************************
3722  * ixgbe_link_intr_enable
3723  ************************************************************************/
3724 static void
3725 ixgbe_link_intr_enable(if_ctx_t ctx)
3726 {
3727         struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3728
3729         /* Re-enable other interrupts */
3730         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3731 } /* ixgbe_link_intr_enable */
3732
3733 /************************************************************************
3734  * ixgbe_if_rx_queue_intr_enable
3735  ************************************************************************/
3736 static int
3737 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3738 {
3739         struct ixgbe_softc     *sc = iflib_get_softc(ctx);
3740         struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3741
3742         ixgbe_enable_queue(sc, que->msix);
3743
3744         return (0);
3745 } /* ixgbe_if_rx_queue_intr_enable */
3746
3747 /************************************************************************
3748  * ixgbe_enable_queue
3749  ************************************************************************/
3750 static void
3751 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3752 {
3753         struct ixgbe_hw *hw = &sc->hw;
3754         u64             queue = 1ULL << vector;
3755         u32             mask;
3756
3757         if (hw->mac.type == ixgbe_mac_82598EB) {
3758                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3759                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3760         } else {
3761                 mask = (queue & 0xFFFFFFFF);
3762                 if (mask)
3763                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3764                 mask = (queue >> 32);
3765                 if (mask)
3766                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3767         }
3768 } /* ixgbe_enable_queue */
3769
3770 /************************************************************************
3771  * ixgbe_disable_queue
3772  ************************************************************************/
3773 static void
3774 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
3775 {
3776         struct ixgbe_hw *hw = &sc->hw;
3777         u64             queue = 1ULL << vector;
3778         u32             mask;
3779
3780         if (hw->mac.type == ixgbe_mac_82598EB) {
3781                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3782                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3783         } else {
3784                 mask = (queue & 0xFFFFFFFF);
3785                 if (mask)
3786                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3787                 mask = (queue >> 32);
3788                 if (mask)
3789                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3790         }
3791 } /* ixgbe_disable_queue */
3792
3793 /************************************************************************
3794  * ixgbe_intr - Legacy Interrupt Service Routine
3795  ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc     *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw    *hw = &sc->hw;
	if_ctx_t           ctx = sc->ctx;
	u32                eicr, eicr_mask;

	/* Reading EICR acknowledges the pending interrupt causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* Nothing pending (shared/spurious line): re-enable and bail. */
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		/* NOTE(review): this writes EIMS (mask-set), not EICR —
		 * confirm re-arming the cause here is intentional. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change: mask LSC and defer to the admin task. */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: queue the MOD task. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multi-speed fiber event: queue the MSF task. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	/* Requested tasks are serviced by the deferred admin handler. */
	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
3854
3855 /************************************************************************
3856  * ixgbe_free_pci_resources
3857  ************************************************************************/
3858 static void
3859 ixgbe_free_pci_resources(if_ctx_t ctx)
3860 {
3861         struct ixgbe_softc *sc = iflib_get_softc(ctx);
3862         struct         ix_rx_queue *que = sc->rx_queues;
3863         device_t       dev = iflib_get_dev(ctx);
3864
3865         /* Release all MSI-X queue resources */
3866         if (sc->intr_type == IFLIB_INTR_MSIX)
3867                 iflib_irq_free(ctx, &sc->irq);
3868
3869         if (que != NULL) {
3870                 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
3871                         iflib_irq_free(ctx, &que->que_irq);
3872                 }
3873         }
3874
3875         if (sc->pci_mem != NULL)
3876                 bus_release_resource(dev, SYS_RES_MEMORY,
3877                     rman_get_rid(sc->pci_mem), sc->pci_mem);
3878 } /* ixgbe_free_pci_resources */
3879
3880 /************************************************************************
3881  * ixgbe_sysctl_flowcntl
3882  *
3883  *   SYSCTL wrapper around setting Flow Control
3884  ************************************************************************/
3885 static int
3886 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3887 {
3888         struct ixgbe_softc *sc;
3889         int            error, fc;
3890
3891         sc = (struct ixgbe_softc *)arg1;
3892         fc = sc->hw.fc.current_mode;
3893
3894         error = sysctl_handle_int(oidp, &fc, 0, req);
3895         if ((error) || (req->newptr == NULL))
3896                 return (error);
3897
3898         /* Don't bother if it's not changed */
3899         if (fc == sc->hw.fc.current_mode)
3900                 return (0);
3901
3902         return ixgbe_set_flowcntl(sc, fc);
3903 } /* ixgbe_sysctl_flowcntl */
3904
3905 /************************************************************************
3906  * ixgbe_set_flowcntl - Set flow control
3907  *
3908  *   Flow control values:
3909  *     0 - off
3910  *     1 - rx pause
3911  *     2 - tx pause
3912  *     3 - full
3913  ************************************************************************/
3914 static int
3915 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
3916 {
3917         switch (fc) {
3918         case ixgbe_fc_rx_pause:
3919         case ixgbe_fc_tx_pause:
3920         case ixgbe_fc_full:
3921                 sc->hw.fc.requested_mode = fc;
3922                 if (sc->num_rx_queues > 1)
3923                         ixgbe_disable_rx_drop(sc);
3924                 break;
3925         case ixgbe_fc_none:
3926                 sc->hw.fc.requested_mode = ixgbe_fc_none;
3927                 if (sc->num_rx_queues > 1)
3928                         ixgbe_enable_rx_drop(sc);
3929                 break;
3930         default:
3931                 return (EINVAL);
3932         }
3933
3934         /* Don't autoneg if forcing a value */
3935         sc->hw.fc.disable_fc_autoneg = true;
3936         ixgbe_fc_enable(&sc->hw);
3937
3938         return (0);
3939 } /* ixgbe_set_flowcntl */
3940
3941 /************************************************************************
3942  * ixgbe_enable_rx_drop
3943  *
3944  *   Enable the hardware to drop packets when the buffer is
3945  *   full. This is useful with multiqueue, so that no single
3946  *   queue being full stalls the entire RX engine. We only
3947  *   enable this when Multiqueue is enabled AND Flow Control
3948  *   is disabled.
3949  ************************************************************************/
3950 static void
3951 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
3952 {
3953         struct ixgbe_hw *hw = &sc->hw;
3954         struct rx_ring  *rxr;
3955         u32             srrctl;
3956
3957         for (int i = 0; i < sc->num_rx_queues; i++) {
3958                 rxr = &sc->rx_queues[i].rxr;
3959                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3960                 srrctl |= IXGBE_SRRCTL_DROP_EN;
3961                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3962         }
3963
3964         /* enable drop for each vf */
3965         for (int i = 0; i < sc->num_vfs; i++) {
3966                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3967                                 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3968                                 IXGBE_QDE_ENABLE));
3969         }
3970 } /* ixgbe_enable_rx_drop */
3971
3972 /************************************************************************
3973  * ixgbe_disable_rx_drop
3974  ************************************************************************/
3975 static void
3976 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
3977 {
3978         struct ixgbe_hw *hw = &sc->hw;
3979         struct rx_ring  *rxr;
3980         u32             srrctl;
3981
3982         for (int i = 0; i < sc->num_rx_queues; i++) {
3983                 rxr = &sc->rx_queues[i].rxr;
3984                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3985                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3986                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3987         }
3988
3989         /* disable drop for each vf */
3990         for (int i = 0; i < sc->num_vfs; i++) {
3991                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3992                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3993         }
3994 } /* ixgbe_disable_rx_drop */
3995
3996 /************************************************************************
3997  * ixgbe_sysctl_advertise
3998  *
3999  *   SYSCTL wrapper around setting advertised speed
4000  ************************************************************************/
4001 static int
4002 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4003 {
4004         struct ixgbe_softc *sc;
4005         int            error, advertise;
4006
4007         sc = (struct ixgbe_softc *)arg1;
4008         advertise = sc->advertise;
4009
4010         error = sysctl_handle_int(oidp, &advertise, 0, req);
4011         if ((error) || (req->newptr == NULL))
4012                 return (error);
4013
4014         return ixgbe_set_advertise(sc, advertise);
4015 } /* ixgbe_sysctl_advertise */
4016
4017 /************************************************************************
4018  * ixgbe_set_advertise - Control advertised link speed
4019  *
4020  *   Flags:
4021  *     0x1 - advertise 100 Mb
4022  *     0x2 - advertise 1G
4023  *     0x4 - advertise 10G
4024  *     0x8 - advertise 10 Mb (yes, Mb)
4025  ************************************************************************/
4026 static int
4027 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4028 {
4029         device_t         dev = iflib_get_dev(sc->ctx);
4030         struct ixgbe_hw  *hw;
4031         ixgbe_link_speed speed = 0;
4032         ixgbe_link_speed link_caps = 0;
4033         s32              err = IXGBE_NOT_IMPLEMENTED;
4034         bool             negotiate = false;
4035
4036         /* Checks to validate new value */
4037         if (sc->advertise == advertise) /* no change */
4038                 return (0);
4039
4040         hw = &sc->hw;
4041
4042         /* No speed changes for backplane media */
4043         if (hw->phy.media_type == ixgbe_media_type_backplane)
4044                 return (ENODEV);
4045
4046         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4047               (hw->phy.multispeed_fiber))) {
4048                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4049                 return (EINVAL);
4050         }
4051
4052         if (advertise < 0x1 || advertise > 0xF) {
4053                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4054                 return (EINVAL);
4055         }
4056
4057         if (hw->mac.ops.get_link_capabilities) {
4058                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4059                     &negotiate);
4060                 if (err != IXGBE_SUCCESS) {
4061                         device_printf(dev, "Unable to determine supported advertise speeds\n");
4062                         return (ENODEV);
4063                 }
4064         }
4065
4066         /* Set new value and report new advertised mode */
4067         if (advertise & 0x1) {
4068                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4069                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4070                         return (EINVAL);
4071                 }
4072                 speed |= IXGBE_LINK_SPEED_100_FULL;
4073         }
4074         if (advertise & 0x2) {
4075                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4076                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4077                         return (EINVAL);
4078                 }
4079                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4080         }
4081         if (advertise & 0x4) {
4082                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4083                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4084                         return (EINVAL);
4085                 }
4086                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4087         }
4088         if (advertise & 0x8) {
4089                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4090                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4091                         return (EINVAL);
4092                 }
4093                 speed |= IXGBE_LINK_SPEED_10_FULL;
4094         }
4095
4096         hw->mac.autotry_restart = true;
4097         hw->mac.ops.setup_link(hw, speed, true);
4098         sc->advertise = advertise;
4099
4100         return (0);
4101 } /* ixgbe_set_advertise */
4102
4103 /************************************************************************
4104  * ixgbe_get_advertise - Get current advertised speed settings
4105  *
4106  *   Formatted for sysctl usage.
4107  *   Flags:
4108  *     0x1 - advertise 100 Mb
4109  *     0x2 - advertise 1G
4110  *     0x4 - advertise 10G
4111  *     0x8 - advertise 10 Mb (yes, Mb)
4112  ************************************************************************/
4113 static int
4114 ixgbe_get_advertise(struct ixgbe_softc *sc)
4115 {
4116         struct ixgbe_hw  *hw = &sc->hw;
4117         int              speed;
4118         ixgbe_link_speed link_caps = 0;
4119         s32              err;
4120         bool             negotiate = false;
4121
4122         /*
4123          * Advertised speed means nothing unless it's copper or
4124          * multi-speed fiber
4125          */
4126         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4127             !(hw->phy.multispeed_fiber))
4128                 return (0);
4129
4130         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4131         if (err != IXGBE_SUCCESS)
4132                 return (0);
4133
4134         speed =
4135             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4136             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4137             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4138             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4139
4140         return speed;
4141 } /* ixgbe_get_advertise */
4142
4143 /************************************************************************
4144  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4145  *
4146  *   Control values:
4147  *     0/1 - off / on (use default value of 1000)
4148  *
4149  *     Legal timer values are:
4150  *     50,100,250,500,1000,2000,5000,10000
4151  *
4152  *     Turning off interrupt moderation will also turn this off.
4153  ************************************************************************/
4154 static int
4155 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4156 {
4157         struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4158         struct ifnet   *ifp = iflib_get_ifp(sc->ctx);
4159         int            error;
4160         u16            newval;
4161
4162         newval = sc->dmac;
4163         error = sysctl_handle_16(oidp, &newval, 0, req);
4164         if ((error) || (req->newptr == NULL))
4165                 return (error);
4166
4167         switch (newval) {
4168         case 0:
4169                 /* Disabled */
4170                 sc->dmac = 0;
4171                 break;
4172         case 1:
4173                 /* Enable and use default */
4174                 sc->dmac = 1000;
4175                 break;
4176         case 50:
4177         case 100:
4178         case 250:
4179         case 500:
4180         case 1000:
4181         case 2000:
4182         case 5000:
4183         case 10000:
4184                 /* Legal values - allow */
4185                 sc->dmac = newval;
4186                 break;
4187         default:
4188                 /* Do nothing, illegal value */
4189                 return (EINVAL);
4190         }
4191
4192         /* Re-initialize hardware if it's already running */
4193         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4194                 ifp->if_init(ifp);
4195
4196         return (0);
4197 } /* ixgbe_sysctl_dmac */
4198
4199 #ifdef IXGBE_DEBUG
4200 /************************************************************************
4201  * ixgbe_sysctl_power_state
4202  *
4203  *   Sysctl to test power states
4204  *   Values:
4205  *     0      - set device to D0
4206  *     3      - set device to D3
4207  *     (none) - get current device power state
4208  ************************************************************************/
4209 static int
4210 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4211 {
4212         struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4213         device_t       dev = sc->dev;
4214         int            curr_ps, new_ps, error = 0;
4215
4216         curr_ps = new_ps = pci_get_powerstate(dev);
4217
4218         error = sysctl_handle_int(oidp, &new_ps, 0, req);
4219         if ((error) || (req->newptr == NULL))
4220                 return (error);
4221
4222         if (new_ps == curr_ps)
4223                 return (0);
4224
4225         if (new_ps == 3 && curr_ps == 0)
4226                 error = DEVICE_SUSPEND(dev);
4227         else if (new_ps == 0 && curr_ps == 3)
4228                 error = DEVICE_RESUME(dev);
4229         else
4230                 return (EINVAL);
4231
4232         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4233
4234         return (error);
4235 } /* ixgbe_sysctl_power_state */
4236 #endif
4237
4238 /************************************************************************
4239  * ixgbe_sysctl_wol_enable
4240  *
4241  *   Sysctl to enable/disable the WoL capability,
4242  *   if supported by the adapter.
4243  *
4244  *   Values:
4245  *     0 - disabled
4246  *     1 - enabled
4247  ************************************************************************/
4248 static int
4249 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4250 {
4251         struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4252         struct ixgbe_hw *hw = &sc->hw;
4253         int             new_wol_enabled;
4254         int             error = 0;
4255
4256         new_wol_enabled = hw->wol_enabled;
4257         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4258         if ((error) || (req->newptr == NULL))
4259                 return (error);
4260         new_wol_enabled = !!(new_wol_enabled);
4261         if (new_wol_enabled == hw->wol_enabled)
4262                 return (0);
4263
4264         if (new_wol_enabled > 0 && !sc->wol_support)
4265                 return (ENODEV);
4266         else
4267                 hw->wol_enabled = new_wol_enabled;
4268
4269         return (0);
4270 } /* ixgbe_sysctl_wol_enable */
4271
4272 /************************************************************************
4273  * ixgbe_sysctl_wufc - Wake Up Filter Control
4274  *
4275  *   Sysctl to enable/disable the types of packets that the
4276  *   adapter will wake up on upon receipt.
4277  *   Flags:
4278  *     0x1  - Link Status Change
4279  *     0x2  - Magic Packet
4280  *     0x4  - Direct Exact
4281  *     0x8  - Directed Multicast
4282  *     0x10 - Broadcast
4283  *     0x20 - ARP/IPv4 Request Packet
4284  *     0x40 - Direct IPv4 Packet
4285  *     0x80 - Direct IPv6 Packet
4286  *
4287  *   Settings not listed above will cause the sysctl to return an error.
4288  ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int            error = 0;
	u32            new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	/* Only the low 8 bits are user-settable wake-up flags. */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): the OR below merges the previous low 24 bits of
	 * sc->wufc back into the new value, so flag bits that are already
	 * set can never be cleared through this sysctl. Confirm whether
	 * that is intentional before relying on this to disable filters.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
4313
4314 #ifdef IXGBE_DEBUG
4315 /************************************************************************
4316  * ixgbe_sysctl_print_rss_config
4317  ************************************************************************/
4318 static int
4319 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4320 {
4321         struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4322         struct ixgbe_hw *hw = &sc->hw;
4323         device_t        dev = sc->dev;
4324         struct sbuf     *buf;
4325         int             error = 0, reta_size;
4326         u32             reg;
4327
4328         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4329         if (!buf) {
4330                 device_printf(dev, "Could not allocate sbuf for output.\n");
4331                 return (ENOMEM);
4332         }
4333
4334         // TODO: use sbufs to make a string to print out
4335         /* Set multiplier for RETA setup and table size based on MAC */
4336         switch (sc->hw.mac.type) {
4337         case ixgbe_mac_X550:
4338         case ixgbe_mac_X550EM_x:
4339         case ixgbe_mac_X550EM_a:
4340                 reta_size = 128;
4341                 break;
4342         default:
4343                 reta_size = 32;
4344                 break;
4345         }
4346
4347         /* Print out the redirection table */
4348         sbuf_cat(buf, "\n");
4349         for (int i = 0; i < reta_size; i++) {
4350                 if (i < 32) {
4351                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4352                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4353                 } else {
4354                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4355                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4356                 }
4357         }
4358
4359         // TODO: print more config
4360
4361         error = sbuf_finish(buf);
4362         if (error)
4363                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4364
4365         sbuf_delete(buf);
4366
4367         return (0);
4368 } /* ixgbe_sysctl_print_rss_config */
4369 #endif /* IXGBE_DEBUG */
4370
4371 /************************************************************************
4372  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4373  *
4374  *   For X552/X557-AT devices using an external PHY
4375  ************************************************************************/
4376 static int
4377 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4378 {
4379         struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4380         struct ixgbe_hw *hw = &sc->hw;
4381         u16             reg;
4382
4383         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4384                 device_printf(iflib_get_dev(sc->ctx),
4385                     "Device has no supported external thermal sensor.\n");
4386                 return (ENODEV);
4387         }
4388
4389         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4390             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4391                 device_printf(iflib_get_dev(sc->ctx),
4392                     "Error reading from PHY's current temperature register\n");
4393                 return (EAGAIN);
4394         }
4395
4396         /* Shift temp for output */
4397         reg = reg >> 8;
4398
4399         return (sysctl_handle_16(oidp, NULL, reg, req));
4400 } /* ixgbe_sysctl_phy_temp */
4401
4402 /************************************************************************
4403  * ixgbe_sysctl_phy_overtemp_occurred
4404  *
4405  *   Reports (directly from the PHY) whether the current PHY
4406  *   temperature is over the overtemp threshold.
4407  ************************************************************************/
4408 static int
4409 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4410 {
4411         struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4412         struct ixgbe_hw *hw = &sc->hw;
4413         u16             reg;
4414
4415         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4416                 device_printf(iflib_get_dev(sc->ctx),
4417                     "Device has no supported external thermal sensor.\n");
4418                 return (ENODEV);
4419         }
4420
4421         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4422             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4423                 device_printf(iflib_get_dev(sc->ctx),
4424                     "Error reading from PHY's temperature status register\n");
4425                 return (EAGAIN);
4426         }
4427
4428         /* Get occurrence bit */
4429         reg = !!(reg & 0x4000);
4430
4431         return (sysctl_handle_16(oidp, 0, reg, req));
4432 } /* ixgbe_sysctl_phy_overtemp_occurred */
4433
4434 /************************************************************************
4435  * ixgbe_sysctl_eee_state
4436  *
4437  *   Sysctl to set EEE power saving feature
4438  *   Values:
4439  *     0      - disable EEE
4440  *     1      - enable EEE
4441  *     (none) - get current device EEE state
4442  ************************************************************************/
4443 static int
4444 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4445 {
4446         struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4447         device_t       dev = sc->dev;
4448         struct ifnet   *ifp = iflib_get_ifp(sc->ctx);
4449         int            curr_eee, new_eee, error = 0;
4450         s32            retval;
4451
4452         curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4453
4454         error = sysctl_handle_int(oidp, &new_eee, 0, req);
4455         if ((error) || (req->newptr == NULL))
4456                 return (error);
4457
4458         /* Nothing to do */
4459         if (new_eee == curr_eee)
4460                 return (0);
4461
4462         /* Not supported */
4463         if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4464                 return (EINVAL);
4465
4466         /* Bounds checking */
4467         if ((new_eee < 0) || (new_eee > 1))
4468                 return (EINVAL);
4469
4470         retval = ixgbe_setup_eee(&sc->hw, new_eee);
4471         if (retval) {
4472                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4473                 return (EINVAL);
4474         }
4475
4476         /* Restart auto-neg */
4477         ifp->if_init(ifp);
4478
4479         device_printf(dev, "New EEE state: %d\n", new_eee);
4480
4481         /* Cache new value */
4482         if (new_eee)
4483                 sc->feat_en |= IXGBE_FEATURE_EEE;
4484         else
4485                 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4486
4487         return (error);
4488 } /* ixgbe_sysctl_eee_state */
4489
4490 /************************************************************************
4491  * ixgbe_init_device_features
4492  ************************************************************************/
4493 static void
4494 ixgbe_init_device_features(struct ixgbe_softc *sc)
4495 {
4496         sc->feat_cap = IXGBE_FEATURE_NETMAP
4497                           | IXGBE_FEATURE_RSS
4498                           | IXGBE_FEATURE_MSI
4499                           | IXGBE_FEATURE_MSIX
4500                           | IXGBE_FEATURE_LEGACY_IRQ;
4501
4502         /* Set capabilities first... */
4503         switch (sc->hw.mac.type) {
4504         case ixgbe_mac_82598EB:
4505                 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4506                         sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4507                 break;
4508         case ixgbe_mac_X540:
4509                 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4510                 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4511                 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4512                     (sc->hw.bus.func == 0))
4513                         sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4514                 break;
4515         case ixgbe_mac_X550:
4516                 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4517                 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4518                 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4519                 break;
4520         case ixgbe_mac_X550EM_x:
4521                 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4522                 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4523                 break;
4524         case ixgbe_mac_X550EM_a:
4525                 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4526                 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4527                 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4528                 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4529                     (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4530                         sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4531                         sc->feat_cap |= IXGBE_FEATURE_EEE;
4532                 }
4533                 break;
4534         case ixgbe_mac_82599EB:
4535                 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4536                 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4537                 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4538                     (sc->hw.bus.func == 0))
4539                         sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4540                 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4541                         sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4542                 break;
4543         default:
4544                 break;
4545         }
4546
4547         /* Enabled by default... */
4548         /* Fan failure detection */
4549         if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4550                 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4551         /* Netmap */
4552         if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4553                 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4554         /* EEE */
4555         if (sc->feat_cap & IXGBE_FEATURE_EEE)
4556                 sc->feat_en |= IXGBE_FEATURE_EEE;
4557         /* Thermal Sensor */
4558         if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4559                 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4560
4561         /* Enabled via global sysctl... */
4562         /* Flow Director */
4563         if (ixgbe_enable_fdir) {
4564                 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4565                         sc->feat_en |= IXGBE_FEATURE_FDIR;
4566                 else
4567                         device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
4568         }
4569         /*
4570          * Message Signal Interrupts - Extended (MSI-X)
4571          * Normal MSI is only enabled if MSI-X calls fail.
4572          */
4573         if (!ixgbe_enable_msix)
4574                 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4575         /* Receive-Side Scaling (RSS) */
4576         if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4577                 sc->feat_en |= IXGBE_FEATURE_RSS;
4578
4579         /* Disable features with unmet dependencies... */
4580         /* No MSI-X */
4581         if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4582                 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4583                 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4584                 sc->feat_en &= ~IXGBE_FEATURE_RSS;
4585                 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4586         }
4587 } /* ixgbe_init_device_features */
4588
4589 /************************************************************************
4590  * ixgbe_check_fan_failure
4591  ************************************************************************/
4592 static void
4593 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
4594 {
4595         u32 mask;
4596
4597         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
4598             IXGBE_ESDP_SDP1;
4599
4600         if (reg & mask)
4601                 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4602 } /* ixgbe_check_fan_failure */
4603
4604 /************************************************************************
4605  * ixgbe_sbuf_fw_version
4606  ************************************************************************/
4607 static void
4608 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4609 {
4610         struct ixgbe_nvm_version nvm_ver = {0};
4611         uint16_t phyfw = 0;
4612         int status;
4613         const char *space = "";
4614
4615         ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4616         ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4617         ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4618         status = ixgbe_get_phy_firmware_version(hw, &phyfw);
4619
4620         if (nvm_ver.oem_valid) {
4621                 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4622                     nvm_ver.oem_minor, nvm_ver.oem_release);
4623                 space = " ";
4624         }
4625
4626         if (nvm_ver.or_valid) {
4627                 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4628                     space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
4629                 space = " ";
4630         }
4631
4632         if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
4633             NVM_VER_INVALID)) {
4634                 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
4635                 space = " ";
4636         }
4637
4638         if (phyfw != 0 && status == IXGBE_SUCCESS)
4639                 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
4640 } /* ixgbe_sbuf_fw_version */
4641
4642 /************************************************************************
4643  * ixgbe_print_fw_version
4644  ************************************************************************/
4645 static void
4646 ixgbe_print_fw_version(if_ctx_t ctx)
4647 {
4648         struct ixgbe_softc *sc = iflib_get_softc(ctx);
4649         struct ixgbe_hw *hw = &sc->hw;
4650         device_t dev = sc->dev;
4651         struct sbuf *buf;
4652         int error = 0;
4653
4654         buf = sbuf_new_auto();
4655         if (!buf) {
4656                 device_printf(dev, "Could not allocate sbuf for output.\n");
4657                 return;
4658         }
4659
4660         ixgbe_sbuf_fw_version(hw, buf);
4661
4662         error = sbuf_finish(buf);
4663         if (error)
4664                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4665         else if (sbuf_len(buf))
4666                 device_printf(dev, "%s\n", sbuf_data(buf));
4667
4668         sbuf_delete(buf);
4669 } /* ixgbe_print_fw_version */
4670
4671 /************************************************************************
4672  * ixgbe_sysctl_print_fw_version
4673  ************************************************************************/
4674 static int
4675 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4676 {
4677         struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4678         struct ixgbe_hw *hw = &sc->hw;
4679         device_t dev = sc->dev;
4680         struct sbuf *buf;
4681         int error = 0;
4682
4683         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4684         if (!buf) {
4685                 device_printf(dev, "Could not allocate sbuf for output.\n");
4686                 return (ENOMEM);
4687         }
4688
4689         ixgbe_sbuf_fw_version(hw, buf);
4690
4691         error = sbuf_finish(buf);
4692         if (error)
4693                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4694
4695         sbuf_delete(buf);
4696
4697         return (0);
4698 } /* ixgbe_sysctl_print_fw_version */