1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /************************************************************************
44  * Driver version
45  ************************************************************************/
46 char ixgbe_driver_version[] = "3.2.11-k";
47
48
49 /************************************************************************
50  * PCI Device ID Table
51  *
52  *   Used by probe to select which devices to load on
53  *   Last field stores an index into ixgbe_strings
54  *   Last entry must be all 0s
55  *
56  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
57  ************************************************************************/
58 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
59 {
60         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
101         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
102         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
103         /* required last entry */
104         {0, 0, 0, 0, 0}
105 };
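/*
 * A note on extending this table (illustrative, not taken from the
 * upstream sources): supporting an additional SKU is normally just a
 * matter of appending another
 *   { IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_<new>, 0, 0, 0 },
 * line ahead of the all-zero terminator, with the device ID macro
 * coming from the shared code headers.  ixgbe_probe() below walks
 * this table until it reaches the zero entry.
 */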
106
107 /************************************************************************
108  * Table of branding strings
109  ************************************************************************/
110 static char    *ixgbe_strings[] = {
111         "Intel(R) PRO/10GbE PCI-Express Network Driver"
112 };
113
114 /************************************************************************
115  * Function prototypes
116  ************************************************************************/
117 static int      ixgbe_probe(device_t);
118 static int      ixgbe_attach(device_t);
119 static int      ixgbe_detach(device_t);
120 static int      ixgbe_shutdown(device_t);
121 static int      ixgbe_suspend(device_t);
122 static int      ixgbe_resume(device_t);
123 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
124 static void     ixgbe_init(void *);
125 static void     ixgbe_stop(void *);
126 #if __FreeBSD_version >= 1100036
127 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
128 #endif
129 static void     ixgbe_init_device_features(struct adapter *);
130 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
131 static void     ixgbe_add_media_types(struct adapter *);
132 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
133 static int      ixgbe_media_change(struct ifnet *);
134 static int      ixgbe_allocate_pci_resources(struct adapter *);
135 static void     ixgbe_get_slot_info(struct adapter *);
136 static int      ixgbe_allocate_msix(struct adapter *);
137 static int      ixgbe_allocate_legacy(struct adapter *);
138 static int      ixgbe_configure_interrupts(struct adapter *);
139 static void     ixgbe_free_pci_resources(struct adapter *);
140 static void     ixgbe_local_timer(void *);
141 static int      ixgbe_setup_interface(device_t, struct adapter *);
142 static void     ixgbe_config_gpie(struct adapter *);
143 static void     ixgbe_config_dmac(struct adapter *);
144 static void     ixgbe_config_delay_values(struct adapter *);
145 static void     ixgbe_config_link(struct adapter *);
146 static void     ixgbe_check_wol_support(struct adapter *);
147 static int      ixgbe_setup_low_power_mode(struct adapter *);
148 static void     ixgbe_rearm_queues(struct adapter *, u64);
149
150 static void     ixgbe_initialize_transmit_units(struct adapter *);
151 static void     ixgbe_initialize_receive_units(struct adapter *);
152 static void     ixgbe_enable_rx_drop(struct adapter *);
153 static void     ixgbe_disable_rx_drop(struct adapter *);
154 static void     ixgbe_initialize_rss_mapping(struct adapter *);
155
156 static void     ixgbe_enable_intr(struct adapter *);
157 static void     ixgbe_disable_intr(struct adapter *);
158 static void     ixgbe_update_stats_counters(struct adapter *);
159 static void     ixgbe_set_promisc(struct adapter *);
160 static void     ixgbe_set_multi(struct adapter *);
161 static void     ixgbe_update_link_status(struct adapter *);
162 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
163 static void     ixgbe_configure_ivars(struct adapter *);
164 static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
165
166 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
167 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
168 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
169
170 static void     ixgbe_add_device_sysctls(struct adapter *);
171 static void     ixgbe_add_hw_stats(struct adapter *);
172 static int      ixgbe_set_flowcntl(struct adapter *, int);
173 static int      ixgbe_set_advertise(struct adapter *, int);
174 static int      ixgbe_get_advertise(struct adapter *);
175
176 /* Sysctl handlers */
177 static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
178                                        const char *, int *, int);
179 static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
180 static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
181 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
182 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
183 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
184 #ifdef IXGBE_DEBUG
185 static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
186 static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
187 #endif
188 static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
189 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
190 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
191
192 /* Support for pluggable optic modules */
193 static bool     ixgbe_sfp_probe(struct adapter *);
194
195 /* Legacy (single vector) interrupt handler */
196 static void     ixgbe_legacy_irq(void *);
197
198 /* The MSI/MSI-X Interrupt handlers */
199 static void     ixgbe_msix_que(void *);
200 static void     ixgbe_msix_link(void *);
201
202 /* Deferred interrupt tasklets */
203 static void     ixgbe_handle_que(void *, int);
204 static void     ixgbe_handle_link(void *, int);
205 static void     ixgbe_handle_msf(void *, int);
206 static void     ixgbe_handle_mod(void *, int);
207 static void     ixgbe_handle_phy(void *, int);
208
209
210 /************************************************************************
211  *  FreeBSD Device Interface Entry Points
212  ************************************************************************/
213 static device_method_t ix_methods[] = {
214         /* Device interface */
215         DEVMETHOD(device_probe, ixgbe_probe),
216         DEVMETHOD(device_attach, ixgbe_attach),
217         DEVMETHOD(device_detach, ixgbe_detach),
218         DEVMETHOD(device_shutdown, ixgbe_shutdown),
219         DEVMETHOD(device_suspend, ixgbe_suspend),
220         DEVMETHOD(device_resume, ixgbe_resume),
221 #ifdef PCI_IOV
222         DEVMETHOD(pci_iov_init, ixgbe_init_iov),
223         DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
224         DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
225 #endif /* PCI_IOV */
226         DEVMETHOD_END
227 };
228
229 static driver_t ix_driver = {
230         "ix", ix_methods, sizeof(struct adapter),
231 };
232
233 devclass_t ix_devclass;
234 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
235
236 MODULE_DEPEND(ix, pci, 1, 1, 1);
237 MODULE_DEPEND(ix, ether, 1, 1, 1);
238 #if __FreeBSD_version >= 1100000
239 MODULE_DEPEND(ix, netmap, 1, 1, 1);
240 #endif
241
242 /*
243  * TUNEABLE PARAMETERS:
244  */
245
246 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
247
248 /*
249  * AIM: Adaptive Interrupt Moderation.
250  * When enabled, the interrupt rate is varied over
251  * time based on the traffic observed on each
252  * interrupt vector.
253  */
254 static int ixgbe_enable_aim = TRUE;
255 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
256 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
257     "Enable adaptive interrupt moderation");
258
259 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
260 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
261 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
262     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
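/*
 * A minimal usage sketch for the hw.ix.* knobs above (values are
 * illustrative only): as loader tunables they can be set from
 * /boot/loader.conf before the driver attaches, e.g.
 *
 *   hw.ix.enable_aim="0"
 *   hw.ix.max_interrupt_rate="31250"
 *
 * Assuming IXGBE_LOW_LATENCY is 128 (as defined in ixgbe.h), the
 * default above works out to 4000000 / 128 = 31250 interrupts per
 * second per vector.
 */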
263
264 /* How many packets rxeof tries to clean at a time */
265 static int ixgbe_rx_process_limit = 256;
266 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
267 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
268     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
269
270 /* How many packets txeof tries to clean at a time */
271 static int ixgbe_tx_process_limit = 256;
272 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
273 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
274     &ixgbe_tx_process_limit, 0,
275     "Maximum number of sent packets to process at a time, -1 means unlimited");
276
277 /* Flow control setting, default to full */
278 static int ixgbe_flow_control = ixgbe_fc_full;
279 TUNABLE_INT("hw.ix.flow_control", &ixgbe_flow_control);
280 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
281     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
282
283 /* Advertise Speed, default to 0 (auto) */
284 static int ixgbe_advertise_speed = 0;
285 TUNABLE_INT("hw.ix.advertise_speed", &ixgbe_advertise_speed);
286 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
288
289 /*
290  * Smart speed setting, default to on.
291  * This only works as a compile-time option
292  * right now, since it is applied during attach;
293  * set this to 'ixgbe_smart_speed_off' to
294  * disable.
295  */
296 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
297
298 /*
299  * MSI-X should be the default for best performance,
300  * but this allows it to be forced off for testing.
301  */
302 static int ixgbe_enable_msix = 1;
303 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
304 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
305     "Enable MSI-X interrupts");
306
307 /*
308  * Number of queues: can be set to 0,
309  * in which case it autoconfigures based on the
310  * number of CPUs, with a maximum of 8. It
311  * can be overridden manually here.
312  */
313 static int ixgbe_num_queues = 0;
314 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
315 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316     "Number of queues to configure, 0 indicates autoconfigure");
317
318 /*
319  * Number of TX descriptors per ring;
320  * set higher than RX, as this seems to be
321  * the better-performing choice.
322  */
323 static int ixgbe_txd = PERFORM_TXD;
324 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
325 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
326     "Number of transmit descriptors per queue");
327
328 /* Number of RX descriptors per ring */
329 static int ixgbe_rxd = PERFORM_RXD;
330 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
331 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
332     "Number of receive descriptors per queue");
333
334 /*
335  * Setting this on allows the use of
336  * unsupported SFP+ modules; note that
337  * in doing so you are on your own :)
338  */
339 static int allow_unsupported_sfp = FALSE;
340 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
341 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
342     &allow_unsupported_sfp, 0,
343     "Allow unsupported SFP modules...use at your own risk");
344
345 /*
346  * Not sure if Flow Director is fully baked,
347  * so we'll default to turning it off.
348  */
349 static int ixgbe_enable_fdir = 0;
350 TUNABLE_INT("hw.ix.enable_fdir", &ixgbe_enable_fdir);
351 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
352     "Enable Flow Director");
353
354 /* Legacy Transmit (single queue) */
355 static int ixgbe_enable_legacy_tx = 0;
356 TUNABLE_INT("hw.ix.enable_legacy_tx", &ixgbe_enable_legacy_tx);
357 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
358     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
359
360 /* Receive-Side Scaling */
361 static int ixgbe_enable_rss = 1;
362 TUNABLE_INT("hw.ix.enable_rss", &ixgbe_enable_rss);
363 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
364     "Enable Receive-Side Scaling (RSS)");
365
366 /* Keep a running tab on the number of ports for sanity checking */
367 static int ixgbe_total_ports;
368
369 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
370 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
371
372 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
373
374 /************************************************************************
375  * ixgbe_probe - Device identification routine
376  *
377  *   Determines if the driver should be loaded on the
378  *   adapter based on its PCI vendor/device ID.
379  *
380  *   return BUS_PROBE_DEFAULT on success, positive on failure
381  ************************************************************************/
382 static int
383 ixgbe_probe(device_t dev)
384 {
385         ixgbe_vendor_info_t *ent;
386
387         u16  pci_vendor_id = 0;
388         u16  pci_device_id = 0;
389         u16  pci_subvendor_id = 0;
390         u16  pci_subdevice_id = 0;
391         char adapter_name[256];
392
393         INIT_DEBUGOUT("ixgbe_probe: begin");
394
395         pci_vendor_id = pci_get_vendor(dev);
396         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
397                 return (ENXIO);
398
399         pci_device_id = pci_get_device(dev);
400         pci_subvendor_id = pci_get_subvendor(dev);
401         pci_subdevice_id = pci_get_subdevice(dev);
402
403         ent = ixgbe_vendor_info_array;
404         while (ent->vendor_id != 0) {
405                 if ((pci_vendor_id == ent->vendor_id) &&
406                     (pci_device_id == ent->device_id) &&
407                     ((pci_subvendor_id == ent->subvendor_id) ||
408                      (ent->subvendor_id == 0)) &&
409                     ((pci_subdevice_id == ent->subdevice_id) ||
410                      (ent->subdevice_id == 0))) {
411                         sprintf(adapter_name, "%s, Version - %s",
412                                 ixgbe_strings[ent->index],
413                                 ixgbe_driver_version);
414                         device_set_desc_copy(dev, adapter_name);
415                         ++ixgbe_total_ports;
416                         return (BUS_PROBE_DEFAULT);
417                 }
418                 ent++;
419         }
420
421         return (ENXIO);
422 } /* ixgbe_probe */
423
424 /************************************************************************
425  * ixgbe_attach - Device initialization routine
426  *
427  *   Called when the driver is being loaded.
428  *   Identifies the type of hardware, allocates all resources
429  *   and initializes the hardware.
430  *
431  *   return 0 on success, positive on failure
432  ************************************************************************/
433 static int
434 ixgbe_attach(device_t dev)
435 {
436         struct adapter  *adapter;
437         struct ixgbe_hw *hw;
438         int             error = 0;
439         u32             ctrl_ext;
440
441         INIT_DEBUGOUT("ixgbe_attach: begin");
442
443         /* Allocate, clear, and link in our adapter structure */
444         adapter = device_get_softc(dev);
445         adapter->hw.back = adapter;
446         adapter->dev = dev;
447         hw = &adapter->hw;
448
449         /* Core Lock Init*/
450         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
451
452         /* Set up the timer callout */
453         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
454
455         /* Determine hardware revision */
456         hw->vendor_id = pci_get_vendor(dev);
457         hw->device_id = pci_get_device(dev);
458         hw->revision_id = pci_get_revid(dev);
459         hw->subsystem_vendor_id = pci_get_subvendor(dev);
460         hw->subsystem_device_id = pci_get_subdevice(dev);
461
462         /*
463          * Make sure BUSMASTER is set
464          */
465         pci_enable_busmaster(dev);
466
467         /* Do base PCI setup - map BAR0 */
468         if (ixgbe_allocate_pci_resources(adapter)) {
469                 device_printf(dev, "Allocation of PCI resources failed\n");
470                 error = ENXIO;
471                 goto err_out;
472         }
473
474         /* let hardware know driver is loaded */
475         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
476         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
477         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
478
479         /*
480          * Initialize the shared code
481          */
482         if (ixgbe_init_shared_code(hw)) {
483                 device_printf(dev, "Unable to initialize the shared code\n");
484                 error = ENXIO;
485                 goto err_out;
486         }
487
488         if (hw->mbx.ops.init_params)
489                 hw->mbx.ops.init_params(hw);
490
491         hw->allow_unsupported_sfp = allow_unsupported_sfp;
492
493         /* Pick up the 82599 settings */
494         if (hw->mac.type != ixgbe_mac_82598EB) {
495                 hw->phy.smart_speed = ixgbe_smart_speed;
496                 adapter->num_segs = IXGBE_82599_SCATTER;
497         } else
498                 adapter->num_segs = IXGBE_82598_SCATTER;
499
500         ixgbe_init_device_features(adapter);
501
502         if (ixgbe_configure_interrupts(adapter)) {
503                 error = ENXIO;
504                 goto err_out;
505         }
506
507         /* Allocate multicast array memory. */
508         adapter->mta = malloc(sizeof(*adapter->mta) *
509             MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
510         if (adapter->mta == NULL) {
511                 device_printf(dev, "Can not allocate multicast setup array\n");
512                 error = ENOMEM;
513                 goto err_out;
514         }
515
516         /* Enable WoL (if supported) */
517         ixgbe_check_wol_support(adapter);
518
519         /* Register for VLAN events */
520         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
521             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
522         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
523             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
524
525         /* Verify adapter fan is still functional (if applicable) */
526         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
527                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
528                 ixgbe_check_fan_failure(adapter, esdp, false);
529         }
530
531         /* Enable EEE power saving */
532         if (adapter->feat_en & IXGBE_FEATURE_EEE)
533                 hw->mac.ops.setup_eee(hw, true);
534
535         /* Set an initial default flow control value */
536         hw->fc.requested_mode = ixgbe_flow_control;
537
538         /* Put the semaphore in a known state (released) */
539         ixgbe_init_swfw_semaphore(hw);
540
541         /* Sysctls for limiting the amount of work done in the taskqueues */
542         ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
543             "max number of rx packets to process",
544             &adapter->rx_process_limit, ixgbe_rx_process_limit);
545
546         ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
547             "max number of tx packets to process",
548             &adapter->tx_process_limit, ixgbe_tx_process_limit);
549
550         /* Do descriptor calc and sanity checks */
551         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
552             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
553                 device_printf(dev, "TXD config issue, using default!\n");
554                 adapter->num_tx_desc = DEFAULT_TXD;
555         } else
556                 adapter->num_tx_desc = ixgbe_txd;
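        /*
         * A worked example of the alignment check above (assuming a
         * 16-byte advanced TX descriptor and DBA_ALIGN of 128, as in
         * ixgbe.h): the ring size in bytes is a multiple of 128 only
         * when ixgbe_txd is a multiple of 8, so hw.ix.txd="2048"
         * (2048 * 16 = 32768 bytes) passes, while 2052 would not.
         */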
557
558         /*
559          * With many RX rings it is easy to exceed the
560          * system mbuf allocation. Tuning nmbclusters
561          * can alleviate this.
562          */
563         if (nmbclusters > 0) {
564                 int s;
565                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
566                 if (s > nmbclusters) {
567                         device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
568                         ixgbe_rxd = DEFAULT_RXD;
569                 }
570         }
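        /*
         * A rough worked example of the check above: two ports, each
         * with 8 RX queues of 2048 descriptors, want
         * 2 * 8 * 2048 = 32768 clusters, so a system with nmbclusters
         * below that trips the warning and falls back to DEFAULT_RXD.
         * kern.ipc.nmbclusters can be raised via loader.conf when
         * larger rings are desired.
         */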
571
572         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
573             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
574                 device_printf(dev, "RXD config issue, using default!\n");
575                 adapter->num_rx_desc = DEFAULT_RXD;
576         } else
577                 adapter->num_rx_desc = ixgbe_rxd;
578
579         /* Allocate our TX/RX Queues */
580         if (ixgbe_allocate_queues(adapter)) {
581                 error = ENOMEM;
582                 goto err_out;
583         }
584
585         hw->phy.reset_if_overtemp = TRUE;
586         error = ixgbe_reset_hw(hw);
587         hw->phy.reset_if_overtemp = FALSE;
588         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
589                 /*
590                  * No optics in this port, set up
591                  * so the timer routine will probe
592                  * for later insertion.
593                  */
594                 adapter->sfp_probe = TRUE;
595                 error = IXGBE_SUCCESS;
596         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
597                 device_printf(dev, "Unsupported SFP+ module detected!\n");
598                 error = EIO;
599                 goto err_late;
600         } else if (error) {
601                 device_printf(dev, "Hardware initialization failed\n");
602                 error = EIO;
603                 goto err_late;
604         }
605
606         /* Make sure we have a good EEPROM before we read from it */
607         if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
608                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
609                 error = EIO;
610                 goto err_late;
611         }
612
613         /* Setup OS specific network interface */
614         if (ixgbe_setup_interface(dev, adapter) != 0)
615                 goto err_late;
616
617         if (adapter->feat_en & IXGBE_FEATURE_MSIX)
618                 error = ixgbe_allocate_msix(adapter);
619         else
620                 error = ixgbe_allocate_legacy(adapter);
621         if (error)
622                 goto err_late;
623
624         error = ixgbe_start_hw(hw);
625         switch (error) {
626         case IXGBE_ERR_EEPROM_VERSION:
627                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
628                 break;
629         case IXGBE_ERR_SFP_NOT_SUPPORTED:
630                 device_printf(dev, "Unsupported SFP+ Module\n");
631                 error = EIO;
632                 goto err_late;
633         case IXGBE_ERR_SFP_NOT_PRESENT:
634                 device_printf(dev, "No SFP+ Module found\n");
635                 /* falls thru */
636         default:
637                 break;
638         }
639
640         /* Enable the optics for 82599 SFP+ fiber */
641         ixgbe_enable_tx_laser(hw);
642
643         /* Enable power to the phy. */
644         ixgbe_set_phy_power(hw, TRUE);
645
646         /* Initialize statistics */
647         ixgbe_update_stats_counters(adapter);
648
649         /* Check PCIE slot type/speed/width */
650         ixgbe_get_slot_info(adapter);
651
652         /*
653          * Do time init and sysctl init here, but
654          * only on the first port of a bypass adapter.
655          */
656         ixgbe_bypass_init(adapter);
657
658         /* Set an initial dmac value */
659         adapter->dmac = 0;
660         /* Set initial advertised speeds (if applicable) */
661         adapter->advertise = ixgbe_get_advertise(adapter);
662
663         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
664                 ixgbe_define_iov_schemas(dev, &error);
665
666         /* Add sysctls */
667         ixgbe_add_device_sysctls(adapter);
668         ixgbe_add_hw_stats(adapter);
669
670         /* For Netmap */
671         adapter->init_locked = ixgbe_init_locked;
672         adapter->stop_locked = ixgbe_stop;
673
674         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
675                 ixgbe_netmap_attach(adapter);
676
677         INIT_DEBUGOUT("ixgbe_attach: end");
678
679         return (0);
680
681 err_late:
682         ixgbe_free_transmit_structures(adapter);
683         ixgbe_free_receive_structures(adapter);
684         free(adapter->queues, M_IXGBE);
685 err_out:
686         if (adapter->ifp != NULL)
687                 if_free(adapter->ifp);
688         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
689         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
690         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
691         ixgbe_free_pci_resources(adapter);
692         free(adapter->mta, M_IXGBE);
693         IXGBE_CORE_LOCK_DESTROY(adapter);
694
695         return (error);
696 } /* ixgbe_attach */
697
698 /************************************************************************
699  * ixgbe_detach - Device removal routine
700  *
701  *   Called when the driver is being removed.
702  *   Stops the adapter and deallocates all the resources
703  *   that were allocated for driver operation.
704  *
705  *   return 0 on success, positive on failure
706  ************************************************************************/
707 static int
708 ixgbe_detach(device_t dev)
709 {
710         struct adapter  *adapter = device_get_softc(dev);
711         struct ix_queue *que = adapter->queues;
712         struct tx_ring  *txr = adapter->tx_rings;
713         u32             ctrl_ext;
714
715         INIT_DEBUGOUT("ixgbe_detach: begin");
716
717         /* Make sure VLANS are not using driver */
718         if (adapter->ifp->if_vlantrunk != NULL) {
719                 device_printf(dev, "Vlan in use, detach first\n");
720                 return (EBUSY);
721         }
722
723         if (ixgbe_pci_iov_detach(dev) != 0) {
724                 device_printf(dev, "SR-IOV in use; detach first.\n");
725                 return (EBUSY);
726         }
727
728         ether_ifdetach(adapter->ifp);
729         /* Stop the adapter */
730         IXGBE_CORE_LOCK(adapter);
731         ixgbe_setup_low_power_mode(adapter);
732         IXGBE_CORE_UNLOCK(adapter);
733
734         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
735                 if (que->tq) {
736                         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
737                                 taskqueue_drain(que->tq, &txr->txq_task);
738                         taskqueue_drain(que->tq, &que->que_task);
739                         taskqueue_free(que->tq);
740                 }
741         }
742
743         /* Drain the Link queue */
744         if (adapter->tq) {
745                 taskqueue_drain(adapter->tq, &adapter->link_task);
746                 taskqueue_drain(adapter->tq, &adapter->mod_task);
747                 taskqueue_drain(adapter->tq, &adapter->msf_task);
748                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
749                         taskqueue_drain(adapter->tq, &adapter->mbx_task);
750                 taskqueue_drain(adapter->tq, &adapter->phy_task);
751                 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
752                         taskqueue_drain(adapter->tq, &adapter->fdir_task);
753                 taskqueue_free(adapter->tq);
754         }
755
756         /* let hardware know driver is unloading */
757         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
758         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
759         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
760
761         /* Unregister VLAN events */
762         if (adapter->vlan_attach != NULL)
763                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
764         if (adapter->vlan_detach != NULL)
765                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
766
767         callout_drain(&adapter->timer);
768
769         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
770                 netmap_detach(adapter->ifp);
771
772         ixgbe_free_pci_resources(adapter);
773         bus_generic_detach(dev);
774         if_free(adapter->ifp);
775
776         ixgbe_free_transmit_structures(adapter);
777         ixgbe_free_receive_structures(adapter);
778         free(adapter->queues, M_IXGBE);
779         free(adapter->mta, M_IXGBE);
780
781         IXGBE_CORE_LOCK_DESTROY(adapter);
782
783         return (0);
784 } /* ixgbe_detach */
785
786 /************************************************************************
787  * ixgbe_shutdown - Shutdown entry point
788  ************************************************************************/
789 static int
790 ixgbe_shutdown(device_t dev)
791 {
792         struct adapter *adapter = device_get_softc(dev);
793         int            error = 0;
794
795         INIT_DEBUGOUT("ixgbe_shutdown: begin");
796
797         IXGBE_CORE_LOCK(adapter);
798         error = ixgbe_setup_low_power_mode(adapter);
799         IXGBE_CORE_UNLOCK(adapter);
800
801         return (error);
802 } /* ixgbe_shutdown */
803
804 /************************************************************************
805  * ixgbe_suspend
806  *
807  *   From D0 to D3
808  ************************************************************************/
809 static int
810 ixgbe_suspend(device_t dev)
811 {
812         struct adapter *adapter = device_get_softc(dev);
813         int            error = 0;
814
815         INIT_DEBUGOUT("ixgbe_suspend: begin");
816
817         IXGBE_CORE_LOCK(adapter);
818
819         error = ixgbe_setup_low_power_mode(adapter);
820
821         IXGBE_CORE_UNLOCK(adapter);
822
823         return (error);
824 } /* ixgbe_suspend */
825
826 /************************************************************************
827  * ixgbe_resume
828  *
829  *   From D3 to D0
830  ************************************************************************/
831 static int
832 ixgbe_resume(device_t dev)
833 {
834         struct adapter  *adapter = device_get_softc(dev);
835         struct ifnet    *ifp = adapter->ifp;
836         struct ixgbe_hw *hw = &adapter->hw;
837         u32             wus;
838
839         INIT_DEBUGOUT("ixgbe_resume: begin");
840
841         IXGBE_CORE_LOCK(adapter);
842
843         /* Read & clear WUS register */
844         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
845         if (wus)
846                 device_printf(dev, "Woken up by (WUS): %#010x\n",
847                     wus);
848         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
849         /* And clear WUFC until next low-power transition */
850         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
851
852         /*
853          * Required after D3->D0 transition;
854          * will re-advertise all previous advertised speeds
855          */
856         if (ifp->if_flags & IFF_UP)
857                 ixgbe_init_locked(adapter);
858
859         IXGBE_CORE_UNLOCK(adapter);
860
861         return (0);
862 } /* ixgbe_resume */
863
864
865 /************************************************************************
866  * ixgbe_ioctl - Ioctl entry point
867  *
868  *   Called when the user wants to configure the interface.
869  *
870  *   return 0 on success, positive on failure
871  ************************************************************************/
872 static int
873 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
874 {
875         struct adapter *adapter = ifp->if_softc;
876         struct ifreq   *ifr = (struct ifreq *) data;
877 #if defined(INET) || defined(INET6)
878         struct ifaddr  *ifa = (struct ifaddr *)data;
879 #endif
880         int            error = 0;
881         bool           avoid_reset = FALSE;
882
883         switch (command) {
884         case SIOCSIFADDR:
885 #ifdef INET
886                 if (ifa->ifa_addr->sa_family == AF_INET)
887                         avoid_reset = TRUE;
888 #endif
889 #ifdef INET6
890                 if (ifa->ifa_addr->sa_family == AF_INET6)
891                         avoid_reset = TRUE;
892 #endif
893                 /*
894                  * Calling init results in link renegotiation,
895                  * so we avoid doing it when possible.
896                  */
897                 if (avoid_reset) {
898                         ifp->if_flags |= IFF_UP;
899                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
900                                 ixgbe_init(adapter);
901 #ifdef INET
902                         if (!(ifp->if_flags & IFF_NOARP))
903                                 arp_ifinit(ifp, ifa);
904 #endif
905                 } else
906                         error = ether_ioctl(ifp, command, data);
907                 break;
908         case SIOCSIFMTU:
909                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
910                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
911                         error = EINVAL;
912                 } else {
913                         IXGBE_CORE_LOCK(adapter);
914                         ifp->if_mtu = ifr->ifr_mtu;
915                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
916                         ixgbe_init_locked(adapter);
917                         ixgbe_recalculate_max_frame(adapter);
918                         IXGBE_CORE_UNLOCK(adapter);
919                 }
920                 break;
921         case SIOCSIFFLAGS:
922                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
923                 IXGBE_CORE_LOCK(adapter);
924                 if (ifp->if_flags & IFF_UP) {
925                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
926                                 if ((ifp->if_flags ^ adapter->if_flags) &
927                                     (IFF_PROMISC | IFF_ALLMULTI)) {
928                                         ixgbe_set_promisc(adapter);
929                                 }
930                         } else
931                                 ixgbe_init_locked(adapter);
932                 } else
933                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
934                                 ixgbe_stop(adapter);
935                 adapter->if_flags = ifp->if_flags;
936                 IXGBE_CORE_UNLOCK(adapter);
937                 break;
938         case SIOCADDMULTI:
939         case SIOCDELMULTI:
940                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
941                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
942                         IXGBE_CORE_LOCK(adapter);
943                         ixgbe_disable_intr(adapter);
944                         ixgbe_set_multi(adapter);
945                         ixgbe_enable_intr(adapter);
946                         IXGBE_CORE_UNLOCK(adapter);
947                 }
948                 break;
949         case SIOCSIFMEDIA:
950         case SIOCGIFMEDIA:
951                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
952                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
953                 break;
954         case SIOCSIFCAP:
955         {
956                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
957
958                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
959
960                 if (!mask)
961                         break;
962
963                 /* HW cannot turn these on/off separately */
964                 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
965                         ifp->if_capenable ^= IFCAP_RXCSUM;
966                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
967                 }
968                 if (mask & IFCAP_TXCSUM)
969                         ifp->if_capenable ^= IFCAP_TXCSUM;
970                 if (mask & IFCAP_TXCSUM_IPV6)
971                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
972                 if (mask & IFCAP_TSO4)
973                         ifp->if_capenable ^= IFCAP_TSO4;
974                 if (mask & IFCAP_TSO6)
975                         ifp->if_capenable ^= IFCAP_TSO6;
976                 if (mask & IFCAP_LRO)
977                         ifp->if_capenable ^= IFCAP_LRO;
978                 if (mask & IFCAP_VLAN_HWTAGGING)
979                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
980                 if (mask & IFCAP_VLAN_HWFILTER)
981                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
982                 if (mask & IFCAP_VLAN_HWTSO)
983                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
984
985                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
986                         IXGBE_CORE_LOCK(adapter);
987                         ixgbe_init_locked(adapter);
988                         IXGBE_CORE_UNLOCK(adapter);
989                 }
990                 VLAN_CAPABILITIES(ifp);
991                 break;
992         }
993 #if __FreeBSD_version >= 1002500
994         case SIOCGI2C:
995         {
996                 struct ixgbe_hw *hw = &adapter->hw;
997                 struct ifi2creq i2c;
998                 int i;
999
1000                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
1001                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1002                 if (error != 0)
1003                         break;
1004                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1005                         error = EINVAL;
1006                         break;
1007                 }
1008                 if (i2c.len > sizeof(i2c.data)) {
1009                         error = EINVAL;
1010                         break;
1011                 }
1012
1013                 for (i = 0; i < i2c.len; i++)
1014                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
1015                             i2c.dev_addr, &i2c.data[i]);
1016                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1017                 break;
1018         }
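        /*
         * A hypothetical userland sketch of driving the SIOCGI2C path
         * above (error handling omitted), mirroring the copyin/copyout
         * of struct ifi2creq through ifr_data:
         *
         *   struct ifreq ifr;
         *   struct ifi2creq i2c = { .dev_addr = 0xA0, .offset = 0, .len = 8 };
         *
         *   strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));
         *   ifr.ifr_data = (caddr_t)&i2c;
         *   ioctl(sock, SIOCGI2C, &ifr);
         *
         * This is the mechanism used to read SFP+ module EEPROM
         * contents (I2C addresses 0xA0/0xA2), e.g. by ifconfig(8)'s
         * SFP reporting.
         */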
1019 #endif
1020         default:
1021                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1022                 error = ether_ioctl(ifp, command, data);
1023                 break;
1024         }
1025
1026         return (error);
1027 } /* ixgbe_ioctl */
1028
1029 /************************************************************************
1030  * ixgbe_init_device_features
1031  ************************************************************************/
1032 static void
1033 ixgbe_init_device_features(struct adapter *adapter)
1034 {
1035         adapter->feat_cap = IXGBE_FEATURE_NETMAP
1036                           | IXGBE_FEATURE_RSS
1037                           | IXGBE_FEATURE_MSI
1038                           | IXGBE_FEATURE_MSIX
1039                           | IXGBE_FEATURE_LEGACY_IRQ
1040                           | IXGBE_FEATURE_LEGACY_TX;
1041
1042         /* Set capabilities first... */
1043         switch (adapter->hw.mac.type) {
1044         case ixgbe_mac_82598EB:
1045                 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
1046                         adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
1047                 break;
1048         case ixgbe_mac_X540:
1049                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1050                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1051                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
1052                     (adapter->hw.bus.func == 0))
1053                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
1054                 break;
1055         case ixgbe_mac_X550:
1056                 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1057                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1058                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1059                 break;
1060         case ixgbe_mac_X550EM_x:
1061                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1062                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1063                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
1064                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
1065                 break;
1066         case ixgbe_mac_X550EM_a:
1067                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1068                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1069                 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1070                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
1071                     (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
1072                         adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1073                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
1074                 }
1075                 break;
1076         case ixgbe_mac_82599EB:
1077                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1078                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1079                 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
1080                     (adapter->hw.bus.func == 0))
1081                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
1082                 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
1083                         adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1084                 break;
1085         default:
1086                 break;
1087         }
1088
1089         /* Enabled by default... */
1090         /* Fan failure detection */
1091         if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
1092                 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
1093         /* Netmap */
1094         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1095                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1096         /* EEE */
1097         if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1098                 adapter->feat_en |= IXGBE_FEATURE_EEE;
1099         /* Thermal Sensor */
1100         if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
1101                 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
1102
1103         /* Enabled via global sysctl... */
1104         /* Flow Director */
1105         if (ixgbe_enable_fdir) {
1106                 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
1107                         adapter->feat_en |= IXGBE_FEATURE_FDIR;
1108                 else
1109                         device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
1110         }
1111         /* Legacy (single queue) transmit */
1112         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
1113             ixgbe_enable_legacy_tx)
1114                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
1115         /*
1116          * Message Signaled Interrupts - Extended (MSI-X)
1117          * Normal MSI is only enabled if MSI-X calls fail.
1118          */
1119         if (!ixgbe_enable_msix)
1120                 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
1121         /* Receive-Side Scaling (RSS) */
1122         if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
1123                 adapter->feat_en |= IXGBE_FEATURE_RSS;
1124
1125         /* Disable features with unmet dependencies... */
1126         /* No MSI-X */
1127         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
1128                 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
1129                 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
1130                 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
1131                 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
1132         }
1133 } /* ixgbe_init_device_features */
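/*
 * An example of how the capability/enable split above resolves, for a
 * hypothetical configuration: on an X550 with hw.ix.enable_fdir=1 and
 * hw.ix.enable_msix=0, FDIR is both capable and enabled, but clearing
 * the MSIX capability also strips RSS and SRIOV from feat_cap and
 * feat_en, leaving the device to fall back to MSI or a legacy
 * interrupt when ixgbe_configure_interrupts() runs later.
 */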
1134
1135 /************************************************************************
1136  * ixgbe_check_fan_failure
1137  ************************************************************************/
1138 static void
1139 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
1140 {
1141         u32 mask;
1142
1143         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
1144             IXGBE_ESDP_SDP1;
1145
1146         if (reg & mask)
1147                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
1148 } /* ixgbe_check_fan_failure */
1149
1150 /************************************************************************
1151  * ixgbe_is_sfp
1152  ************************************************************************/
1153 static inline bool
1154 ixgbe_is_sfp(struct ixgbe_hw *hw)
1155 {
1156         switch (hw->mac.type) {
1157         case ixgbe_mac_82598EB:
1158                 if (hw->phy.type == ixgbe_phy_nl)
1159                         return TRUE;
1160                 return FALSE;
1161         case ixgbe_mac_82599EB:
1162                 switch (hw->mac.ops.get_media_type(hw)) {
1163                 case ixgbe_media_type_fiber:
1164                 case ixgbe_media_type_fiber_qsfp:
1165                         return TRUE;
1166                 default:
1167                         return FALSE;
1168                 }
1169         case ixgbe_mac_X550EM_x:
1170         case ixgbe_mac_X550EM_a:
1171                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1172                         return TRUE;
1173                 return FALSE;
1174         default:
1175                 return FALSE;
1176         }
1177 } /* ixgbe_is_sfp */
1178
1179 /************************************************************************
1180  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
1181  *
1182  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
1183  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
1184  *   field what mbuf offload flags the driver will understand.
1185  ************************************************************************/
1186 static void
1187 ixgbe_set_if_hwassist(struct adapter *adapter)
1188 {
1189         struct ifnet *ifp = adapter->ifp;
1190
1191         ifp->if_hwassist = 0;
1192 #if __FreeBSD_version >= 1000000
1193         if (ifp->if_capenable & IFCAP_TSO4)
1194                 ifp->if_hwassist |= CSUM_IP_TSO;
1195         if (ifp->if_capenable & IFCAP_TSO6)
1196                 ifp->if_hwassist |= CSUM_IP6_TSO;
1197         if (ifp->if_capenable & IFCAP_TXCSUM) {
1198                 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1199                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1200                         ifp->if_hwassist |= CSUM_IP_SCTP;
1201         }
1202         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1203                 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1204                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1205                         ifp->if_hwassist |= CSUM_IP6_SCTP;
1206         }
1207 #else
1208         if (ifp->if_capenable & IFCAP_TSO)
1209                 ifp->if_hwassist |= CSUM_TSO;
1210         if (ifp->if_capenable & IFCAP_TXCSUM) {
1211                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1212                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1213                         ifp->if_hwassist |= CSUM_SCTP;
1214         }
1215 #endif
1216 } /* ixgbe_set_if_hwassist */
1217
1218 /************************************************************************
1219  * ixgbe_init_locked - Init entry point
1220  *
1221  *   Used in two ways: It is used by the stack as an init
1222  *   entry point in the network interface structure. It is also
1223  *   used by the driver as a hw/sw initialization routine to
1224  *   get to a consistent state.
1225  *
1226  *   return 0 on success, positive on failure
1227  ************************************************************************/
1228 void
1229 ixgbe_init_locked(struct adapter *adapter)
1230 {
1231         struct ifnet    *ifp = adapter->ifp;
1232         device_t        dev = adapter->dev;
1233         struct ixgbe_hw *hw = &adapter->hw;
1234         struct tx_ring  *txr;
1235         struct rx_ring  *rxr;
1236         u32             txdctl, mhadd;
1237         u32             rxdctl, rxctrl;
1238         u32             ctrl_ext;
1239         int             err = 0;
1240
1241         mtx_assert(&adapter->core_mtx, MA_OWNED);
1242         INIT_DEBUGOUT("ixgbe_init_locked: begin");
1243
1244         hw->adapter_stopped = FALSE;
1245         ixgbe_stop_adapter(hw);
1246         callout_stop(&adapter->timer);
1247
1248         /* Queue indices may change with IOV mode */
1249         ixgbe_align_all_queue_indices(adapter);
1250
1251         /* reprogram the RAR[0] in case user changed it. */
1252         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1253
1254         /* Get the latest mac address, User can use a LAA */
1255         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1256         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1257         hw->addr_ctrl.rar_used_count = 1;
1258
1259         /* Set hardware offload abilities from ifnet flags */
1260         ixgbe_set_if_hwassist(adapter);
1261
1262         /* Prepare transmit descriptors and buffers */
1263         if (ixgbe_setup_transmit_structures(adapter)) {
1264                 device_printf(dev, "Could not setup transmit structures\n");
1265                 ixgbe_stop(adapter);
1266                 return;
1267         }
1268
1269         ixgbe_init_hw(hw);
1270         ixgbe_initialize_iov(adapter);
1271         ixgbe_initialize_transmit_units(adapter);
1272
1273         /* Setup Multicast table */
1274         ixgbe_set_multi(adapter);
1275
1276         /* Determine the correct mbuf pool, based on frame size */
1277         if (adapter->max_frame_size <= MCLBYTES)
1278                 adapter->rx_mbuf_sz = MCLBYTES;
1279         else
1280                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1281
1282         /* Prepare receive descriptors and buffers */
1283         if (ixgbe_setup_receive_structures(adapter)) {
1284                 device_printf(dev, "Could not setup receive structures\n");
1285                 ixgbe_stop(adapter);
1286                 return;
1287         }
1288
1289         /* Configure RX settings */
1290         ixgbe_initialize_receive_units(adapter);
1291
1292         /* Enable SDP & MSI-X interrupts based on adapter */
1293         ixgbe_config_gpie(adapter);
1294
1295         /* Set MTU size */
1296         if (ifp->if_mtu > ETHERMTU) {
1297                 /* aka IXGBE_MAXFRS on 82599 and newer */
1298                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1299                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1300                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1301                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1302         }
1303
1304         /* Now enable all the queues */
1305         for (int i = 0; i < adapter->num_queues; i++) {
1306                 txr = &adapter->tx_rings[i];
1307                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1308                 txdctl |= IXGBE_TXDCTL_ENABLE;
1309                 /* Set WTHRESH to 8, burst writeback */
1310                 txdctl |= (8 << 16);
1311                 /*
1312                  * When the internal queue falls below PTHRESH (32),
1313                  * start prefetching as long as there are at least
1314                  * HTHRESH (1) buffers ready. The values are taken
1315                  * from the Intel linux driver 3.8.21.
1316                  * Prefetching enables tx line rate even with 1 queue.
1317                  */
1318                 txdctl |= (32 << 0) | (1 << 8);
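                 /*
                  * Editor's illustration (not from the original source):
                  * the ORs above place PTHRESH = 32 at bit 0, HTHRESH = 1
                  * at bit 8 and WTHRESH = 8 at bit 16, so together with
                  * IXGBE_TXDCTL_ENABLE (assumed to be bit 25) the value
                  * added to txdctl is 0x20 | 0x100 | 0x80000 | 0x2000000
                  * = 0x02080120.
                  */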
1319                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
1320         }
1321
1322         for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1323                 rxr = &adapter->rx_rings[i];
1324                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1325                 if (hw->mac.type == ixgbe_mac_82598EB) {
1326                         /*
1327                          * PTHRESH = 21
1328                          * HTHRESH = 4
1329                          * WTHRESH = 8
1330                          */
1331                         rxdctl &= ~0x3FFFFF;
1332                         rxdctl |= 0x080420;
1333                 }
1334                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1335                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1336                 for (; j < 10; j++) {
1337                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1338                             IXGBE_RXDCTL_ENABLE)
1339                                 break;
1340                         else
1341                                 msec_delay(1);
1342                 }
1343                 wmb();
1344
1345                 /*
1346                  * In netmap mode, we must preserve the buffers made
1347                  * available to userspace before the if_init()
1348                  * (this is true by default on the TX side, because
1349                  * init makes all buffers available to userspace).
1350                  *
1351                  * netmap_reset() and the device specific routines
1352                  * (e.g. ixgbe_setup_receive_rings()) map these
1353                  * buffers at the end of the NIC ring, so here we
1354                  * must set the RDT (tail) register to make sure
1355                  * they are not overwritten.
1356                  *
1357                  * In this driver the NIC ring starts at RDH = 0,
1358                  * RDT points to the last slot available for reception (?),
1359                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1360                  */
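                 /*
                  * Editor's illustration (not from the original source),
                  * assuming nm_kr_rxspace() returns the number of slots
                  * still held by userspace: with num_rx_desc = 2048 and
                  * 5 slots outstanding, the netmap branch below writes
                  * RDT = 2048 - 1 - 5 = 2042.
                  */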
1361 #ifdef DEV_NETMAP
1362                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1363                     (ifp->if_capenable & IFCAP_NETMAP)) {
1364                         struct netmap_adapter *na = NA(adapter->ifp);
1365                         struct netmap_kring *kring = &na->rx_rings[i];
1366                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1367
1368                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1369                 } else
1370 #endif /* DEV_NETMAP */
1371                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
1372                             adapter->num_rx_desc - 1);
1373         }
1374
1375         /* Enable Receive engine */
1376         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1377         if (hw->mac.type == ixgbe_mac_82598EB)
1378                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1379         rxctrl |= IXGBE_RXCTRL_RXEN;
1380         ixgbe_enable_rx_dma(hw, rxctrl);
1381
1382         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1383
1384         /* Set up MSI-X routing */
1385         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1386                 ixgbe_configure_ivars(adapter);
1387                 /* Set up auto-mask */
1388                 if (hw->mac.type == ixgbe_mac_82598EB)
1389                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1390                 else {
1391                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1392                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1393                 }
1394         } else {  /* Simple settings for Legacy/MSI */
1395                 ixgbe_set_ivar(adapter, 0, 0, 0);
1396                 ixgbe_set_ivar(adapter, 0, 0, 1);
1397                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1398         }
1399
1400         ixgbe_init_fdir(adapter);
1401
1402         /*
1403          * Check on any SFP devices that
1404          * need to be kick-started
1405          */
1406         if (hw->phy.type == ixgbe_phy_none) {
1407                 err = hw->phy.ops.identify(hw);
1408                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1409                         device_printf(dev,
1410                             "Unsupported SFP+ module type was detected.\n");
1411                         return;
1412                 }
1413         }
1414
1415         /* Set moderation on the Link interrupt */
1416         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1417
1418         /* Config/Enable Link */
1419         ixgbe_config_link(adapter);
1420
1421         /* Hardware Packet Buffer & Flow Control setup */
1422         ixgbe_config_delay_values(adapter);
1423
1424         /* Initialize the FC settings */
1425         ixgbe_start_hw(hw);
1426
1427         /* Set up VLAN support and filter */
1428         ixgbe_setup_vlan_hw_support(adapter);
1429
1430         /* Setup DMA Coalescing */
1431         ixgbe_config_dmac(adapter);
1432
1433         /* And now turn on interrupts */
1434         ixgbe_enable_intr(adapter);
1435
1436         /* Enable use of the mailbox (MBX) by the VFs */
1437         if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
1438                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1439                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1440                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1441         }
1442
1443         /* Now inform the stack we're ready */
1444         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1445
1446         return;
1447 } /* ixgbe_init_locked */
1448
1449 /************************************************************************
1450  * ixgbe_init
1451  ************************************************************************/
1452 static void
1453 ixgbe_init(void *arg)
1454 {
1455         struct adapter *adapter = arg;
1456
1457         IXGBE_CORE_LOCK(adapter);
1458         ixgbe_init_locked(adapter);
1459         IXGBE_CORE_UNLOCK(adapter);
1460
1461         return;
1462 } /* ixgbe_init */
1463
1464 /************************************************************************
1465  * ixgbe_config_gpie
1466  ************************************************************************/
1467 static void
1468 ixgbe_config_gpie(struct adapter *adapter)
1469 {
1470         struct ixgbe_hw *hw = &adapter->hw;
1471         u32             gpie;
1472
1473         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1474
1475         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1476                 /* Enable Enhanced MSI-X mode */
1477                 gpie |= IXGBE_GPIE_MSIX_MODE
1478                      |  IXGBE_GPIE_EIAME
1479                      |  IXGBE_GPIE_PBA_SUPPORT
1480                      |  IXGBE_GPIE_OCD;
1481         }
1482
1483         /* Fan Failure Interrupt */
1484         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
1485                 gpie |= IXGBE_SDP1_GPIEN;
1486
1487         /* Thermal Sensor Interrupt */
1488         if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
1489                 gpie |= IXGBE_SDP0_GPIEN_X540;
1490
1491         /* Link detection */
1492         switch (hw->mac.type) {
1493         case ixgbe_mac_82599EB:
1494                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
1495                 break;
1496         case ixgbe_mac_X550EM_x:
1497         case ixgbe_mac_X550EM_a:
1498                 gpie |= IXGBE_SDP0_GPIEN_X540;
1499                 break;
1500         default:
1501                 break;
1502         }
1503
1504         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1505
1506         return;
1507 } /* ixgbe_config_gpie */
1508
1509 /************************************************************************
1510  * ixgbe_config_delay_values
1511  *
1512  *   Requires adapter->max_frame_size to be set.
1513  ************************************************************************/
1514 static void
1515 ixgbe_config_delay_values(struct adapter *adapter)
1516 {
1517         struct ixgbe_hw *hw = &adapter->hw;
1518         u32             rxpb, frame, size, tmp;
1519
1520         frame = adapter->max_frame_size;
1521
1522         /* Calculate High Water */
1523         switch (hw->mac.type) {
1524         case ixgbe_mac_X540:
1525         case ixgbe_mac_X550:
1526         case ixgbe_mac_X550EM_x:
1527         case ixgbe_mac_X550EM_a:
1528                 tmp = IXGBE_DV_X540(frame, frame);
1529                 break;
1530         default:
1531                 tmp = IXGBE_DV(frame, frame);
1532                 break;
1533         }
1534         size = IXGBE_BT2KB(tmp);
1535         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1536         hw->fc.high_water[0] = rxpb - size;
1537
1538         /* Now calculate Low Water */
1539         switch (hw->mac.type) {
1540         case ixgbe_mac_X540:
1541         case ixgbe_mac_X550:
1542         case ixgbe_mac_X550EM_x:
1543         case ixgbe_mac_X550EM_a:
1544                 tmp = IXGBE_LOW_DV_X540(frame);
1545                 break;
1546         default:
1547                 tmp = IXGBE_LOW_DV(frame);
1548                 break;
1549         }
1550         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1551
1552         hw->fc.pause_time = IXGBE_FC_PAUSE;
1553         hw->fc.send_xon = TRUE;
1554 } /* ixgbe_config_delay_values */
1555
1556 /************************************************************************
1557  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1558  ************************************************************************/
1559 static inline void
1560 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1561 {
1562         struct ixgbe_hw *hw = &adapter->hw;
1563         u64             queue = (u64)1 << vector;
1564         u32             mask;
1565
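         /*
          * Editor's note (not from the original source): on MACs newer
          * than the 82598 the per-queue interrupt bits span two 32-bit
          * registers, so vector 3 sets bit 3 of EIMS_EX(0) while vector
          * 35 sets bit 3 of EIMS_EX(1).  Shifting a 64-bit one above
          * keeps vectors of 31 and above from overflowing a 32-bit int.
          */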
1566         if (hw->mac.type == ixgbe_mac_82598EB) {
1567                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1568                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1569         } else {
1570                 mask = (queue & 0xFFFFFFFF);
1571                 if (mask)
1572                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1573                 mask = (queue >> 32);
1574                 if (mask)
1575                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1576         }
1577 } /* ixgbe_enable_queue */
1578
1579 /************************************************************************
1580  * ixgbe_disable_queue
1581  ************************************************************************/
1582 static inline void
1583 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1584 {
1585         struct ixgbe_hw *hw = &adapter->hw;
1586         u64             queue = (u64)1 << vector;
1587         u32             mask;
1588
1589         if (hw->mac.type == ixgbe_mac_82598EB) {
1590                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1591                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1592         } else {
1593                 mask = (queue & 0xFFFFFFFF);
1594                 if (mask)
1595                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1596                 mask = (queue >> 32);
1597                 if (mask)
1598                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1599         }
1600 } /* ixgbe_disable_queue */
1601
1602 /************************************************************************
1603  * ixgbe_handle_que
1604  ************************************************************************/
1605 static void
1606 ixgbe_handle_que(void *context, int pending)
1607 {
1608         struct ix_queue *que = context;
1609         struct adapter  *adapter = que->adapter;
1610         struct tx_ring  *txr = que->txr;
1611         struct ifnet    *ifp = adapter->ifp;
1612
1613         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1614                 ixgbe_rxeof(que);
1615                 IXGBE_TX_LOCK(txr);
1616                 ixgbe_txeof(txr);
1617                 if (!ixgbe_ring_empty(ifp, txr->br))
1618                         ixgbe_start_locked(ifp, txr);
1619                 IXGBE_TX_UNLOCK(txr);
1620         }
1621
1622         /* Reenable this interrupt */
1623         if (que->res != NULL)
1624                 ixgbe_enable_queue(adapter, que->msix);
1625         else
1626                 ixgbe_enable_intr(adapter);
1627
1628         return;
1629 } /* ixgbe_handle_que */
1630
1631
1632 /************************************************************************
1633  * ixgbe_legacy_irq - Legacy Interrupt Service routine
1634  ************************************************************************/
1635 static void
1636 ixgbe_legacy_irq(void *arg)
1637 {
1638         struct ix_queue *que = arg;
1639         struct adapter  *adapter = que->adapter;
1640         struct ixgbe_hw *hw = &adapter->hw;
1641         struct ifnet    *ifp = adapter->ifp;
1642         struct tx_ring  *txr = adapter->tx_rings;
1643         bool            more;
1644         u32             eicr, eicr_mask;
1645
1646         /* Silicon errata #26 on 82598 */
1647         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1648
1649         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1650
1651         ++que->irqs;
1652         if (!eicr) {
1653                 ixgbe_enable_intr(adapter);
1654                 return;
1655         }
1656
1657         more = ixgbe_rxeof(que);
1658
1659         IXGBE_TX_LOCK(txr);
1660         ixgbe_txeof(txr);
1661         if (!ixgbe_ring_empty(ifp, txr->br))
1662                 ixgbe_start_locked(ifp, txr);
1663         IXGBE_TX_UNLOCK(txr);
1664
1665         /* Check for fan failure */
1666         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1667                 ixgbe_check_fan_failure(adapter, eicr, true);
1668                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1669         }
1670
1671         /* Link status change */
1672         if (eicr & IXGBE_EICR_LSC)
1673                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1674
1675         if (ixgbe_is_sfp(hw)) {
1676                 /* Pluggable optics-related interrupt */
1677                 if (hw->mac.type >= ixgbe_mac_X540)
1678                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
1679                 else
1680                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1681
1682                 if (eicr & eicr_mask) {
1683                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
1684                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1685                 }
1686
1687                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
1688                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1689                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
1690                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1691                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1692                 }
1693         }
1694
1695         /* External PHY interrupt */
1696         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
1697             (eicr & IXGBE_EICR_GPI_SDP0_X540))
1698                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1699
1700         if (more)
1701                 taskqueue_enqueue(que->tq, &que->que_task);
1702         else
1703                 ixgbe_enable_intr(adapter);
1704
1705         return;
1706 } /* ixgbe_legacy_irq */
1707
1708
1709 /************************************************************************
1710  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1711  ************************************************************************/
1712 void
1713 ixgbe_msix_que(void *arg)
1714 {
1715         struct ix_queue *que = arg;
1716         struct adapter  *adapter = que->adapter;
1717         struct ifnet    *ifp = adapter->ifp;
1718         struct tx_ring  *txr = que->txr;
1719         struct rx_ring  *rxr = que->rxr;
1720         bool            more;
1721         u32             newitr = 0;
1722
1723
1724         /* Protect against spurious interrupts */
1725         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1726                 return;
1727
1728         ixgbe_disable_queue(adapter, que->msix);
1729         ++que->irqs;
1730
1731         more = ixgbe_rxeof(que);
1732
1733         IXGBE_TX_LOCK(txr);
1734         ixgbe_txeof(txr);
1735         if (!ixgbe_ring_empty(ifp, txr->br))
1736                 ixgbe_start_locked(ifp, txr);
1737         IXGBE_TX_UNLOCK(txr);
1738
1739         /* Do AIM now? */
1740
1741         if (adapter->enable_aim == FALSE)
1742                 goto no_calc;
1743         /*
1744          * Do Adaptive Interrupt Moderation:
1745          *  - Write out last calculated setting
1746          *  - Calculate based on average size over
1747          *    the last interval.
1748          */
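         /*
          * Editor's illustration (not from the original source): with an
          * average received packet of 1500 bytes and an idle TX ring, the
          * code below computes newitr = 1500 + 24 = 1524, clamps it to at
          * most 3000 and, since 1524 falls outside the 300..1200 mid
          * range, halves it to 762 before adding the MAC-specific bits.
          */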
1749         if (que->eitr_setting)
1750                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1751                     que->eitr_setting);
1752
1753         que->eitr_setting = 0;
1754
1755         /* Idle, do nothing */
1756         if ((txr->bytes == 0) && (rxr->bytes == 0))
1757                 goto no_calc;
1758
1759         if ((txr->bytes) && (txr->packets))
1760                 newitr = txr->bytes/txr->packets;
1761         if ((rxr->bytes) && (rxr->packets))
1762                 newitr = max(newitr, (rxr->bytes / rxr->packets));
1763         newitr += 24; /* account for hardware frame, crc */
1764
1765         /* set an upper boundary */
1766         newitr = min(newitr, 3000);
1767
1768         /* Be nice to the mid range */
1769         if ((newitr > 300) && (newitr < 1200))
1770                 newitr = (newitr / 3);
1771         else
1772                 newitr = (newitr / 2);
1773
1774         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1775                 newitr |= newitr << 16;
1776         else
1777                 newitr |= IXGBE_EITR_CNT_WDIS;
1778
1779         /* save for next interrupt */
1780         que->eitr_setting = newitr;
1781
1782         /* Reset state */
1783         txr->bytes = 0;
1784         txr->packets = 0;
1785         rxr->bytes = 0;
1786         rxr->packets = 0;
1787
1788 no_calc:
1789         if (more)
1790                 taskqueue_enqueue(que->tq, &que->que_task);
1791         else
1792                 ixgbe_enable_queue(adapter, que->msix);
1793
1794         return;
1795 } /* ixgbe_msix_que */
1796
1797
1798 /************************************************************************
1799  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
1800  ************************************************************************/
1801 static void
1802 ixgbe_msix_link(void *arg)
1803 {
1804         struct adapter  *adapter = arg;
1805         struct ixgbe_hw *hw = &adapter->hw;
1806         u32             eicr, eicr_mask;
1807         s32             retval;
1808
1809         ++adapter->link_irq;
1810
1811         /* Pause other interrupts */
1812         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1813
1814         /* First get the cause */
1815         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1816         /* Be sure the queue bits are not cleared */
1817         eicr &= ~IXGBE_EICR_RTX_QUEUE;
1818         /* Clear interrupt with write */
1819         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
1820
1821         /* Link status change */
1822         if (eicr & IXGBE_EICR_LSC) {
1823                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1824                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1825         }
1826
1827         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1828                 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
1829                     (eicr & IXGBE_EICR_FLOW_DIR)) {
1830                         /* This is probably overkill :) */
1831                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1832                                 return;
1833                         /* Disable the interrupt */
1834                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1835                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1836                 }
1837
1838                 if (eicr & IXGBE_EICR_ECC) {
1839                         device_printf(adapter->dev,
1840                             "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
1841                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1842                 }
1843
1844                 /* Check for over temp condition */
1845                 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
1846                         switch (adapter->hw.mac.type) {
1847                         case ixgbe_mac_X550EM_a:
1848                                 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
1849                                         break;
1850                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
1851                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
1852                                 IXGBE_WRITE_REG(hw, IXGBE_EICR,
1853                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
1854                                 retval = hw->phy.ops.check_overtemp(hw);
1855                                 if (retval != IXGBE_ERR_OVERTEMP)
1856                                         break;
1857                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
1858                                 device_printf(adapter->dev, "System shutdown required!\n");
1859                                 break;
1860                         default:
1861                                 if (!(eicr & IXGBE_EICR_TS))
1862                                         break;
1863                                 retval = hw->phy.ops.check_overtemp(hw);
1864                                 if (retval != IXGBE_ERR_OVERTEMP)
1865                                         break;
1866                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
1867                                 device_printf(adapter->dev, "System shutdown required!\n");
1868                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1869                                 break;
1870                         }
1871                 }
1872
1873                 /* Check for VF message */
1874                 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
1875                     (eicr & IXGBE_EICR_MAILBOX))
1876                         taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1877         }
1878
1879         if (ixgbe_is_sfp(hw)) {
1880                 /* Pluggable optics-related interrupt */
1881                 if (hw->mac.type >= ixgbe_mac_X540)
1882                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
1883                 else
1884                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1885
1886                 if (eicr & eicr_mask) {
1887                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
1888                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1889                 }
1890
1891                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
1892                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1893                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
1894                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1895                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1896                 }
1897         }
1898
1899         /* Check for fan failure */
1900         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1901                 ixgbe_check_fan_failure(adapter, eicr, true);
1902                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1903         }
1904
1905         /* External PHY interrupt */
1906         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
1907             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1908                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1909                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1910         }
1911
1912         /* Re-enable other interrupts */
1913         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1914
1915         return;
1916 } /* ixgbe_msix_link */
1917
1918 /************************************************************************
1919  * ixgbe_media_status - Media Ioctl callback
1920  *
1921  *   Called whenever the user queries the status of
1922  *   the interface using ifconfig.
1923  ************************************************************************/
1924 static void
1925 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1926 {
1927         struct adapter  *adapter = ifp->if_softc;
1928         struct ixgbe_hw *hw = &adapter->hw;
1929         int             layer;
1930
1931         INIT_DEBUGOUT("ixgbe_media_status: begin");
1932         IXGBE_CORE_LOCK(adapter);
1933         ixgbe_update_link_status(adapter);
1934
1935         ifmr->ifm_status = IFM_AVALID;
1936         ifmr->ifm_active = IFM_ETHER;
1937
1938         if (!adapter->link_active) {
1939                 IXGBE_CORE_UNLOCK(adapter);
1940                 return;
1941         }
1942
1943         ifmr->ifm_status |= IFM_ACTIVE;
1944         layer = adapter->phy_layer;
1945
1946         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1947             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1948             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
1949             layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1950                 switch (adapter->link_speed) {
1951                 case IXGBE_LINK_SPEED_10GB_FULL:
1952                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1953                         break;
1954                 case IXGBE_LINK_SPEED_1GB_FULL:
1955                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1956                         break;
1957                 case IXGBE_LINK_SPEED_100_FULL:
1958                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1959                         break;
1960                 case IXGBE_LINK_SPEED_10_FULL:
1961                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1962                         break;
1963                 }
1964         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1965             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1966                 switch (adapter->link_speed) {
1967                 case IXGBE_LINK_SPEED_10GB_FULL:
1968                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1969                         break;
1970                 }
1971         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1972                 switch (adapter->link_speed) {
1973                 case IXGBE_LINK_SPEED_10GB_FULL:
1974                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1975                         break;
1976                 case IXGBE_LINK_SPEED_1GB_FULL:
1977                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1978                         break;
1979                 }
1980         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1981                 switch (adapter->link_speed) {
1982                 case IXGBE_LINK_SPEED_10GB_FULL:
1983                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1984                         break;
1985                 case IXGBE_LINK_SPEED_1GB_FULL:
1986                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1987                         break;
1988                 }
1989         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1990             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1991                 switch (adapter->link_speed) {
1992                 case IXGBE_LINK_SPEED_10GB_FULL:
1993                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1994                         break;
1995                 case IXGBE_LINK_SPEED_1GB_FULL:
1996                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1997                         break;
1998                 }
1999         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2000                 switch (adapter->link_speed) {
2001                 case IXGBE_LINK_SPEED_10GB_FULL:
2002                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2003                         break;
2004                 }
2005         /*
2006          * XXX: These need to use the proper media types once
2007          * they're added.
2008          */
2009 #ifndef IFM_ETH_XTYPE
2010         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2011                 switch (adapter->link_speed) {
2012                 case IXGBE_LINK_SPEED_10GB_FULL:
2013                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2014                         break;
2015                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2016                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2017                         break;
2018                 case IXGBE_LINK_SPEED_1GB_FULL:
2019                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2020                         break;
2021                 }
2022         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2023             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2024                 switch (adapter->link_speed) {
2025                 case IXGBE_LINK_SPEED_10GB_FULL:
2026                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2027                         break;
2028                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2029                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2030                         break;
2031                 case IXGBE_LINK_SPEED_1GB_FULL:
2032                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2033                         break;
2034                 }
2035 #else
2036         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2037                 switch (adapter->link_speed) {
2038                 case IXGBE_LINK_SPEED_10GB_FULL:
2039                         ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2040                         break;
2041                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2042                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2043                         break;
2044                 case IXGBE_LINK_SPEED_1GB_FULL:
2045                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2046                         break;
2047                 }
2048         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2049             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2050                 switch (adapter->link_speed) {
2051                 case IXGBE_LINK_SPEED_10GB_FULL:
2052                         ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2053                         break;
2054                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2055                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2056                         break;
2057                 case IXGBE_LINK_SPEED_1GB_FULL:
2058                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2059                         break;
2060                 }
2061 #endif
2062
2063         /* If nothing is recognized... */
2064         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2065                 ifmr->ifm_active |= IFM_UNKNOWN;
2066
2067 #if __FreeBSD_version >= 900025
2068         /* Display current flow control setting used on link */
2069         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2070             hw->fc.current_mode == ixgbe_fc_full)
2071                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2072         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2073             hw->fc.current_mode == ixgbe_fc_full)
2074                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2075 #endif
2076
2077         IXGBE_CORE_UNLOCK(adapter);
2078
2079         return;
2080 } /* ixgbe_media_status */
2081
2082 /************************************************************************
2083  * ixgbe_media_change - Media Ioctl callback
2084  *
2085  *   Called when the user changes speed/duplex using
2086  *   the media/mediaopt options with ifconfig.
2087  ************************************************************************/
2088 static int
2089 ixgbe_media_change(struct ifnet *ifp)
2090 {
2091         struct adapter   *adapter = ifp->if_softc;
2092         struct ifmedia   *ifm = &adapter->media;
2093         struct ixgbe_hw  *hw = &adapter->hw;
2094         ixgbe_link_speed speed = 0;
2095
2096         INIT_DEBUGOUT("ixgbe_media_change: begin");
2097
2098         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2099                 return (EINVAL);
2100
2101         if (hw->phy.media_type == ixgbe_media_type_backplane)
2102                 return (ENODEV);
2103
2104         /*
2105          * We don't actually need to check against the supported
2106          * media types of the adapter; ifmedia will take care of
2107          * that for us.
2108          */
2109         switch (IFM_SUBTYPE(ifm->ifm_media)) {
2110                 case IFM_AUTO:
2111                 case IFM_10G_T:
2112                         speed |= IXGBE_LINK_SPEED_100_FULL;
2113                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2114                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2115                         break;
2116                 case IFM_10G_LRM:
2117                 case IFM_10G_LR:
2118 #ifndef IFM_ETH_XTYPE
2119                 case IFM_10G_SR: /* KR, too */
2120                 case IFM_10G_CX4: /* KX4 */
2121 #else
2122                 case IFM_10G_KR:
2123                 case IFM_10G_KX4:
2124 #endif
2125                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2126                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2127                         break;
2128 #ifndef IFM_ETH_XTYPE
2129                 case IFM_1000_CX: /* KX */
2130 #else
2131                 case IFM_1000_KX:
2132 #endif
2133                 case IFM_1000_LX:
2134                 case IFM_1000_SX:
2135                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2136                         break;
2137                 case IFM_1000_T:
2138                         speed |= IXGBE_LINK_SPEED_100_FULL;
2139                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2140                         break;
2141                 case IFM_10G_TWINAX:
2142                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2143                         break;
2144                 case IFM_100_TX:
2145                         speed |= IXGBE_LINK_SPEED_100_FULL;
2146                         break;
2147                 case IFM_10_T:
2148                         speed |= IXGBE_LINK_SPEED_10_FULL;
2149                         break;
2150                 default:
2151                         goto invalid;
2152         }
2153
2154         hw->mac.autotry_restart = TRUE;
2155         hw->mac.ops.setup_link(hw, speed, TRUE);
2156         adapter->advertise =
2157             ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2158             ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2159             ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2160             ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
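         /*
          * Editor's illustration (not from the original source): choosing
          * IFM_AUTO (or IFM_10G_T) above enables the 100M, 1G and 10G
          * speeds, so adapter->advertise becomes 1 | 2 | 4 = 7.
          */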
2161
2162         return (0);
2163
2164 invalid:
2165         device_printf(adapter->dev, "Invalid media type!\n");
2166
2167         return (EINVAL);
2168 } /* ixgbe_media_change */
2169
2170 /************************************************************************
2171  * ixgbe_set_promisc
2172  ************************************************************************/
2173 static void
2174 ixgbe_set_promisc(struct adapter *adapter)
2175 {
2176         struct ifnet *ifp = adapter->ifp;
2177         int          mcnt = 0;
2178         u32          rctl;
2179
2180         rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2181         rctl &= (~IXGBE_FCTRL_UPE);
2182         if (ifp->if_flags & IFF_ALLMULTI)
2183                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2184         else {
2185                 struct ifmultiaddr *ifma;
2186 #if __FreeBSD_version < 800000
2187                 IF_ADDR_LOCK(ifp);
2188 #else
2189                 if_maddr_rlock(ifp);
2190 #endif
2191                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2192                         if (ifma->ifma_addr->sa_family != AF_LINK)
2193                                 continue;
2194                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2195                                 break;
2196                         mcnt++;
2197                 }
2198 #if __FreeBSD_version < 800000
2199                 IF_ADDR_UNLOCK(ifp);
2200 #else
2201                 if_maddr_runlock(ifp);
2202 #endif
2203         }
2204         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2205                 rctl &= (~IXGBE_FCTRL_MPE);
2206         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2207
2208         if (ifp->if_flags & IFF_PROMISC) {
2209                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2210                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2211         } else if (ifp->if_flags & IFF_ALLMULTI) {
2212                 rctl |= IXGBE_FCTRL_MPE;
2213                 rctl &= ~IXGBE_FCTRL_UPE;
2214                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2215         }
2216
2217         return;
2218 } /* ixgbe_set_promisc */
2219
2220
2221 /************************************************************************
2222  * ixgbe_set_multi - Multicast Update
2223  *
2224  *   Called whenever multicast address list is updated.
2225  ************************************************************************/
2226 static void
2227 ixgbe_set_multi(struct adapter *adapter)
2228 {
2229         struct ifmultiaddr   *ifma;
2230         struct ixgbe_mc_addr *mta;
2231         struct ifnet         *ifp = adapter->ifp;
2232         u8                   *update_ptr;
2233         int                  mcnt = 0;
2234         u32                  fctrl;
2235
2236         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2237
2238         mta = adapter->mta;
2239         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2240
2241 #if __FreeBSD_version < 800000
2242         IF_ADDR_LOCK(ifp);
2243 #else
2244         if_maddr_rlock(ifp);
2245 #endif
2246         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2247                 if (ifma->ifma_addr->sa_family != AF_LINK)
2248                         continue;
2249                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2250                         break;
2251                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2252                     mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2253                 mta[mcnt].vmdq = adapter->pool;
2254                 mcnt++;
2255         }
2256 #if __FreeBSD_version < 800000
2257         IF_ADDR_UNLOCK(ifp);
2258 #else
2259         if_maddr_runlock(ifp);
2260 #endif
2261
2262         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2263         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2264         if (ifp->if_flags & IFF_PROMISC)
2265                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2266         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2267             ifp->if_flags & IFF_ALLMULTI) {
2268                 fctrl |= IXGBE_FCTRL_MPE;
2269                 fctrl &= ~IXGBE_FCTRL_UPE;
2270         } else
2271                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2272
2273         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2274
2275         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2276                 update_ptr = (u8 *)mta;
2277                 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
2278                     ixgbe_mc_array_itr, TRUE);
2279         }
2280
2281         return;
2282 } /* ixgbe_set_multi */
2283
2284 /************************************************************************
2285  * ixgbe_mc_array_itr
2286  *
2287  *   An iterator function needed by the multicast shared code.
2288  *   It feeds the shared code routine the addresses in the
2289  *   array of ixgbe_set_multi() one by one.
2290  ************************************************************************/
2291 static u8 *
2292 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2293 {
2294         struct ixgbe_mc_addr *mta;
2295
2296         mta = (struct ixgbe_mc_addr *)*update_ptr;
2297         *vmdq = mta->vmdq;
2298
2299         *update_ptr = (u8*)(mta + 1);
2300
2301         return (mta->addr);
2302 } /* ixgbe_mc_array_itr */
2303
2304
2305 /************************************************************************
2306  * ixgbe_local_timer - Timer routine
2307  *
2308  *   Checks for link status, updates statistics,
2309  *   and runs the watchdog check.
2310  ************************************************************************/
2311 static void
2312 ixgbe_local_timer(void *arg)
2313 {
2314         struct adapter  *adapter = arg;
2315         device_t        dev = adapter->dev;
2316         struct ix_queue *que = adapter->queues;
2317         u64             queues = 0;
2318         int             hung = 0;
2319
2320         mtx_assert(&adapter->core_mtx, MA_OWNED);
2321
2322         /* Check for pluggable optics */
2323         if (adapter->sfp_probe)
2324                 if (!ixgbe_sfp_probe(adapter))
2325                         goto out; /* Nothing to do */
2326
2327         ixgbe_update_link_status(adapter);
2328         ixgbe_update_stats_counters(adapter);
2329
2330         /*
2331          * Check the TX queues status
2332          *      - mark hung queues so we don't schedule on them
2333          *      - watchdog only if all queues show hung
2334          */
2335         for (int i = 0; i < adapter->num_queues; i++, que++) {
2336                 /* Keep track of queues with work for soft irq */
2337                 if (que->txr->busy)
2338                         queues |= ((u64)1 << que->me);
2339                 /*
1340                  * Each time txeof runs without cleaning anything while
1341                  * uncleaned descriptors remain, it increments busy. Once
1342                  * busy reaches the MAX we declare the queue hung.
2343                  */
2344                 if (que->busy == IXGBE_QUEUE_HUNG) {
2345                         ++hung;
2346                         /* Mark the queue as inactive */
2347                         adapter->active_queues &= ~((u64)1 << que->me);
2348                         continue;
2349                 } else {
2350                         /* Check if we've come back from hung */
2351                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2352                                 adapter->active_queues |= ((u64)1 << que->me);
2353                 }
2354                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2355                         device_printf(dev,
2356                             "Warning queue %d appears to be hung!\n", i);
2357                         que->txr->busy = IXGBE_QUEUE_HUNG;
2358                         ++hung;
2359                 }
2360         }
2361
2362         /* Only truly watchdog if all queues show hung */
2363         if (hung == adapter->num_queues)
2364                 goto watchdog;
2365         else if (queues != 0) { /* Force an IRQ on queues with work */
2366                 ixgbe_rearm_queues(adapter, queues);
2367         }
2368
2369 out:
2370         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2371         return;
2372
2373 watchdog:
2374         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2375         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2376         adapter->watchdog_events++;
2377         ixgbe_init_locked(adapter);
2378 } /* ixgbe_local_timer */
2379
2380
2381 /************************************************************************
2382  * ixgbe_update_link_status - Update OS on link state
2383  *
2384  * Note: Only updates the OS on the cached link state.
2385  *       The real check of the hardware only happens with
2386  *       a link interrupt.
2387  ************************************************************************/
2388 static void
2389 ixgbe_update_link_status(struct adapter *adapter)
2390 {
2391         struct ifnet *ifp = adapter->ifp;
2392         device_t     dev = adapter->dev;
2393
2394         if (adapter->link_up) {
2395                 if (adapter->link_active == FALSE) {
2396                         if (bootverbose)
2397                                 device_printf(dev, "Link is up %d Gbps %s \n",
2398                                     ((adapter->link_speed == 128) ? 10 : 1),
2399                                     "Full Duplex");
2400                         adapter->link_active = TRUE;
2401                         /* Update any Flow Control changes */
2402                         ixgbe_fc_enable(&adapter->hw);
2403                         /* Update DMA coalescing config */
2404                         ixgbe_config_dmac(adapter);
2405                         if_link_state_change(ifp, LINK_STATE_UP);
2406                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2407                                 ixgbe_ping_all_vfs(adapter);
2408                 }
2409         } else { /* Link down */
2410                 if (adapter->link_active == TRUE) {
2411                         if (bootverbose)
2412                                 device_printf(dev, "Link is Down\n");
2413                         if_link_state_change(ifp, LINK_STATE_DOWN);
2414                         adapter->link_active = FALSE;
2415                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2416                                 ixgbe_ping_all_vfs(adapter);
2417                 }
2418         }
2419
2420         return;
2421 } /* ixgbe_update_link_status */
2422
2423
2424 /************************************************************************
2425  * ixgbe_stop - Stop the hardware
2426  *
2427  *   Disables all traffic on the adapter by issuing a
2428  *   global reset on the MAC and deallocates TX/RX buffers.
2429  ************************************************************************/
2430 static void
2431 ixgbe_stop(void *arg)
2432 {
2433         struct ifnet    *ifp;
2434         struct adapter  *adapter = arg;
2435         struct ixgbe_hw *hw = &adapter->hw;
2436
2437         ifp = adapter->ifp;
2438
2439         mtx_assert(&adapter->core_mtx, MA_OWNED);
2440
2441         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2442         ixgbe_disable_intr(adapter);
2443         callout_stop(&adapter->timer);
2444
2445         /* Let the stack know...*/
2446         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2447
2448         ixgbe_reset_hw(hw);
2449         hw->adapter_stopped = FALSE;
2450         ixgbe_stop_adapter(hw);
2451         if (hw->mac.type == ixgbe_mac_82599EB)
2452                 ixgbe_stop_mac_link_on_d3_82599(hw);
2453         /* Turn off the laser - noop with no optics */
2454         ixgbe_disable_tx_laser(hw);
2455
2456         /* Update the stack */
2457         adapter->link_up = FALSE;
2458         ixgbe_update_link_status(adapter);
2459
2460         /* reprogram the RAR[0] in case user changed it. */
2461         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2462
2463         return;
2464 } /* ixgbe_stop */
2465
2466
2467 /************************************************************************
2468  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
2469  ************************************************************************/
2470 static int
2471 ixgbe_allocate_legacy(struct adapter *adapter)
2472 {
2473         device_t        dev = adapter->dev;
2474         struct ix_queue *que = adapter->queues;
2475         struct tx_ring  *txr = adapter->tx_rings;
2476         int             error;
2477
2478         /* We allocate a single interrupt resource */
2479         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2480             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
2481         if (adapter->res == NULL) {
2482                 device_printf(dev,
2483                     "Unable to allocate bus resource: interrupt\n");
2484                 return (ENXIO);
2485         }
2486
2487         /*
2488          * Try allocating a fast interrupt and the associated deferred
2489          * processing contexts.
2490          */
2491         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2492                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2493         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2494         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2495             taskqueue_thread_enqueue, &que->tq);
2496         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2497             device_get_nameunit(adapter->dev));
2498
2499         /* Tasklets for Link, SFP and Multispeed Fiber */
2500         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2501         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2502         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2503         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2504         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2505                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2506         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2507             taskqueue_thread_enqueue, &adapter->tq);
2508         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2509             device_get_nameunit(adapter->dev));
2510
2511         if ((error = bus_setup_intr(dev, adapter->res,
2512             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
2513             &adapter->tag)) != 0) {
2514                 device_printf(dev,
2515                     "Failed to register fast interrupt handler: %d\n", error);
2516                 taskqueue_free(que->tq);
2517                 taskqueue_free(adapter->tq);
2518                 que->tq = NULL;
2519                 adapter->tq = NULL;
2520
2521                 return (error);
2522         }
2523         /* For simplicity in the handlers */
2524         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2525
2526         return (0);
2527 } /* ixgbe_allocate_legacy */
2528
2529
2530 /************************************************************************
2531  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
2532  ************************************************************************/
2533 static int
2534 ixgbe_allocate_msix(struct adapter *adapter)
2535 {
2536         device_t        dev = adapter->dev;
2537         struct ix_queue *que = adapter->queues;
2538         struct tx_ring  *txr = adapter->tx_rings;
2539         int             error, rid, vector = 0;
2540         int             cpu_id = 0;
2541         unsigned int    rss_buckets = 0;
2542         cpuset_t        cpu_mask;
2543
2544         /*
2545          * If we're doing RSS, the number of queues needs to
2546          * match the number of RSS buckets that are configured.
2547          *
2548          * + If there's more queues than RSS buckets, we'll end
2549          *   up with queues that get no traffic.
2550          *
2551          * + If there's more RSS buckets than queues, we'll end
2552          *   up having multiple RSS buckets map to the same queue,
2553          *   so there'll be some contention.
2554          */
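         /*
          * Editor's illustration (not from the original source): per the
          * comment above, 8 queues with only 4 RSS buckets leaves four
          * queues without RSS-steered traffic, while 4 queues with 8
          * buckets makes pairs of buckets contend for the same queue.
          */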
2555         rss_buckets = rss_getnumbuckets();
2556         if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
2557             (adapter->num_queues != rss_buckets)) {
2558                 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
2559                     __func__, adapter->num_queues, rss_buckets);
2560         }
2561
2562         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2563                 rid = vector + 1;
2564                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2565                     RF_SHAREABLE | RF_ACTIVE);
2566                 if (que->res == NULL) {
2567                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2568                             vector);
2569                         return (ENXIO);
2570                 }
2571                 /* Set the handler function */
2572                 error = bus_setup_intr(dev, que->res,
2573                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
2574                     &que->tag);
2575                 if (error) {
2576                         que->res = NULL;
2577                         device_printf(dev, "Failed to register QUE handler");
2578                         return (error);
2579                 }
2580 #if __FreeBSD_version >= 800504
2581                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2582 #endif
2583                 que->msix = vector;
2584                 adapter->active_queues |= (u64)(1 << que->msix);
2585
2586                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2587                         /*
2588                          * The queue ID is used as the RSS layer bucket ID.
2589                          * We look up the queue ID -> RSS CPU ID and select
2590                          * that.
2591                          */
2592                         cpu_id = rss_getcpu(i % rss_buckets);
2593                         CPU_SETOF(cpu_id, &cpu_mask);
2594                 } else {
2595                         /*
2596                          * Bind the msix vector, and thus the
2597                          * rings to the corresponding cpu.
2598                          *
2599                          * This just happens to match the default RSS
2600                          * round-robin bucket -> queue -> CPU allocation.
2601                          */
2602                         if (adapter->num_queues > 1)
2603                                 cpu_id = i;
2604                 }
2605                 if (adapter->num_queues > 1)
2606                         bus_bind_intr(dev, que->res, cpu_id);
2607 #ifdef IXGBE_DEBUG
2608                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
2609                         device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
2610                             cpu_id);
2611                 else
2612                         device_printf(dev, "Bound queue %d to cpu %d\n", i,
2613                             cpu_id);
2614 #endif /* IXGBE_DEBUG */
2615
2616
2617                 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2618                         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
2619                             txr);
2620                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2621                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2622                     taskqueue_thread_enqueue, &que->tq);
2623 #if __FreeBSD_version < 1100000
2624                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2625                     device_get_nameunit(adapter->dev), i);
2626 #else
2627                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
2628                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2629                             &cpu_mask, "%s (bucket %d)",
2630                             device_get_nameunit(adapter->dev), cpu_id);
2631                 else
2632                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2633                             NULL, "%s:q%d", device_get_nameunit(adapter->dev),
2634                             i);
2635 #endif
2636         }
2637
2638         /* and Link */
2639         adapter->link_rid = vector + 1;
2640         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2641             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
2642         if (!adapter->res) {
2643                 device_printf(dev,
2644                     "Unable to allocate bus resource: Link interrupt [%d]\n",
2645                     adapter->link_rid);
2646                 return (ENXIO);
2647         }
2648         /* Set the link handler function */
2649         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2650             NULL, ixgbe_msix_link, adapter, &adapter->tag);
2651         if (error) {
2652                 adapter->res = NULL;
2653                 device_printf(dev, "Failed to register LINK handler");
2654                 return (error);
2655         }
2656 #if __FreeBSD_version >= 800504
2657         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2658 #endif
2659         adapter->vector = vector;
2660         /* Tasklets for Link, SFP and Multispeed Fiber */
2661         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2662         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2663         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2664         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2665                 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2666         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2667         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2668                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2669         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2670             taskqueue_thread_enqueue, &adapter->tq);
2671         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2672             device_get_nameunit(adapter->dev));
2673
2674         return (0);
2675 } /* ixgbe_allocate_msix */
2676
2677 /************************************************************************
2678  * ixgbe_configure_interrupts
2679  *
2680  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
2681  *   This will also depend on user settings.
2682  ************************************************************************/
2683 static int
2684 ixgbe_configure_interrupts(struct adapter *adapter)
2685 {
2686         device_t dev = adapter->dev;
2687         int      rid, want, queues, msgs;
2688
2689         /* Default to 1 queue if MSI-X setup fails */
2690         adapter->num_queues = 1;
2691
2692         /* MSI-X may have been disabled via tuneable; if so, skip it */
2693         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
2694                 goto msi;
2695
2696         /* First try MSI-X */
2697         msgs = pci_msix_count(dev);
2698         if (msgs == 0)
2699                 goto msi;
2700         rid = PCIR_BAR(MSIX_82598_BAR);
2701         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2702             RF_ACTIVE);
2703         if (adapter->msix_mem == NULL) {
2704                 rid += 4;  /* 82599 maps in higher BAR */
2705                 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2706                     &rid, RF_ACTIVE);
2707         }
2708         if (adapter->msix_mem == NULL) {
2709                 /* May not be enabled */
2710                 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
2711                 goto msi;
2712         }
2713
2714         /* Figure out a reasonable auto config value */
2715         queues = min(mp_ncpus, msgs - 1);
2716         /* If we're doing RSS, clamp at the number of RSS buckets */
2717         if (adapter->feat_en & IXGBE_FEATURE_RSS)
2718                 queues = min(queues, rss_getnumbuckets());
2719         if (ixgbe_num_queues > queues) {
2720                 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
2721                 ixgbe_num_queues = queues;
2722         }
2723
2724         if (ixgbe_num_queues != 0)
2725                 queues = ixgbe_num_queues;
2726         else
2727                 /* Set max queues to 8 when autoconfiguring */
2728                 queues = min(queues, 8);
2729
2730         /* reflect correct sysctl value */
2731         ixgbe_num_queues = queues;
2732
2733         /*
2734          * Want one vector (RX/TX pair) per queue
2735          * plus an additional for Link.
2736          */
2737         want = queues + 1;
2738         if (msgs >= want)
2739                 msgs = want;
2740         else {
2741                 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors available but %d vectors wanted!\n",
2742                     msgs, want);
2743                 goto msi;
2744         }
2745         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2746                 device_printf(adapter->dev,
2747                     "Using MSI-X interrupts with %d vectors\n", msgs);
2748                 adapter->num_queues = queues;
2749                 adapter->feat_en |= IXGBE_FEATURE_MSIX;
2750                 return 0;
2751         }
2752         /*
2753          * MSI-X allocation failed or provided us with
2754          * less vectors than needed. Free MSI-X resources
2755          * and we'll try enabling MSI.
2756          */
2757         pci_release_msi(dev);
2758
2759 msi:
2760         /* Without MSI-X, some features are no longer supported */
2761         adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
2762         adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
2763         adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
2764         adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
2765
2766         if (adapter->msix_mem != NULL) {
2767                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2768                     adapter->msix_mem);
2769                 adapter->msix_mem = NULL;
2770         }
2771         msgs = 1;
2772         if (pci_alloc_msi(dev, &msgs) == 0) {
2773                 adapter->feat_en |= IXGBE_FEATURE_MSI;
2774                 adapter->link_rid = 1;
2775                 device_printf(adapter->dev, "Using an MSI interrupt\n");
2776                 return 0;
2777         }
2778
2779         if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
2780                 device_printf(adapter->dev,
2781                     "Device does not support legacy interrupts.\n");
2782                 return 1;
2783         }
2784
2785         adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
2786         adapter->link_rid = 0;
2787         device_printf(adapter->dev, "Using a Legacy interrupt\n");
2788
2789         return 0;
2790 } /* ixgbe_configure_interrupts */
2791
2792
2793 /************************************************************************
2794  * ixgbe_allocate_pci_resources
2795  ************************************************************************/
2796 static int
2797 ixgbe_allocate_pci_resources(struct adapter *adapter)
2798 {
2799         device_t dev = adapter->dev;
2800         int      rid;
2801
2802         rid = PCIR_BAR(0);
2803         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2804             RF_ACTIVE);
2805
2806         if (!(adapter->pci_mem)) {
2807                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2808                 return (ENXIO);
2809         }
2810
2811         /* Save bus_space values for READ/WRITE_REG macros */
2812         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2813         adapter->osdep.mem_bus_space_handle =
2814             rman_get_bushandle(adapter->pci_mem);
2815         /* Set hw values for shared code */
2816         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
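             /*
              * Note: the READ/WRITE_REG macros go through the bus-space
              * tag/handle saved above; hw_addr simply gives the shared
              * code a non-NULL register base to refer to.
              */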
2817
2818         return (0);
2819 } /* ixgbe_allocate_pci_resources */
2820
2821 /************************************************************************
2822  * ixgbe_free_pci_resources
2823  ************************************************************************/
2824 static void
2825 ixgbe_free_pci_resources(struct adapter *adapter)
2826 {
2827         struct ix_queue *que = adapter->queues;
2828         device_t        dev = adapter->dev;
2829         int             rid, memrid;
2830
2831         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2832                 memrid = PCIR_BAR(MSIX_82598_BAR);
2833         else
2834                 memrid = PCIR_BAR(MSIX_82599_BAR);
2835
2836         /*
2837          * There is a slight possibility of a failure mode in
2838          * attach that will result in entering this function before
2839          * interrupt resources have been initialized, and in that
2840          * case we do not want to execute the loops below.  We can
2841          * detect this reliably by the state of the adapter's
2842          * res pointer.
2843          */
2844         if (adapter->res == NULL)
2845                 goto mem;
2846
2847         /*
2848          * Release all msix queue resources:
2849          */
2850         for (int i = 0; i < adapter->num_queues; i++, que++) {
2851                 rid = que->msix + 1;
2852                 if (que->tag != NULL) {
2853                         bus_teardown_intr(dev, que->res, que->tag);
2854                         que->tag = NULL;
2855                 }
2856                 if (que->res != NULL)
2857                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2858         }
2859
2860
2861         /* Clean the Legacy or Link interrupt last */
2862         if (adapter->tag != NULL) {
2863                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2864                 adapter->tag = NULL;
2865         }
2866         if (adapter->res != NULL)
2867                 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
2868                     adapter->res);
2869
2870 mem:
2871         if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
2872             (adapter->feat_en & IXGBE_FEATURE_MSIX))
2873                 pci_release_msi(dev);
2874
2875         if (adapter->msix_mem != NULL)
2876                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
2877                     adapter->msix_mem);
2878
2879         if (adapter->pci_mem != NULL)
2880                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2881                     adapter->pci_mem);
2882
2883         return;
2884 } /* ixgbe_free_pci_resources */
2885
2886 /************************************************************************
2887  * ixgbe_setup_interface
2888  *
2889  *   Setup networking device structure and register an interface.
2890  ************************************************************************/
2891 static int
2892 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2893 {
2894         struct ifnet *ifp;
2895
2896         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2897
2898         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2899         if (ifp == NULL) {
2900                 device_printf(dev, "cannot allocate ifnet structure\n");
2901                 return (-1);
2902         }
2903         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2904         if_initbaudrate(ifp, IF_Gbps(10));
2905         ifp->if_init = ixgbe_init;
2906         ifp->if_softc = adapter;
2907         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2908         ifp->if_ioctl = ixgbe_ioctl;
2909 #if __FreeBSD_version >= 1100036
2910         if_setgetcounterfn(ifp, ixgbe_get_counter);
2911 #endif
2912 #if __FreeBSD_version >= 1100045
2913         /* TSO parameters */
2914         ifp->if_hw_tsomax = 65518;
2915         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2916         ifp->if_hw_tsomaxsegsize = 2048;
2917 #endif
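             /*
              * Select the transmit path: the legacy path drives the
              * single if_snd queue via if_start, while the default path
              * hooks if_transmit/if_qflush for multiqueue operation.
              */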
2918         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
2919                 ifp->if_start = ixgbe_legacy_start;
2920                 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2921                 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2922                 IFQ_SET_READY(&ifp->if_snd);
2923                 ixgbe_start_locked = ixgbe_legacy_start_locked;
2924                 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
2925         } else {
2926                 ifp->if_transmit = ixgbe_mq_start;
2927                 ifp->if_qflush = ixgbe_qflush;
2928                 ixgbe_start_locked = ixgbe_mq_start_locked;
2929                 ixgbe_ring_empty = drbr_empty;
2930         }
2931
2932         ether_ifattach(ifp, adapter->hw.mac.addr);
2933
2934         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
2935
2936         /*
2937          * Tell the upper layer(s) we support long frames.
2938          */
2939         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2940
2941         /* Set capability flags */
2942         ifp->if_capabilities |= IFCAP_HWCSUM
2943                              |  IFCAP_HWCSUM_IPV6
2944                              |  IFCAP_TSO
2945                              |  IFCAP_LRO
2946                              |  IFCAP_VLAN_HWTAGGING
2947                              |  IFCAP_VLAN_HWTSO
2948                              |  IFCAP_VLAN_HWCSUM
2949                              |  IFCAP_JUMBO_MTU
2950                              |  IFCAP_VLAN_MTU
2951                              |  IFCAP_HWSTATS;
2952
2953         /* Enable the above capabilities by default */
2954         ifp->if_capenable = ifp->if_capabilities;
2955
2956         /*
2957          * Don't turn this on by default: if VLANs are
2958          * created on another pseudo device (e.g. lagg),
2959          * VLAN events are not passed through, breaking
2960          * operation, but with HW FILTER off it works.  If
2961          * you use VLANs directly on the ixgbe driver, you
2962          * can enable this and get full hardware tag filtering.
2963          */
2964         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2965
2966         /*
2967          * Specify the media types supported by this adapter and register
2968          * callbacks to update media and link information
2969          */
2970         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2971             ixgbe_media_status);
2972
2973         adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2974         ixgbe_add_media_types(adapter);
2975
2976         /* Set autoselect media by default */
2977         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2978
2979         return (0);
2980 } /* ixgbe_setup_interface */
2981
2982 /************************************************************************
2983  * ixgbe_add_media_types
2984  ************************************************************************/
2985 static void
2986 ixgbe_add_media_types(struct adapter *adapter)
2987 {
2988         struct ixgbe_hw *hw = &adapter->hw;
2989         device_t        dev = adapter->dev;
2990         int             layer;
2991
2992         layer = adapter->phy_layer;
2993
2994         /* Media types with matching FreeBSD media defines */
2995         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2996                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2997         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2998                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2999         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
3000                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
3001         if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
3002                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3003
3004         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
3005             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
3006                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
3007                     NULL);
3008
3009         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
3010                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
3011                 if (hw->phy.multispeed_fiber)
3012                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
3013                             NULL);
3014         }
3015         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
3016                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
3017                 if (hw->phy.multispeed_fiber)
3018                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
3019                             NULL);
3020         } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
3021                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
3022         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3023                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
3024
3025 #ifdef IFM_ETH_XTYPE
3026         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3027                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
3028         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
3029                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
3030         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3031                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
3032 #else
3033         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
3034                 device_printf(dev, "Media supported: 10GbaseKR\n");
3035                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
3036                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
3037         }
3038         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
3039                 device_printf(dev, "Media supported: 10GbaseKX4\n");
3040                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
3041                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
3042         }
3043         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
3044                 device_printf(dev, "Media supported: 1000baseKX\n");
3045                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
3046                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
3047         }
3048 #endif
3049         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
3050                 device_printf(dev, "Media supported: 1000baseBX\n");
3051
3052         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
3053                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
3054                     0, NULL);
3055                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
3056         }
3057
3058         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3059 } /* ixgbe_add_media_types */
3060
3061 /************************************************************************
3062  * ixgbe_config_link
3063  ************************************************************************/
3064 static void
3065 ixgbe_config_link(struct adapter *adapter)
3066 {
3067         struct ixgbe_hw *hw = &adapter->hw;
3068         u32             autoneg, err = 0;
3069         bool            sfp, negotiate;
3070
3071         sfp = ixgbe_is_sfp(hw);
3072
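             /*
              * For SFP-based ports the actual link bring-up is deferred
              * to the module/multispeed-fiber tasklets; other media are
              * configured inline through the shared-code setup_link path.
              */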
3073         if (sfp) {
3074                 if (hw->phy.multispeed_fiber) {
3075                         hw->mac.ops.setup_sfp(hw);
3076                         ixgbe_enable_tx_laser(hw);
3077                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3078                 } else
3079                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3080         } else {
3081                 if (hw->mac.ops.check_link)
3082                         err = ixgbe_check_link(hw, &adapter->link_speed,
3083                             &adapter->link_up, FALSE);
3084                 if (err)
3085                         goto out;
3086                 autoneg = hw->phy.autoneg_advertised;
3087                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3088                         err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3089                             &negotiate);
3090                 if (err)
3091                         goto out;
3092                 if (hw->mac.ops.setup_link)
3093                         err = hw->mac.ops.setup_link(hw, autoneg,
3094                             adapter->link_up);
3095         }
3096 out:
3097
3098         return;
3099 } /* ixgbe_config_link */
3100
3101
3102 /************************************************************************
3103  * ixgbe_initialize_transmit_units - Enable transmit units.
3104  ************************************************************************/
3105 static void
3106 ixgbe_initialize_transmit_units(struct adapter *adapter)
3107 {
3108         struct tx_ring  *txr = adapter->tx_rings;
3109         struct ixgbe_hw *hw = &adapter->hw;
3110
3111         /* Setup the Base and Length of the Tx Descriptor Ring */
3112         for (int i = 0; i < adapter->num_queues; i++, txr++) {
3113                 u64 tdba = txr->txdma.dma_paddr;
3114                 u32 txctrl = 0;
3115                 int j = txr->me;
3116
3117                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3118                     (tdba & 0x00000000ffffffffULL));
3119                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3120                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3121                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3122
3123                 /* Setup the HW Tx Head and Tail descriptor pointers */
3124                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3125                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3126
3127                 /* Cache the tail address */
3128                 txr->tail = IXGBE_TDT(j);
3129
3130                 /* Disable Head Writeback */
3131                 /*
3132                  * Note: for X550 series devices, these registers are actually
3133                  * prefixed with TPH_ instead of DCA_, but the addresses and
3134                  * fields remain the same.
3135                  */
3136                 switch (hw->mac.type) {
3137                 case ixgbe_mac_82598EB:
3138                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3139                         break;
3140                 default:
3141                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3142                         break;
3143                 }
3144                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3145                 switch (hw->mac.type) {
3146                 case ixgbe_mac_82598EB:
3147                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3148                         break;
3149                 default:
3150                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3151                         break;
3152                 }
3153
3154         }
3155
3156         if (hw->mac.type != ixgbe_mac_82598EB) {
3157                 u32 dmatxctl, rttdcs;
3158
3159                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3160                 dmatxctl |= IXGBE_DMATXCTL_TE;
3161                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3162                 /* Disable arbiter to set MTQC */
3163                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3164                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3165                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3166                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
3167                     ixgbe_get_mtqc(adapter->iov_mode));
3168                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3169                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3170         }
3171
3172         return;
3173 } /* ixgbe_initialize_transmit_units */
3174
3175 /************************************************************************
3176  * ixgbe_initialize_rss_mapping
3177  ************************************************************************/
3178 static void
3179 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3180 {
3181         struct ixgbe_hw *hw = &adapter->hw;
3182         u32             reta = 0, mrqc, rss_key[10];
3183         int             queue_id, table_size, index_mult;
3184         int             i, j;
3185         u32             rss_hash_config;
3186
3187         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
3188                 /* Fetch the configured RSS key */
3189                 rss_getkey((uint8_t *)&rss_key);
3190         } else {
3191                 /* set up random bits */
3192                 arc4rand(&rss_key, sizeof(rss_key), 0);
3193         }
3194
3195         /* Set multiplier for RETA setup and table size based on MAC */
3196         index_mult = 0x1;
3197         table_size = 128;
3198         switch (adapter->hw.mac.type) {
3199         case ixgbe_mac_82598EB:
3200                 index_mult = 0x11;
3201                 break;
3202         case ixgbe_mac_X550:
3203         case ixgbe_mac_X550EM_x:
3204         case ixgbe_mac_X550EM_a:
3205                 table_size = 512;
3206                 break;
3207         default:
3208                 break;
3209         }
3210
3211         /* Set up the redirection table */
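             /*
              * Each 32-bit RETA register holds four 8-bit entries, so a
              * register is flushed on every fourth iteration; entries
              * beyond the first 128 go into the extended ERETA registers
              * used by the 512-entry X550 table.
              */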
3212         for (i = 0, j = 0; i < table_size; i++, j++) {
3213                 if (j == adapter->num_queues)
3214                         j = 0;
3215
3216                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
3217                         /*
3218                          * Fetch the RSS bucket id for the given indirection
3219                          * entry. Cap it at the number of configured buckets
3220                          * (which is num_queues.)
3221                          */
3222                         queue_id = rss_get_indirection_to_bucket(i);
3223                         queue_id = queue_id % adapter->num_queues;
3224                 } else
3225                         queue_id = (j * index_mult);
3226
3227                 /*
3228                  * The low 8 bits are for hash value (n+0);
3229                  * The next 8 bits are for hash value (n+1), etc.
3230                  */
3231                 reta = reta >> 8;
3232                 reta = reta | (((uint32_t)queue_id) << 24);
3233                 if ((i & 3) == 3) {
3234                         if (i < 128)
3235                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3236                         else
3237                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3238                                     reta);
3239                         reta = 0;
3240                 }
3241         }
3242
3243         /* Now fill our hash function seeds */
3244         for (i = 0; i < 10; i++)
3245                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3246
3247         /* Perform hash on these packet types */
3248         if (adapter->feat_en & IXGBE_FEATURE_RSS)
3249                 rss_hash_config = rss_gethashconfig();
3250         else {
3251                 /*
3252                  * Disable UDP - IP fragments aren't currently being handled
3253                  * and so we end up with a mix of 2-tuple and 4-tuple
3254                  * traffic.
3255                  */
3256                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
3257                                 | RSS_HASHTYPE_RSS_TCP_IPV4
3258                                 | RSS_HASHTYPE_RSS_IPV6
3259                                 | RSS_HASHTYPE_RSS_TCP_IPV6
3260                                 | RSS_HASHTYPE_RSS_IPV6_EX
3261                                 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
3262         }
3263
3264         mrqc = IXGBE_MRQC_RSSEN;
3265         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3266                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3267         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3268                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3269         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3270                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3271         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3272                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3273         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3274                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3275         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3276                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3277         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3278                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3279         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3280                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
3281                     __func__);
3282         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3283                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3284         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3285                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3286         mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
3287         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3288 } /* ixgbe_initialize_rss_mapping */
3289
3290
3291 /************************************************************************
3292  * ixgbe_initialize_receive_units - Setup receive registers and features.
3293  ************************************************************************/
3294 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3295
3296 static void
3297 ixgbe_initialize_receive_units(struct adapter *adapter)
3298 {
3299         struct rx_ring  *rxr = adapter->rx_rings;
3300         struct ixgbe_hw *hw = &adapter->hw;
3301         struct ifnet    *ifp = adapter->ifp;
3302         int             i, j;
3303         u32             bufsz, fctrl, srrctl, rxcsum;
3304         u32             hlreg;
3305
3306         /*
3307          * Make sure receives are disabled while
3308          * setting up the descriptor ring
3309          */
3310         ixgbe_disable_rx(hw);
3311
3312         /* Enable broadcasts */
3313         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3314         fctrl |= IXGBE_FCTRL_BAM;
3315         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3316                 fctrl |= IXGBE_FCTRL_DPF;
3317                 fctrl |= IXGBE_FCTRL_PMCF;
3318         }
3319         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3320
3321         /* Set for Jumbo Frames? */
3322         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3323         if (ifp->if_mtu > ETHERMTU)
3324                 hlreg |= IXGBE_HLREG0_JUMBOEN;
3325         else
3326                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3327
3328 #ifdef DEV_NETMAP
3329         /* CRC stripping is conditional in Netmap */
3330         if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3331             (ifp->if_capenable & IFCAP_NETMAP) &&
3332             !ix_crcstrip)
3333                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3334         else
3335 #endif /* DEV_NETMAP */
3336                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3337
3338         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3339
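             /*
              * Convert the receive mbuf size into SRRCTL's packet-buffer
              * units of (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes,
              * rounding up.
              */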
3340         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
3341             IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3342
3343         for (i = 0; i < adapter->num_queues; i++, rxr++) {
3344                 u64 rdba = rxr->rxdma.dma_paddr;
3345                 j = rxr->me;
3346
3347                 /* Setup the Base and Length of the Rx Descriptor Ring */
3348                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3349                     (rdba & 0x00000000ffffffffULL));
3350                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3351                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3352                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3353
3354                 /* Set up the SRRCTL register */
3355                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3356                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3357                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3358                 srrctl |= bufsz;
3359                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3360
3361                 /*
3362                  * Set DROP_EN iff we have no flow control and >1 queue.
3363                  * Note that srrctl was cleared shortly before during reset,
3364                  * so we do not need to clear the bit, but do it just in case
3365                  * this code is moved elsewhere.
3366                  */
3367                 if (adapter->num_queues > 1 &&
3368                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3369                         srrctl |= IXGBE_SRRCTL_DROP_EN;
3370                 } else {
3371                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3372                 }
3373
3374                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3375
3376                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3377                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3378                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3379
3380                 /* Set the driver rx tail address */
3381                 rxr->tail = IXGBE_RDT(rxr->me);
3382         }
3383
3384         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3385                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
3386                             | IXGBE_PSRTYPE_UDPHDR
3387                             | IXGBE_PSRTYPE_IPV4HDR
3388                             | IXGBE_PSRTYPE_IPV6HDR;
3389                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3390         }
3391
3392         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3393
3394         ixgbe_initialize_rss_mapping(adapter);
3395
3396         if (adapter->num_queues > 1) {
3397                 /* RSS and RX IPP Checksum are mutually exclusive */
3398                 rxcsum |= IXGBE_RXCSUM_PCSD;
3399         }
3400
3401         if (ifp->if_capenable & IFCAP_RXCSUM)
3402                 rxcsum |= IXGBE_RXCSUM_PCSD;
3403
3404         /* This is useful for calculating UDP/IP fragment checksums */
3405         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3406                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3407
3408         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3409
3410         return;
3411 } /* ixgbe_initialize_receive_units */
3412
3413
3414 /************************************************************************
3415  * ixgbe_register_vlan
3416  *
3417  *   Run via the vlan config EVENT; it enables us to use the
3418  *   HW filter table since we can get the vlan id.  This
3419  *   just creates the entry in the soft version of the
3420  *   VFTA; init will repopulate the real table.
3421  ************************************************************************/
3422 static void
3423 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3424 {
3425         struct adapter *adapter = ifp->if_softc;
3426         u16            index, bit;
3427
3428         if (ifp->if_softc != arg)  /* Not our event */
3429                 return;
3430
3431         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
3432                 return;
3433
3434         IXGBE_CORE_LOCK(adapter);
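             /*
              * The VFTA is an array of 32-bit registers: bits 11:5 of
              * the VLAN tag select the register and bits 4:0 select the
              * bit within it.
              */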
3435         index = (vtag >> 5) & 0x7F;
3436         bit = vtag & 0x1F;
3437         adapter->shadow_vfta[index] |= (1 << bit);
3438         ++adapter->num_vlans;
3439         ixgbe_setup_vlan_hw_support(adapter);
3440         IXGBE_CORE_UNLOCK(adapter);
3441 } /* ixgbe_register_vlan */
3442
3443 /************************************************************************
3444  * ixgbe_unregister_vlan
3445  *
3446  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
3447  ************************************************************************/
3448 static void
3449 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3450 {
3451         struct adapter *adapter = ifp->if_softc;
3452         u16            index, bit;
3453
3454         if (ifp->if_softc != arg)
3455                 return;
3456
3457         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
3458                 return;
3459
3460         IXGBE_CORE_LOCK(adapter);
3461         index = (vtag >> 5) & 0x7F;
3462         bit = vtag & 0x1F;
3463         adapter->shadow_vfta[index] &= ~(1 << bit);
3464         --adapter->num_vlans;
3465         /* Re-init to load the changes */
3466         ixgbe_setup_vlan_hw_support(adapter);
3467         IXGBE_CORE_UNLOCK(adapter);
3468 } /* ixgbe_unregister_vlan */
3469
3470 /************************************************************************
3471  * ixgbe_setup_vlan_hw_support
3472  ************************************************************************/
3473 static void
3474 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3475 {
3476         struct ifnet    *ifp = adapter->ifp;
3477         struct ixgbe_hw *hw = &adapter->hw;
3478         struct rx_ring  *rxr;
3479         int             i;
3480         u32             ctrl;
3481
3482
3483         /*
3484          * We get here through init_locked, meaning a soft
3485          * reset; this has already cleared the VFTA and other
3486          * state, so if no VLANs have been registered, do
3487          * nothing.
3488          */
3489         if (adapter->num_vlans == 0)
3490                 return;
3491
3492         /* Setup the queues for vlans */
3493         for (i = 0; i < adapter->num_queues; i++) {
3494                 rxr = &adapter->rx_rings[i];
3495                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3496                 if (hw->mac.type != ixgbe_mac_82598EB) {
3497                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3498                         ctrl |= IXGBE_RXDCTL_VME;
3499                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3500                 }
3501                 rxr->vtag_strip = TRUE;
3502         }
3503
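             /*
              * The per-queue tag stripping above is set up regardless;
              * the VFTA repopulation and VLNCTRL filter enable below
              * apply only when hardware VLAN filtering is turned on.
              */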
3504         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3505                 return;
3506         /*
3507          * A soft reset zeroes out the VFTA, so
3508          * we need to repopulate it now.
3509          */
3510         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
3511                 if (adapter->shadow_vfta[i] != 0)
3512                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3513                             adapter->shadow_vfta[i]);
3514
3515         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3516         /* Enable the Filter Table if enabled */
3517         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3518                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3519                 ctrl |= IXGBE_VLNCTRL_VFE;
3520         }
3521         if (hw->mac.type == ixgbe_mac_82598EB)
3522                 ctrl |= IXGBE_VLNCTRL_VME;
3523         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3524 } /* ixgbe_setup_vlan_hw_support */
3525
3526 /************************************************************************
3527  * ixgbe_enable_intr
3528  ************************************************************************/
3529 static void
3530 ixgbe_enable_intr(struct adapter *adapter)
3531 {
3532         struct ixgbe_hw *hw = &adapter->hw;
3533         struct ix_queue *que = adapter->queues;
3534         u32             mask, fwsm;
3535
3536         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3537
3538         switch (adapter->hw.mac.type) {
3539         case ixgbe_mac_82599EB:
3540                 mask |= IXGBE_EIMS_ECC;
3541                 /* Temperature sensor on some adapters */
3542                 mask |= IXGBE_EIMS_GPI_SDP0;
3543                 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3544                 mask |= IXGBE_EIMS_GPI_SDP1;
3545                 mask |= IXGBE_EIMS_GPI_SDP2;
3546                 break;
3547         case ixgbe_mac_X540:
3548                 /* Detect if Thermal Sensor is enabled */
3549                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3550                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3551                         mask |= IXGBE_EIMS_TS;
3552                 mask |= IXGBE_EIMS_ECC;
3553                 break;
3554         case ixgbe_mac_X550:
3555                 /* MAC thermal sensor is automatically enabled */
3556                 mask |= IXGBE_EIMS_TS;
3557                 mask |= IXGBE_EIMS_ECC;
3558                 break;
3559         case ixgbe_mac_X550EM_x:
3560         case ixgbe_mac_X550EM_a:
3561                 /* Some devices use SDP0 for important information */
3562                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3563                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3564                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3565                     hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3566                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3567                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3568                         mask |= IXGBE_EICR_GPI_SDP0_X540;
3569                 mask |= IXGBE_EIMS_ECC;
3570                 break;
3571         default:
3572                 break;
3573         }
3574
3575         /* Enable Fan Failure detection */
3576         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3577                 mask |= IXGBE_EIMS_GPI_SDP1;
3578         /* Enable SR-IOV */
3579         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3580                 mask |= IXGBE_EIMS_MAILBOX;
3581         /* Enable Flow Director */
3582         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3583                 mask |= IXGBE_EIMS_FLOW_DIR;
3584
3585         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3586
3587         /* With MSI-X we use auto clear */
3588         if (adapter->msix_mem) {
3589                 mask = IXGBE_EIMS_ENABLE_MASK;
3590                 /* Don't autoclear Link */
3591                 mask &= ~IXGBE_EIMS_OTHER;
3592                 mask &= ~IXGBE_EIMS_LSC;
3593                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3594                         mask &= ~IXGBE_EIMS_MAILBOX;
3595                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3596         }
3597
3598         /*
3599          * Now enable all queues; this is done separately to
3600          * allow for handling the extended (beyond 32) MSI-X
3601          * vectors that can be used by the 82599.
3602          */
3603         for (int i = 0; i < adapter->num_queues; i++, que++)
3604                 ixgbe_enable_queue(adapter, que->msix);
3605
3606         IXGBE_WRITE_FLUSH(hw);
3607
3608         return;
3609 } /* ixgbe_enable_intr */
3610
3611 /************************************************************************
3612  * ixgbe_disable_intr
3613  ************************************************************************/
3614 static void
3615 ixgbe_disable_intr(struct adapter *adapter)
3616 {
3617         if (adapter->msix_mem)
3618                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
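             /*
              * The 82598 has a single EIMC covering every cause; newer
              * MACs expose the queue vectors through the extended
              * EIMC_EX pair, so here EIMC masks the upper (non-queue)
              * causes and EIMC_EX masks all of the queue vectors.
              */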
3619         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3620                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3621         } else {
3622                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3623                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3624                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3625         }
3626         IXGBE_WRITE_FLUSH(&adapter->hw);
3627
3628         return;
3629 } /* ixgbe_disable_intr */
3630
3631 /************************************************************************
3632  * ixgbe_get_slot_info
3633  *
3634  *   Get the width and transaction speed of
3635  *   the slot this adapter is plugged into.
3636  ************************************************************************/
3637 static void
3638 ixgbe_get_slot_info(struct adapter *adapter)
3639 {
3640         device_t              dev = adapter->dev;
3641         struct ixgbe_hw       *hw = &adapter->hw;
3642         u32                   offset;
3643         u16                   link;
3644         int                   bus_info_valid = TRUE;
3645
3646         /* Some devices are behind an internal bridge */
3647         switch (hw->device_id) {
3648         case IXGBE_DEV_ID_82599_SFP_SF_QP:
3649         case IXGBE_DEV_ID_82599_QSFP_SF_QP:
3650                 goto get_parent_info;
3651         default:
3652                 break;
3653         }
3654
3655         ixgbe_get_bus_info(hw);
3656
3657         /*
3658          * Some devices don't use PCI-E, so there is no need
3659          * to display "Unknown" for bus speed and width.
3660          */
3661         switch (hw->mac.type) {
3662         case ixgbe_mac_X550EM_x:
3663         case ixgbe_mac_X550EM_a:
3664                 return;
3665         default:
3666                 goto display;
3667         }
3668
3669 get_parent_info:
3670         /*
3671          * For the Quad port adapter we need to parse back
3672          * up the PCI tree to find the speed of the expansion
3673          * slot into which this adapter is plugged. A bit more work.
3674          */
3675         dev = device_get_parent(device_get_parent(dev));
3676 #ifdef IXGBE_DEBUG
3677         device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
3678             pci_get_slot(dev), pci_get_function(dev));
3679 #endif
3680         dev = device_get_parent(device_get_parent(dev));
3681 #ifdef IXGBE_DEBUG
3682         device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
3683             pci_get_slot(dev), pci_get_function(dev));
3684 #endif
3685         /* Now get the PCI Express Capabilities offset */
3686         if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
3687                 /*
3688                  * Hmm...can't get PCI-Express capabilities.
3689                  * Falling back to default method.
3690                  */
3691                 bus_info_valid = FALSE;
3692                 ixgbe_get_bus_info(hw);
3693                 goto display;
3694         }
3695         /* ...and read the Link Status Register */
3696         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3697         ixgbe_set_pci_config_data_generic(hw, link);
3698
3699 display:
3700         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3701             ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
3702              (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
3703              (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
3704              "Unknown"),
3705             ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3706              (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3707              (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3708              "Unknown"));
3709
3710         if (bus_info_valid) {
3711                 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3712                     ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3713                     (hw->bus.speed == ixgbe_bus_speed_2500))) {
3714                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
3715                         device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
3716                 }
3717                 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3718                     ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3719                     (hw->bus.speed < ixgbe_bus_speed_8000))) {
3720                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
3721                         device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
3722                 }
3723         } else
3724                 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
3725
3726         return;
3727 } /* ixgbe_get_slot_info */
3728
3729
3730 /************************************************************************
3731  * ixgbe_set_ivar
3732  *
3733  *   Setup the correct IVAR register for a particular MSI-X interrupt
3734  *     (yes this is all very magic and confusing :)
3735  *    - entry is the register array entry
3736  *    - vector is the MSI-X vector for this queue
3737  *    - type is RX/TX/MISC
3738  ************************************************************************/
3739 static void
3740 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3741 {
3742         struct ixgbe_hw *hw = &adapter->hw;
3743         u32 ivar, index;
3744
3745         vector |= IXGBE_IVAR_ALLOC_VAL;
3746
3747         switch (hw->mac.type) {
3748
3749         case ixgbe_mac_82598EB:
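                     /*
                      * The 82598 packs four 8-bit IVAR entries per
                      * register; RX entries occupy 0-63 and TX entries
                      * 64-127, hence the type * 64 offset.
                      */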
3750                 if (type == -1)
3751                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3752                 else
3753                         entry += (type * 64);
3754                 index = (entry >> 2) & 0x1F;
3755                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3756                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3757                 ivar |= (vector << (8 * (entry & 0x3)));
3758                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3759                 break;
3760
3761         case ixgbe_mac_82599EB:
3762         case ixgbe_mac_X540:
3763         case ixgbe_mac_X550:
3764         case ixgbe_mac_X550EM_x:
3765         case ixgbe_mac_X550EM_a:
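                     /*
                      * Newer MACs pair two queues per IVAR register: the
                      * even/odd entry selects the low/high 16 bits and
                      * the RX/TX type selects the byte within that half.
                      * The "other causes" vector has its own IVAR_MISC
                      * register.
                      */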
3766                 if (type == -1) { /* MISC IVAR */
3767                         index = (entry & 1) * 8;
3768                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3769                         ivar &= ~(0xFF << index);
3770                         ivar |= (vector << index);
3771                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3772                 } else {          /* RX/TX IVARS */
3773                         index = (16 * (entry & 1)) + (8 * type);
3774                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3775                         ivar &= ~(0xFF << index);
3776                         ivar |= (vector << index);
3777                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3778                 }
3779                 break;
3780         default:
3781                 break;
3782         }
3783 } /* ixgbe_set_ivar */
3784
3785 /************************************************************************
3786  * ixgbe_configure_ivars
3787  ************************************************************************/
3788 static void
3789 ixgbe_configure_ivars(struct adapter *adapter)
3790 {
3791         struct ix_queue *que = adapter->queues;
3792         u32             newitr;
3793
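             /*
              * Convert the interrupt-rate tuneable into an EITR
              * interval.  The interval field starts at bit 3 (the low
              * three bits are reserved), hence the 0x0FF8 mask below.
              */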
3794         if (ixgbe_max_interrupt_rate > 0)
3795                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3796         else {
3797                 /*
3798                  * Disable DMA coalescing if interrupt moderation is
3799                  * disabled.
3800                  */
3801                 adapter->dmac = 0;
3802                 newitr = 0;
3803         }
3804
3805         for (int i = 0; i < adapter->num_queues; i++, que++) {
3806                 struct rx_ring *rxr = &adapter->rx_rings[i];
3807                 struct tx_ring *txr = &adapter->tx_rings[i];
3808                 /* First the RX queue entry */
3809                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3810                 /* ... and the TX */
3811                 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3812                 /* Set an Initial EITR value */
3813                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3814         }
3815
3816         /* For the Link interrupt */
3817         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3818 } /* ixgbe_configure_ivars */
3819
3820 /************************************************************************
3821  * ixgbe_sfp_probe
3822  *
3823  *   Determine if a port had optics inserted.
3824  ************************************************************************/
3825 static bool
3826 ixgbe_sfp_probe(struct adapter *adapter)
3827 {
3828         struct ixgbe_hw *hw = &adapter->hw;
3829         device_t        dev = adapter->dev;
3830         bool            result = FALSE;
3831
3832         if ((hw->phy.type == ixgbe_phy_nl) &&
3833             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3834                 s32 ret = hw->phy.ops.identify_sfp(hw);
3835                 if (ret)
3836                         goto out;
3837                 ret = hw->phy.ops.reset(hw);
3838                 adapter->sfp_probe = FALSE;
3839                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3840                         device_printf(dev, "Unsupported SFP+ module detected!\n");
3841                         device_printf(dev,
3842                             "Reload driver with supported module.\n");
3843                         goto out;
3844                 } else
3845                         device_printf(dev, "SFP+ module detected!\n");
3846                 /* We now have supported optics */
3847                 result = TRUE;
3848         }
3849 out:
3850
3851         return (result);
3852 } /* ixgbe_sfp_probe */
3853
3854 /************************************************************************
3855  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
3856  *
3857  *   Done outside of interrupt context since the driver might sleep
3858  ************************************************************************/
3859 static void
3860 ixgbe_handle_link(void *context, int pending)
3861 {
3862         struct adapter  *adapter = context;
3863         struct ixgbe_hw *hw = &adapter->hw;
3864
3865         ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
3866         ixgbe_update_link_status(adapter);
3867
3868         /* Re-enable link interrupts */
3869         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3870 } /* ixgbe_handle_link */
3871
3872 /************************************************************************
3873  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3874  ************************************************************************/
3875 static void
3876 ixgbe_handle_mod(void *context, int pending)
3877 {
3878         struct adapter  *adapter = context;
3879         struct ixgbe_hw *hw = &adapter->hw;
3880         device_t        dev = adapter->dev;
3881         u32             err, cage_full = 0;
3882
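             /*
              * Parts that need the crosstalk fix can raise spurious
              * module interrupts; check the SDP "cage full" pin and
              * ignore the event if no module is actually present.
              */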
3883         if (adapter->hw.need_crosstalk_fix) {
3884                 switch (hw->mac.type) {
3885                 case ixgbe_mac_82599EB:
3886                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3887                             IXGBE_ESDP_SDP2;
3888                         break;
3889                 case ixgbe_mac_X550EM_x:
3890                 case ixgbe_mac_X550EM_a:
3891                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3892                             IXGBE_ESDP_SDP0;
3893                         break;
3894                 default:
3895                         break;
3896                 }
3897
3898                 if (!cage_full)
3899                         return;
3900         }
3901
3902         err = hw->phy.ops.identify_sfp(hw);
3903         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3904                 device_printf(dev,
3905                     "Unsupported SFP+ module type was detected.\n");
3906                 return;
3907         }
3908
3909         err = hw->mac.ops.setup_sfp(hw);
3910         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3911                 device_printf(dev,
3912                     "Setup failure - unsupported SFP+ module type.\n");
3913                 return;
3914         }
3915         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3916
3917         return;
3918 } /* ixgbe_handle_mod */
3919
3920
3921 /************************************************************************
3922  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3923  ************************************************************************/
3924 static void
3925 ixgbe_handle_msf(void *context, int pending)
3926 {
3927         struct adapter  *adapter = context;
3928         struct ixgbe_hw *hw = &adapter->hw;
3929         u32             autoneg;
3930         bool            negotiate;
3931
3932         IXGBE_CORE_LOCK(adapter);
3933         /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3934         adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3935
3936         autoneg = hw->phy.autoneg_advertised;
3937         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3938                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3939         if (hw->mac.ops.setup_link)
3940                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3941
3942         /* Adjust media types shown in ifconfig */
3943         ifmedia_removeall(&adapter->media);
3944         ixgbe_add_media_types(adapter);
3945         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3946         IXGBE_CORE_UNLOCK(adapter);
3947         return;
3948 } /* ixgbe_handle_msf */
3949
3950 /************************************************************************
3951  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3952  ************************************************************************/
3953 static void
3954 ixgbe_handle_phy(void *context, int pending)
3955 {
3956         struct adapter  *adapter = context;
3957         struct ixgbe_hw *hw = &adapter->hw;
3958         int             error;
3959
3960         error = hw->phy.ops.handle_lasi(hw);
3961         if (error == IXGBE_ERR_OVERTEMP)
3962                 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3963         else if (error)
3964                 device_printf(adapter->dev,
3965                     "Error handling LASI interrupt: %d\n", error);
3966
3967         return;
3968 } /* ixgbe_handle_phy */
3969
3970 /************************************************************************
3971  * ixgbe_config_dmac - Configure DMA Coalescing
3972  ************************************************************************/
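     /*
      * Pushes the current dmac/link-speed settings to the hardware; only
      * MACs of X550 class or newer provide dmac_config, and the write is
      * skipped when nothing has changed.
      */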
3973 static void
3974 ixgbe_config_dmac(struct adapter *adapter)
3975 {
3976         struct ixgbe_hw          *hw = &adapter->hw;
3977         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3978
3979         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3980                 return;
3981
3982         if (dcfg->watchdog_timer ^ adapter->dmac ||
3983             dcfg->link_speed ^ adapter->link_speed) {
3984                 dcfg->watchdog_timer = adapter->dmac;
3985                 dcfg->fcoe_en = false;
3986                 dcfg->link_speed = adapter->link_speed;
3987                 dcfg->num_tcs = 1;
3988
3989                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3990                     dcfg->watchdog_timer, dcfg->link_speed);
3991
3992                 hw->mac.ops.dmac_config(hw);
3993         }
3994 } /* ixgbe_config_dmac */
3995
3996 /************************************************************************
3997  * ixgbe_check_wol_support
3998  *
3999  *   Checks whether this port is capable of Wake On LAN by
4000  *   reading the device capabilities from the adapter's NVM.
4001  *
4002  *   Sets hw->wol_enabled and adapter->wol_support based on
4003  *   the value read.
4004  ************************************************************************/
4005 static void
4006 ixgbe_check_wol_support(struct adapter *adapter)
4007 {
4008         struct ixgbe_hw *hw = &adapter->hw;
4009         u16             dev_caps = 0;
4010
4011         /* Find out WoL support for port */
4012         adapter->wol_support = hw->wol_enabled = 0;
4013         ixgbe_get_device_caps(hw, &dev_caps);
4014         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
4015             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
4016              hw->bus.func == 0))
4017                 adapter->wol_support = hw->wol_enabled = 1;
4018
4019         /* Save initial wake up filter configuration */
4020         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
4021
4022         return;
4023 } /* ixgbe_check_wol_support */
4024
4025 /************************************************************************
4026  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
4027  *
4028  *   Prepare the adapter/port for LPLU and/or WoL
4029  ************************************************************************/
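     /*
      * Only X550EM baseT (X552/X557-AT) gets the full WUC/WUFC/LPLU
      * programming; all other adapters are simply stopped.
      */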
4030 static int
4031 ixgbe_setup_low_power_mode(struct adapter *adapter)
4032 {
4033         struct ixgbe_hw *hw = &adapter->hw;
4034         device_t        dev = adapter->dev;
4035         s32             error = 0;
4036
4037         mtx_assert(&adapter->core_mtx, MA_OWNED);
4038
4039         /* Limit power management flow to X550EM baseT */
4040         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4041             hw->phy.ops.enter_lplu) {
4042                 /* Turn off support for APM wakeup. (Using ACPI instead) */
4043                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4044                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4045
4046                 /*
4047                  * Clear Wake Up Status register to prevent any previous wakeup
4048                  * events from waking us up immediately after we suspend.
4049                  */
4050                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4051
4052                 /*
4053                  * Program the Wakeup Filter Control register with user filter
4054                  * settings
4055                  */
4056                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4057
4058                 /* Enable wakeups and power management in Wakeup Control */
4059                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4060                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4061
4062                 /* X550EM baseT adapters need a special LPLU flow */
4063                 hw->phy.reset_disable = true;
4064                 ixgbe_stop(adapter);
4065                 error = hw->phy.ops.enter_lplu(hw);
4066                 if (error)
4067                         device_printf(dev, "Error entering LPLU: %d\n", error);
4068                 hw->phy.reset_disable = false;
4069         } else {
4070                 /* Just stop for other adapters */
4071                 ixgbe_stop(adapter);
4072         }
4073
4074         return error;
4075 } /* ixgbe_setup_low_power_mode */
4076
4077 /************************************************************************
4078  * ixgbe_update_stats_counters - Update board statistics counters.
4079  ************************************************************************/
4080 static void
4081 ixgbe_update_stats_counters(struct adapter *adapter)
4082 {
4083         struct ixgbe_hw       *hw = &adapter->hw;
4084         struct ixgbe_hw_stats *stats = &adapter->stats_pf;
4085         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
4086         u64                   total_missed_rx = 0;
4087
4088         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4089         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4090         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4091         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4092         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
4093
4094         for (int i = 0; i < 16; i++) {
4095                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4096                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4097                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4098         }
4099         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4100         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4101         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4102
4103         /* Hardware workaround, gprc counts missed packets */
4104         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4105         stats->gprc -= missed_rx;
4106
4107         if (hw->mac.type != ixgbe_mac_82598EB) {
4108                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4109                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4110                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4111                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4112                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4113                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4114                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4115                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4116         } else {
4117                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4118                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4119                 /* 82598 only has a counter in the high register */
4120                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4121                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4122                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4123         }
4124
4125         /*
4126          * Workaround: the 82598 mprc counter incorrectly includes
4127          * broadcasts, so subtract those on that MAC.
4128          */
4129         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4130         stats->bprc += bprc;
4131         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4132         if (hw->mac.type == ixgbe_mac_82598EB)
4133                 stats->mprc -= bprc;
4134
4135         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4136         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4137         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4138         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4139         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4140         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4141
4142         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4143         stats->lxontxc += lxon;
4144         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4145         stats->lxofftxc += lxoff;
4146         total = lxon + lxoff;
4147
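             /*
              * The good/multicast/64-byte TX counters below include the
              * XON/XOFF pause frames counted above, so back those out.
              */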
4148         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4149         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4150         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4151         stats->gptc -= total;
4152         stats->mptc -= total;
4153         stats->ptc64 -= total;
4154         stats->gotc -= total * ETHER_MIN_LEN;
4155
4156         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4157         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4158         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4159         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4160         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4161         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4162         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4163         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4164         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4165         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4166         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4167         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4168         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4169         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4170         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4171         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4172         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4173         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4174         /* Only read FCOE on 82599 */
4175         if (hw->mac.type != ixgbe_mac_82598EB) {
4176                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4177                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4178                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4179                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4180                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4181         }
4182
4183         /* Fill out the OS statistics structure */
4184         IXGBE_SET_IPACKETS(adapter, stats->gprc);
4185         IXGBE_SET_OPACKETS(adapter, stats->gptc);
4186         IXGBE_SET_IBYTES(adapter, stats->gorc);
4187         IXGBE_SET_OBYTES(adapter, stats->gotc);
4188         IXGBE_SET_IMCASTS(adapter, stats->mprc);
4189         IXGBE_SET_OMCASTS(adapter, stats->mptc);
4190         IXGBE_SET_COLLISIONS(adapter, 0);
4191         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4192         IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
4193 } /* ixgbe_update_stats_counters */
4194
4195 #if __FreeBSD_version >= 1100036
4196 /************************************************************************
4197  * ixgbe_get_counter
4198  ************************************************************************/
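     /*
      * Maps the counters accumulated in ixgbe_update_stats_counters()
      * into the ifnet counter API (if_get_counter); OQDROPS is summed
      * from each TX ring's buf_ring drop count.
      */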
4199 static uint64_t
4200 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4201 {
4202         struct adapter *adapter;
4203         struct tx_ring *txr;
4204         uint64_t       rv;
4205
4206         adapter = if_getsoftc(ifp);
4207
4208         switch (cnt) {
4209         case IFCOUNTER_IPACKETS:
4210                 return (adapter->ipackets);
4211         case IFCOUNTER_OPACKETS:
4212                 return (adapter->opackets);
4213         case IFCOUNTER_IBYTES:
4214                 return (adapter->ibytes);
4215         case IFCOUNTER_OBYTES:
4216                 return (adapter->obytes);
4217         case IFCOUNTER_IMCASTS:
4218                 return (adapter->imcasts);
4219         case IFCOUNTER_OMCASTS:
4220                 return (adapter->omcasts);
4221         case IFCOUNTER_COLLISIONS:
4222                 return (0);
4223         case IFCOUNTER_IQDROPS:
4224                 return (adapter->iqdrops);
4225         case IFCOUNTER_OQDROPS:
4226                 rv = 0;
4227                 txr = adapter->tx_rings;
4228                 for (int i = 0; i < adapter->num_queues; i++, txr++)
4229                         rv += txr->br->br_drops;
4230                 return (rv);
4231         case IFCOUNTER_IERRORS:
4232                 return (adapter->ierrors);
4233         default:
4234                 return (if_get_counter_default(ifp, cnt));
4235         }
4236 } /* ixgbe_get_counter */
4237 #endif
4238
4239 /************************************************************************
4240  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
4241  *
4242  *   Retrieves the TDH value from the hardware
4243  ************************************************************************/
4244 static int
4245 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4246 {
4247         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4248         int            error;
4249         unsigned int   val;
4250
4251         if (!txr)
4252                 return 0;
4253
4254         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4255         error = sysctl_handle_int(oidp, &val, 0, req);
4256         if (error || !req->newptr)
4257                 return error;
4258
4259         return 0;
4260 } /* ixgbe_sysctl_tdh_handler */
4261
4262 /************************************************************************
4263  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
4264  *
4265  *   Retrieves the TDT value from the hardware
4266  ************************************************************************/
4267 static int
4268 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4269 {
4270         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4271         int            error;
4272         unsigned int   val;
4273
4274         if (!txr)
4275                 return 0;
4276
4277         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4278         error = sysctl_handle_int(oidp, &val, 0, req);
4279         if (error || !req->newptr)
4280                 return error;
4281
4282         return 0;
4283 } /* ixgbe_sysctl_tdt_handler */
4284
4285 /************************************************************************
4286  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
4287  *
4288  *   Retrieves the RDH value from the hardware
4289  ************************************************************************/
4290 static int
4291 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4292 {
4293         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4294         int            error;
4295         unsigned int   val;
4296
4297         if (!rxr)
4298                 return 0;
4299
4300         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4301         error = sysctl_handle_int(oidp, &val, 0, req);
4302         if (error || !req->newptr)
4303                 return error;
4304
4305         return 0;
4306 } /* ixgbe_sysctl_rdh_handler */
4307
4308 /************************************************************************
4309  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
4310  *
4311  *   Retrieves the RDT value from the hardware
4312  ************************************************************************/
4313 static int
4314 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4315 {
4316         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4317         int            error;
4318         unsigned int   val;
4319
4320         if (!rxr)
4321                 return 0;
4322
4323         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4324         error = sysctl_handle_int(oidp, &val, 0, req);
4325         if (error || !req->newptr)
4326                 return error;
4327
4328         return 0;
4329 } /* ixgbe_sysctl_rdt_handler */
4330
4331 /************************************************************************
4332  * ixgbe_sysctl_interrupt_rate_handler
4333  ************************************************************************/
4334 static int
4335 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4336 {
4337         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4338         int             error;
4339         unsigned int    reg, usec, rate;
4340
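             /*
              * The EITR interval lives in bits 11:3; reading converts it
              * to an interrupt rate, and a write re-encodes the requested
              * rate (values below 1000 are raised to 1000; 0 or anything
              * >= 500000 removes the limit).
              */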
4341         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4342         usec = ((reg & 0x0FF8) >> 3);
4343         if (usec > 0)
4344                 rate = 500000 / usec;
4345         else
4346                 rate = 0;
4347         error = sysctl_handle_int(oidp, &rate, 0, req);
4348         if (error || !req->newptr)
4349                 return error;
4350         reg &= ~0xfff; /* default, no limitation */
4351         ixgbe_max_interrupt_rate = 0;
4352         if (rate > 0 && rate < 500000) {
4353                 if (rate < 1000)
4354                         rate = 1000;
4355                 ixgbe_max_interrupt_rate = rate;
4356                 reg |= ((4000000/rate) & 0xff8);
4357         }
4358         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4359
4360         return 0;
4361 } /* ixgbe_sysctl_interrupt_rate_handler */
4362
4363 /************************************************************************
4364  * ixgbe_add_device_sysctls
4365  ************************************************************************/
4366 static void
4367 ixgbe_add_device_sysctls(struct adapter *adapter)
4368 {
4369         device_t               dev = adapter->dev;
4370         struct ixgbe_hw        *hw = &adapter->hw;
4371         struct sysctl_oid_list *child;
4372         struct sysctl_ctx_list *ctx;
4373
4374         ctx = device_get_sysctl_ctx(dev);
4375         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4376
4377         /* Sysctls for all devices */
4378         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4379             adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4380
4381         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
4382             &ixgbe_enable_aim, 1, "Interrupt Moderation");
4383
4384         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4385             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
4386             IXGBE_SYSCTL_DESC_ADV_SPEED);
4387
4388 #ifdef IXGBE_DEBUG
4389         /* testing sysctls (for all devices) */
4390         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4391             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
4392             "I", "PCI Power State");
4393
4394         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4395             CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4396             ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4397 #endif
4398         /* for X550 series devices */
4399         if (hw->mac.type >= ixgbe_mac_X550)
4400                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4401                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
4402                     "I", "DMA Coalesce");
4403
4404         /* for WoL-capable devices */
4405         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4406                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4407                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4408                     ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
4409
4410                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4411                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
4412                     "I", "Enable/Disable Wake Up Filters");
4413         }
4414
4415         /* for X552/X557-AT devices */
4416         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4417                 struct sysctl_oid *phy_node;
4418                 struct sysctl_oid_list *phy_list;
4419
4420                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4421                     CTLFLAG_RD, NULL, "External PHY sysctls");
4422                 phy_list = SYSCTL_CHILDREN(phy_node);
4423
4424                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4425                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
4426                     "I", "Current External PHY Temperature (Celsius)");
4427
4428                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4429                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4430                     ixgbe_sysctl_phy_overtemp_occurred, "I",
4431                     "External PHY High Temperature Event Occurred");
4432         }
4433
4434         if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
4435                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
4436                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4437                     ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
4438         }
4439 } /* ixgbe_add_device_sysctls */
4440
4441 /************************************************************************
4442  * ixgbe_add_hw_stats
4443  *
4444  *   Add sysctl variables, one per statistic, to the system.
4445  ************************************************************************/
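     /*
      * The per-queue nodes land under dev.ix.<unit>.queue<N> and the MAC
      * counters under dev.ix.<unit>.mac_stats (path assumes the default
      * device sysctl tree).
      */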
4446 static void
4447 ixgbe_add_hw_stats(struct adapter *adapter)
4448 {
4449         device_t               dev = adapter->dev;
4450         struct tx_ring         *txr = adapter->tx_rings;
4451         struct rx_ring         *rxr = adapter->rx_rings;
4452         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4453         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
4454         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4455         struct ixgbe_hw_stats  *stats = &adapter->stats_pf;
4456         struct sysctl_oid      *stat_node, *queue_node;
4457         struct sysctl_oid_list *stat_list, *queue_list;
4458
4459 #define QUEUE_NAME_LEN 32
4460         char                   namebuf[QUEUE_NAME_LEN];
4461
4462         /* Driver Statistics */
4463         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4464             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
4465         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4466             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
4467         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4468             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
4469         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4470             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
4471
4472         for (int i = 0; i < adapter->num_queues; i++, txr++) {
4473                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4474                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4475                     CTLFLAG_RD, NULL, "Queue Name");
4476                 queue_list = SYSCTL_CHILDREN(queue_node);
4477
4478                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4479                     CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4480                     sizeof(&adapter->queues[i]),
4481                     ixgbe_sysctl_interrupt_rate_handler, "IU",
4482                     "Interrupt Rate");
4483                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4484                     CTLFLAG_RD, &(adapter->queues[i].irqs),
4485                     "irqs on this queue");
4486                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4487                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4488                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
4489                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4490                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4491                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
4492                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
4493                     CTLFLAG_RD, &txr->tso_tx, "TSO");
4494                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4495                     CTLFLAG_RD, &txr->no_tx_dma_setup,
4496                     "Driver tx dma failure in xmit");
4497                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4498                     CTLFLAG_RD, &txr->no_desc_avail,
4499                     "Queue No Descriptor Available");
4500                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4501                     CTLFLAG_RD, &txr->total_packets,
4502                     "Queue Packets Transmitted");
4503                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4504                     CTLFLAG_RD, &txr->br->br_drops,
4505                     "Packets dropped in buf_ring");
4506         }
4507
4508         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4509                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4510                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4511                     CTLFLAG_RD, NULL, "Queue Name");
4512                 queue_list = SYSCTL_CHILDREN(queue_node);
4513
4514                 struct lro_ctrl *lro = &rxr->lro;
4515
4516                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4517                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4518                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
4519                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4520                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4521                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
4522                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4523                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
4524                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4525                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
4526                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4527                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
4528                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
4529                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
4530 #if __FreeBSD_version < 1100000
4531                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4532                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4533                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4534                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4535 #else
4536                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4537                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4538                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4539                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4540 #endif
4541         }
4542
4543         /* MAC stats get their own sub node */
4544
4545         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4546             CTLFLAG_RD, NULL, "MAC Statistics");
4547         stat_list = SYSCTL_CHILDREN(stat_node);
4548
4549         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4550             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
4551         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4552             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
4553         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4554             CTLFLAG_RD, &stats->errbc, "Byte Errors");
4555         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4556             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
4557         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4558             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
4559         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4560             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
4561         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4562             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
4563         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
4564             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
4565
4566         /* Flow Control stats */
4567         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4568             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
4569         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4570             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
4571         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4572             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
4573         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4574             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
4575
4576         /* Packet Reception Stats */
4577         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4578             CTLFLAG_RD, &stats->tor, "Total Octets Received");
4579         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4580             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
4581         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4582             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
4583         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4584             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
4585         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4586             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
4587         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4588             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
4589         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4590             CTLFLAG_RD, &stats->prc64, "64 byte frames received");
4591         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4592             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
4593         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4594             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
4595         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4596             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
4597         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4598             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
4599         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4600             CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
4601         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4602             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
4603         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4604             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
4605         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4606             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
4607         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4608             CTLFLAG_RD, &stats->rjc, "Received Jabber");
4609         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4610             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
4611         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4612             CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
4613         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4614             CTLFLAG_RD, &stats->xec, "Checksum Errors");
4615
4616         /* Packet Transmission Stats */
4617         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4618             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
4619         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4620             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
4621         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4622             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
4623         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4624             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
4625         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4626             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
4627         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4628             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
4629         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4630             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
4631         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4632             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
4633         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4634             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
4635         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4636             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
4637         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4638             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
4639         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4640             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
4641 } /* ixgbe_add_hw_stats */
4642
4643 /************************************************************************
4644  * ixgbe_set_sysctl_value
4645  ************************************************************************/
4646 static void
4647 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4648     const char *description, int *limit, int value)
4649 {
4650         *limit = value;
4651         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4652             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4653             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4654 } /* ixgbe_set_sysctl_value */
4655
4656 /************************************************************************
4657  * ixgbe_sysctl_flowcntl
4658  *
4659  *   SYSCTL wrapper around setting Flow Control
4660  ************************************************************************/
4661 static int
4662 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4663 {
4664         struct adapter *adapter;
4665         int            error, fc;
4666
4667         adapter = (struct adapter *)arg1;
4668         fc = adapter->hw.fc.current_mode;
4669
4670         error = sysctl_handle_int(oidp, &fc, 0, req);
4671         if ((error) || (req->newptr == NULL))
4672                 return (error);
4673
4674         /* Don't bother if it's not changed */
4675         if (fc == adapter->hw.fc.current_mode)
4676                 return (0);
4677
4678         return ixgbe_set_flowcntl(adapter, fc);
4679 } /* ixgbe_sysctl_flowcntl */
4680
4681 /************************************************************************
4682  * ixgbe_set_flowcntl - Set flow control
4683  *
4684  *   Flow control values:
4685  *     0 - off
4686  *     1 - rx pause
4687  *     2 - tx pause
4688  *     3 - full
4689  ************************************************************************/
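     /*
      * Example (path assumes unit 0 of the default dev.ix.<unit> sysctl
      * tree): "sysctl dev.ix.0.fc=3" requests full rx/tx pause, while 0
      * turns flow control off and, with multiple queues, re-enables
      * per-queue RX drop.
      */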
4690 static int
4691 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4692 {
4693         switch (fc) {
4694         case ixgbe_fc_rx_pause:
4695         case ixgbe_fc_tx_pause:
4696         case ixgbe_fc_full:
4697                 adapter->hw.fc.requested_mode = fc;
4698                 if (adapter->num_queues > 1)
4699                         ixgbe_disable_rx_drop(adapter);
4700                 break;
4701         case ixgbe_fc_none:
4702                 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4703                 if (adapter->num_queues > 1)
4704                         ixgbe_enable_rx_drop(adapter);
4705                 break;
4706         default:
4707                 return (EINVAL);
4708         }
4709
4710         /* Don't autoneg if forcing a value */
4711         adapter->hw.fc.disable_fc_autoneg = TRUE;
4712         ixgbe_fc_enable(&adapter->hw);
4713
4714         return (0);
4715 } /* ixgbe_set_flowcntl */
4716
4717 /************************************************************************
4718  * ixgbe_sysctl_advertise
4719  *
4720  *   SYSCTL wrapper around setting advertised speed
4721  ************************************************************************/
4722 static int
4723 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4724 {
4725         struct adapter *adapter;
4726         int            error, advertise;
4727
4728         adapter = (struct adapter *)arg1;
4729         advertise = adapter->advertise;
4730
4731         error = sysctl_handle_int(oidp, &advertise, 0, req);
4732         if ((error) || (req->newptr == NULL))
4733                 return (error);
4734
4735         return ixgbe_set_advertise(adapter, advertise);
4736 } /* ixgbe_sysctl_advertise */
4737
4738 /************************************************************************
4739  * ixgbe_set_advertise - Control advertised link speed
4740  *
4741  *   Flags:
4742  *     0x1 - advertise 100 Mb
4743  *     0x2 - advertise 1G
4744  *     0x4 - advertise 10G
4745  *     0x8 - advertise 10 Mb (yes, Mb)
4746  ************************************************************************/
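     /*
      * The flags OR together: e.g. advertise = 0x6 (0x2 | 0x4) requests
      * 1G and 10G only, while 0x7 also adds 100 Mb.
      */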
4747 static int
4748 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4749 {
4750         device_t         dev;
4751         struct ixgbe_hw  *hw;
4752         ixgbe_link_speed speed = 0;
4753         ixgbe_link_speed link_caps = 0;
4754         s32              err = IXGBE_NOT_IMPLEMENTED;
4755         bool             negotiate = FALSE;
4756
4757         /* Checks to validate new value */
4758         if (adapter->advertise == advertise) /* no change */
4759                 return (0);
4760
4761         dev = adapter->dev;
4762         hw = &adapter->hw;
4763
4764         /* No speed changes for backplane media */
4765         if (hw->phy.media_type == ixgbe_media_type_backplane)
4766                 return (ENODEV);
4767
4768         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4769               (hw->phy.multispeed_fiber))) {
4770                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4771                 return (EINVAL);
4772         }
4773
4774         if (advertise < 0x1 || advertise > 0xF) {
4775                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4776                 return (EINVAL);
4777         }
4778
4779         if (hw->mac.ops.get_link_capabilities) {
4780                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4781                     &negotiate);
4782                 if (err != IXGBE_SUCCESS) {
4783                         device_printf(dev, "Unable to determine supported advertise speeds\n");
4784                         return (ENODEV);
4785                 }
4786         }
4787
4788         /* Set new value and report new advertised mode */
4789         if (advertise & 0x1) {
4790                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4791                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4792                         return (EINVAL);
4793                 }
4794                 speed |= IXGBE_LINK_SPEED_100_FULL;
4795         }
4796         if (advertise & 0x2) {
4797                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4798                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4799                         return (EINVAL);
4800                 }
4801                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4802         }
4803         if (advertise & 0x4) {
4804                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4805                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4806                         return (EINVAL);
4807                 }
4808                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4809         }
4810         if (advertise & 0x8) {
4811                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4812                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4813                         return (EINVAL);
4814                 }
4815                 speed |= IXGBE_LINK_SPEED_10_FULL;
4816         }
4817
4818         hw->mac.autotry_restart = TRUE;
4819         hw->mac.ops.setup_link(hw, speed, TRUE);
4820         adapter->advertise = advertise;
4821
4822         return (0);
4823 } /* ixgbe_set_advertise */
4824
4825 /************************************************************************
4826  * ixgbe_get_advertise - Get current advertised speed settings
4827  *
4828  *   Formatted for sysctl usage.
4829  *   Flags:
4830  *     0x1 - advertise 100 Mb
4831  *     0x2 - advertise 1G
4832  *     0x4 - advertise 10G
4833  *     0x8 - advertise 10 Mb (yes, Mb)
4834  ************************************************************************/
4835 static int
4836 ixgbe_get_advertise(struct adapter *adapter)
4837 {
4838         struct ixgbe_hw  *hw = &adapter->hw;
4839         int              speed;
4840         ixgbe_link_speed link_caps = 0;
4841         s32              err;
4842         bool             negotiate = FALSE;
4843
4844         /*
4845          * Advertised speed means nothing unless it's copper or
4846          * multi-speed fiber
4847          */
4848         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4849             !(hw->phy.multispeed_fiber))
4850                 return 0;
4851
4852         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4853         if (err != IXGBE_SUCCESS)
4854                 return 0;
4855
4856         speed =
4857             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4858             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4859             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4860             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4861
4862         return speed;
4863 } /* ixgbe_get_advertise */
4864
4865 /************************************************************************
4866  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4867  *
4868  *   For X552/X557-AT devices using an external PHY
4869  ************************************************************************/
4870 static int
4871 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4872 {
4873         struct adapter  *adapter = (struct adapter *)arg1;
4874         struct ixgbe_hw *hw = &adapter->hw;
4875         u16             reg;
4876
4877         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4878                 device_printf(adapter->dev,
4879                     "Device has no supported external thermal sensor.\n");
4880                 return (ENODEV);
4881         }
4882
4883         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4884             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4885                 device_printf(adapter->dev,
4886                     "Error reading from PHY's current temperature register\n");
4887                 return (EAGAIN);
4888         }
4889
4890         /* Shift temp for output */
4891         reg = reg >> 8;
4892
4893         return (sysctl_handle_int(oidp, NULL, reg, req));
4894 } /* ixgbe_sysctl_phy_temp */
4895
4896 /************************************************************************
4897  * ixgbe_sysctl_phy_overtemp_occurred
4898  *
4899  *   Reports (directly from the PHY) whether the current PHY
4900  *   temperature is over the overtemp threshold.
4901  ************************************************************************/
4902 static int
4903 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4904 {
4905         struct adapter  *adapter = (struct adapter *)arg1;
4906         struct ixgbe_hw *hw = &adapter->hw;
4907         u16             reg;
4908
4909         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4910                 device_printf(adapter->dev,
4911                     "Device has no supported external thermal sensor.\n");
4912                 return (ENODEV);
4913         }
4914
4915         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4916             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4917                 device_printf(adapter->dev,
4918                     "Error reading from PHY's temperature status register\n");
4919                 return (EAGAIN);
4920         }
4921
4922         /* Get occurrence bit */
4923         reg = !!(reg & 0x4000);
4924
4925         return (sysctl_handle_int(oidp, NULL, reg, req));
4926 } /* ixgbe_sysctl_phy_overtemp_occurred */
4927
4928 /************************************************************************
4929  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4930  *
4931  *   Control values:
4932  *     0/1 - off / on (use default value of 1000)
4933  *
4934  *     Legal timer values are:
4935  *     50,100,250,500,1000,2000,5000,10000
4936  *
4937  *     Turning off interrupt moderation will also turn this off.
4938  ************************************************************************/
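     /*
      * Example (assuming the default dev.ix.<unit> tree): "sysctl
      * dev.ix.0.dmac=1" enables coalescing with the default timer value
      * of 1000; any value outside the legal list above returns EINVAL.
      */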
4939 static int
4940 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4941 {
4942         struct adapter *adapter = (struct adapter *)arg1;
4943         struct ifnet   *ifp = adapter->ifp;
4944         int            error;
4945         u32            newval;
4946
4947         newval = adapter->dmac;
4948         error = sysctl_handle_int(oidp, &newval, 0, req);
4949         if ((error) || (req->newptr == NULL))
4950                 return (error);
4951
4952         switch (newval) {
4953         case 0:
4954                 /* Disabled */
4955                 adapter->dmac = 0;
4956                 break;
4957         case 1:
4958                 /* Enable and use default */
4959                 adapter->dmac = 1000;
4960                 break;
4961         case 50:
4962         case 100:
4963         case 250:
4964         case 500:
4965         case 1000:
4966         case 2000:
4967         case 5000:
4968         case 10000:
4969                 /* Legal values - allow */
4970                 adapter->dmac = newval;
4971                 break;
4972         default:
4973                 /* Do nothing, illegal value */
4974                 return (EINVAL);
4975         }
4976
4977         /* Re-initialize hardware if it's already running */
4978         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4979                 ixgbe_init(adapter);
4980
4981         return (0);
4982 } /* ixgbe_sysctl_dmac */
4983
4984 #ifdef IXGBE_DEBUG
4985 /************************************************************************
4986  * ixgbe_sysctl_power_state
4987  *
4988  *   Sysctl to test power states
4989  *   Values:
4990  *     0      - set device to D0
4991  *     3      - set device to D3
4992  *     (none) - get current device power state
4993  ************************************************************************/
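     /*
      * The only valid transitions are D0 -> D3 (DEVICE_SUSPEND) and
      * D3 -> D0 (DEVICE_RESUME); anything else returns EINVAL.
      */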
4994 static int
4995 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4996 {
4997         struct adapter *adapter = (struct adapter *)arg1;
4998         device_t       dev = adapter->dev;
4999         int            curr_ps, new_ps, error = 0;
5000
5001         curr_ps = new_ps = pci_get_powerstate(dev);
5002
5003         error = sysctl_handle_int(oidp, &new_ps, 0, req);
5004         if ((error) || (req->newptr == NULL))
5005                 return (error);
5006
5007         if (new_ps == curr_ps)
5008                 return (0);
5009
5010         if (new_ps == 3 && curr_ps == 0)
5011                 error = DEVICE_SUSPEND(dev);
5012         else if (new_ps == 0 && curr_ps == 3)
5013                 error = DEVICE_RESUME(dev);
5014         else
5015                 return (EINVAL);
5016
5017         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5018
5019         return (error);
5020 } /* ixgbe_sysctl_power_state */
5021 #endif
5022
5023 /************************************************************************
5024  * ixgbe_sysctl_eee_state
5025  *
5026  *   Sysctl to set EEE power saving feature
5027  *   Values:
5028  *     0      - disable EEE
5029  *     1      - enable EEE
5030  *     (none) - get current device EEE state
5031  ************************************************************************/
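     /*
      * A state change calls setup_eee() and then re-runs ixgbe_init()
      * to restart autonegotiation before caching the new value.
      */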
5032 static int
5033 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5034 {
5035         struct adapter *adapter = (struct adapter *)arg1;
5036         device_t       dev = adapter->dev;
5037         int            curr_eee, new_eee, error = 0;
5038         s32            retval;
5039
5040         curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5041
5042         error = sysctl_handle_int(oidp, &new_eee, 0, req);
5043         if ((error) || (req->newptr == NULL))
5044                 return (error);
5045
5046         /* Nothing to do */
5047         if (new_eee == curr_eee)
5048                 return (0);
5049
5050         /* Not supported */
5051         if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5052                 return (EINVAL);
5053
5054         /* Bounds checking */
5055         if ((new_eee < 0) || (new_eee > 1))
5056                 return (EINVAL);
5057
5058         retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5059         if (retval) {
5060                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5061                 return (EINVAL);
5062         }
5063
5064         /* Restart auto-neg */
5065         ixgbe_init(adapter);
5066
5067         device_printf(dev, "New EEE state: %d\n", new_eee);
5068
5069         /* Cache new value */
5070         if (new_eee)
5071                 adapter->feat_en |= IXGBE_FEATURE_EEE;
5072         else
5073                 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5074
5075         return (error);
5076 } /* ixgbe_sysctl_eee_state */
5077
5078 /************************************************************************
5079  * ixgbe_sysctl_wol_enable
5080  *
5081  *   Sysctl to enable/disable the WoL capability,
5082  *   if supported by the adapter.
5083  *
5084  *   Values:
5085  *     0 - disabled
5086  *     1 - enabled
5087  ************************************************************************/
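     /*
      * Enabling WoL on a port without WoL support (see
      * ixgbe_check_wol_support()) returns ENODEV.
      */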
5088 static int
5089 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5090 {
5091         struct adapter  *adapter = (struct adapter *)arg1;
5092         struct ixgbe_hw *hw = &adapter->hw;
5093         int             new_wol_enabled;
5094         int             error = 0;
5095
5096         new_wol_enabled = hw->wol_enabled;
5097         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5098         if ((error) || (req->newptr == NULL))
5099                 return (error);
5100         new_wol_enabled = !!(new_wol_enabled);
5101         if (new_wol_enabled == hw->wol_enabled)
5102                 return (0);
5103
5104         if (new_wol_enabled > 0 && !adapter->wol_support)
5105                 return (ENODEV);
5106         else
5107                 hw->wol_enabled = new_wol_enabled;
5108
5109         return (0);
5110 } /* ixgbe_sysctl_wol_enable */
5111
5112 /************************************************************************
5113  * ixgbe_sysctl_wufc - Wake Up Filter Control
5114  *
5115  *   Sysctl to enable/disable the types of packets that the
5116  *   adapter will wake up on upon receipt.
5117  *   Flags:
5118  *     0x1  - Link Status Change
5119  *     0x2  - Magic Packet
5120  *     0x4  - Direct Exact
5121  *     0x8  - Directed Multicast
5122  *     0x10 - Broadcast
5123  *     0x20 - ARP/IPv4 Request Packet
5124  *     0x40 - Direct IPv4 Packet
5125  *     0x80 - Direct IPv6 Packet
5126  *
5127  *   Settings not listed above will cause the sysctl to return an error.
5128  ************************************************************************/
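     /*
      * The flags OR together: e.g. 0x12 wakes on Magic Packet (0x2) and
      * Broadcast (0x10); any bit above 0xff is rejected with EINVAL.
      */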
5129 static int
5130 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5131 {
5132         struct adapter *adapter = (struct adapter *)arg1;
5133         int            error = 0;
5134         u32            new_wufc;
5135
5136         new_wufc = adapter->wufc;
5137
5138         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5139         if ((error) || (req->newptr == NULL))
5140                 return (error);
5141         if (new_wufc == adapter->wufc)
5142                 return (0);
5143
5144         if (new_wufc & 0xffffff00)
5145                 return (EINVAL);
5146
5147         new_wufc &= 0xff;
5148         new_wufc |= (0xffffff & adapter->wufc);
5149         adapter->wufc = new_wufc;
5150
5151         return (0);
5152 } /* ixgbe_sysctl_wufc */
5153
5154 #ifdef IXGBE_DEBUG
5155 /************************************************************************
5156  * ixgbe_sysctl_print_rss_config
5157  ************************************************************************/
5158 static int
5159 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5160 {
5161         struct adapter  *adapter = (struct adapter *)arg1;
5162         struct ixgbe_hw *hw = &adapter->hw;
5163         device_t        dev = adapter->dev;
5164         struct sbuf     *buf;
5165         int             error = 0, reta_size;
5166         u32             reg;
5167
5168         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5169         if (!buf) {
5170                 device_printf(dev, "Could not allocate sbuf for output.\n");
5171                 return (ENOMEM);
5172         }
5173
5174         // TODO: use sbufs to make a string to print out
5175         /* Set multiplier for RETA setup and table size based on MAC */
5176         switch (adapter->hw.mac.type) {
5177         case ixgbe_mac_X550:
5178         case ixgbe_mac_X550EM_x:
5179         case ixgbe_mac_X550EM_a:
5180                 reta_size = 128;
5181                 break;
5182         default:
5183                 reta_size = 32;
5184                 break;
5185         }
5186
5187         /* Print out the redirection table */
5188         sbuf_cat(buf, "\n");
5189         for (int i = 0; i < reta_size; i++) {
5190                 if (i < 32) {
5191                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5192                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5193                 } else {
5194                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5195                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5196                 }
5197         }
5198
5199         // TODO: print more config
5200
5201         error = sbuf_finish(buf);
5202         if (error)
5203                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5204
5205         sbuf_delete(buf);
5206
5207         return (0);
5208 } /* ixgbe_sysctl_print_rss_config */
5209 #endif /* IXGBE_DEBUG */
5210
5211 /************************************************************************
5212  * ixgbe_enable_rx_drop
5213  *
5214  *   Enable the hardware to drop packets when the buffer is
5215  *   full. This is useful with multiqueue, so that no single
5216  *   queue being full stalls the entire RX engine. We only
5217  *   enable this when Multiqueue is enabled AND Flow Control
5218  *   is disabled.
5219  ************************************************************************/
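     /*
      * Sets SRRCTL.DROP_EN on every queue and, when VFs are configured,
      * sets QDE (queue drop enable) for each VF pool as well.
      */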
5220 static void
5221 ixgbe_enable_rx_drop(struct adapter *adapter)
5222 {
5223         struct ixgbe_hw *hw = &adapter->hw;
5224         struct rx_ring  *rxr;
5225         u32             srrctl;
5226
5227         for (int i = 0; i < adapter->num_queues; i++) {
5228                 rxr = &adapter->rx_rings[i];
5229                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5230                 srrctl |= IXGBE_SRRCTL_DROP_EN;
5231                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5232         }
5233
5234         /* enable drop for each vf */
5235         for (int i = 0; i < adapter->num_vfs; i++) {
5236                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5237                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5238                     IXGBE_QDE_ENABLE));
5239         }
5240 } /* ixgbe_enable_rx_drop */
5241
5242 /************************************************************************
5243  * ixgbe_disable_rx_drop
5244  ************************************************************************/
5245 static void
5246 ixgbe_disable_rx_drop(struct adapter *adapter)
5247 {
5248         struct ixgbe_hw *hw = &adapter->hw;
5249         struct rx_ring  *rxr;
5250         u32             srrctl;
5251
5252         for (int i = 0; i < adapter->num_queues; i++) {
5253                 rxr = &adapter->rx_rings[i];
5254                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5255                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5256                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5257         }
5258
5259         /* disable drop for each vf */
5260         for (int i = 0; i < adapter->num_vfs; i++) {
5261                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5262                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5263         }
5264 } /* ixgbe_disable_rx_drop */
5265
5266 /************************************************************************
5267  * ixgbe_rearm_queues
5268  ************************************************************************/
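     /*
      * Requests a software interrupt for the queues in the given bitmap
      * by writing EICS (82598) or EICS_EX[0]/[1] (82599 and newer).
      */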
5269 static void
5270 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5271 {
5272         u32 mask;
5273
5274         switch (adapter->hw.mac.type) {
5275         case ixgbe_mac_82598EB:
5276                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5277                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5278                 break;
5279         case ixgbe_mac_82599EB:
5280         case ixgbe_mac_X540:
5281         case ixgbe_mac_X550:
5282         case ixgbe_mac_X550EM_x:
5283         case ixgbe_mac_X550EM_a:
5284                 mask = (queues & 0xFFFFFFFF);
5285                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5286                 mask = (queues >> 32);
5287                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5288                 break;
5289         default:
5290                 break;
5291         }
5292 } /* ixgbe_rearm_queues */
5293