1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41
42 #include "ixgbe.h"
43
44 /************************************************************************
45  * Driver version
46  ************************************************************************/
47 char ixgbe_driver_version[] = "3.2.12-k";
48
49
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixgbe_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105         /* required last entry */
106         {0, 0, 0, 0, 0}
107 };
108
109 /************************************************************************
110  * Table of branding strings
111  ************************************************************************/
112 static char    *ixgbe_strings[] = {
113         "Intel(R) PRO/10GbE PCI-Express Network Driver"
114 };
115
116 /************************************************************************
117  * Function prototypes
118  ************************************************************************/
119 static int      ixgbe_probe(device_t);
120 static int      ixgbe_attach(device_t);
121 static int      ixgbe_detach(device_t);
122 static int      ixgbe_shutdown(device_t);
123 static int      ixgbe_suspend(device_t);
124 static int      ixgbe_resume(device_t);
125 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void     ixgbe_init(void *);
127 static void     ixgbe_init_locked(struct adapter *);
128 static void     ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131 #endif
132 static void     ixgbe_init_device_features(struct adapter *);
133 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void     ixgbe_add_media_types(struct adapter *);
135 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int      ixgbe_media_change(struct ifnet *);
137 static int      ixgbe_allocate_pci_resources(struct adapter *);
138 static void     ixgbe_get_slot_info(struct adapter *);
139 static int      ixgbe_allocate_msix(struct adapter *);
140 static int      ixgbe_allocate_legacy(struct adapter *);
141 static int      ixgbe_configure_interrupts(struct adapter *);
142 static void     ixgbe_free_pci_resources(struct adapter *);
143 static void     ixgbe_local_timer(void *);
144 static int      ixgbe_setup_interface(device_t, struct adapter *);
145 static void     ixgbe_config_gpie(struct adapter *);
146 static void     ixgbe_config_dmac(struct adapter *);
147 static void     ixgbe_config_delay_values(struct adapter *);
148 static void     ixgbe_config_link(struct adapter *);
149 static void     ixgbe_check_wol_support(struct adapter *);
150 static int      ixgbe_setup_low_power_mode(struct adapter *);
151 static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153 static void     ixgbe_initialize_transmit_units(struct adapter *);
154 static void     ixgbe_initialize_receive_units(struct adapter *);
155 static void     ixgbe_enable_rx_drop(struct adapter *);
156 static void     ixgbe_disable_rx_drop(struct adapter *);
157 static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159 static void     ixgbe_enable_intr(struct adapter *);
160 static void     ixgbe_disable_intr(struct adapter *);
161 static void     ixgbe_update_stats_counters(struct adapter *);
162 static void     ixgbe_set_promisc(struct adapter *);
163 static void     ixgbe_set_multi(struct adapter *);
164 static void     ixgbe_update_link_status(struct adapter *);
165 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void     ixgbe_configure_ivars(struct adapter *);
167 static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173 static void     ixgbe_add_device_sysctls(struct adapter *);
174 static void     ixgbe_add_hw_stats(struct adapter *);
175 static int      ixgbe_set_flowcntl(struct adapter *, int);
176 static int      ixgbe_set_advertise(struct adapter *, int);
177 static int      ixgbe_get_advertise(struct adapter *);
178
179 /* Sysctl handlers */
180 static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                        const char *, int *, int);
182 static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188 #ifdef IXGBE_DEBUG
189 static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191 #endif
192 static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200 /* Support for pluggable optic modules */
201 static bool     ixgbe_sfp_probe(struct adapter *);
202
203 /* Legacy (single vector) interrupt handler */
204 static void     ixgbe_legacy_irq(void *);
205
206 /* The MSI/MSI-X Interrupt handlers */
207 static void     ixgbe_msix_que(void *);
208 static void     ixgbe_msix_link(void *);
209
210 /* Deferred interrupt tasklets */
211 static void     ixgbe_handle_que(void *, int);
212 static void     ixgbe_handle_link(void *, int);
213 static void     ixgbe_handle_msf(void *, int);
214 static void     ixgbe_handle_mod(void *, int);
215 static void     ixgbe_handle_phy(void *, int);
216
217
218 /************************************************************************
219  *  FreeBSD Device Interface Entry Points
220  ************************************************************************/
221 static device_method_t ix_methods[] = {
222         /* Device interface */
223         DEVMETHOD(device_probe, ixgbe_probe),
224         DEVMETHOD(device_attach, ixgbe_attach),
225         DEVMETHOD(device_detach, ixgbe_detach),
226         DEVMETHOD(device_shutdown, ixgbe_shutdown),
227         DEVMETHOD(device_suspend, ixgbe_suspend),
228         DEVMETHOD(device_resume, ixgbe_resume),
229 #ifdef PCI_IOV
230         DEVMETHOD(pci_iov_init, ixgbe_init_iov),
231         DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
232         DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
233 #endif /* PCI_IOV */
234         DEVMETHOD_END
235 };
236
237 static driver_t ix_driver = {
238         "ix", ix_methods, sizeof(struct adapter),
239 };
240
241 devclass_t ix_devclass;
242 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
243
244 MODULE_DEPEND(ix, pci, 1, 1, 1);
245 MODULE_DEPEND(ix, ether, 1, 1, 1);
246 #ifdef DEV_NETMAP
247 MODULE_DEPEND(ix, netmap, 1, 1, 1);
248 #endif
249
250 /*
251  * TUNEABLE PARAMETERS:
252  */
253
254 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
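/*
 * The CTLFLAG_RDTUN knobs below live under the hw.ix sysctl node and are
 * read at boot, so they can also be set as loader tunables.  A minimal,
 * purely illustrative /boot/loader.conf example:
 *
 *   hw.ix.enable_aim="0"
 *   hw.ix.num_queues="4"
 *   hw.ix.rxd="2048"
 */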
255
256 /*
257  * AIM: Adaptive Interrupt Moderation
258  * which means that the interrupt rate
259  * is varied over time based on the
260  * traffic for that interrupt vector
261  */
262 static int ixgbe_enable_aim = TRUE;
263 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
264     "Enable adaptive interrupt moderation");
265
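/*
 * Default interrupt throttle ceiling.  Assuming IXGBE_LOW_LATENCY is the
 * 128 defined in ixgbe.h, this works out to roughly 31250 interrupts per
 * second per vector.
 */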
266 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
267 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
268     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
269
270 /* How many packets rxeof tries to clean at a time */
271 static int ixgbe_rx_process_limit = 256;
272 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
273     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
274
275 /* How many packets txeof tries to clean at a time */
276 static int ixgbe_tx_process_limit = 256;
277 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
278     &ixgbe_tx_process_limit, 0,
279     "Maximum number of sent packets to process at a time, -1 means unlimited");
280
281 /* Flow control setting, default to full */
282 static int ixgbe_flow_control = ixgbe_fc_full;
283 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
284     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
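/*
 * For reference, the ixgbe_fc_* enum used here has, in the shared code,
 * 0 = none, 1 = rx pause, 2 = tx pause and 3 = full, and the
 * hw.ix.flow_control tunable takes the same numeric values.
 */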
285
286 /* Advertise Speed, default to 0 (auto) */
287 static int ixgbe_advertise_speed = 0;
288 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
289     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
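/*
 * This is a bit mask interpreted by ixgbe_set_advertise(); roughly,
 * 0x1, 0x2 and 0x4 request 100 Mb, 1 Gb and 10 Gb respectively, and
 * 0 leaves the hardware default (autonegotiate everything supported).
 */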
290
291 /*
292  * Smart speed setting, default to on.
293  * This only works as a compile-time option
294  * right now, since it is applied during attach;
295  * set this to 'ixgbe_smart_speed_off' to
296  * disable.
297  */
298 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
299
300 /*
301  * MSI-X should be the default for best performance,
302  * but this allows it to be forced off for testing.
303  */
304 static int ixgbe_enable_msix = 1;
305 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
306     "Enable MSI-X interrupts");
307
308 /*
309  * Number of queues: if set to 0, the driver
310  * autoconfigures based on the number of CPUs,
311  * with a maximum of 8. This can be
312  * overridden manually here.
313  */
314 static int ixgbe_num_queues = 0;
315 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316     "Number of queues to configure, 0 indicates autoconfigure");
317
318 /*
319  * Number of TX descriptors per ring,
320  * setting higher than RX as this seems
321  * the better performing choice.
322  */
323 static int ixgbe_txd = PERFORM_TXD;
324 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
325     "Number of transmit descriptors per queue");
326
327 /* Number of RX descriptors per ring */
328 static int ixgbe_rxd = PERFORM_RXD;
329 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
330     "Number of receive descriptors per queue");
331
332 /*
333  * Turning this on allows the use of
334  * unsupported SFP+ modules; note that if
335  * you do so, you are on your own :)
336  */
337 static int allow_unsupported_sfp = FALSE;
338 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
339     &allow_unsupported_sfp, 0,
340     "Allow unsupported SFP modules...use at your own risk");
341
342 /*
343  * Not sure if Flow Director is fully baked,
344  * so we'll default to turning it off.
345  */
346 static int ixgbe_enable_fdir = 0;
347 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
348     "Enable Flow Director");
349
350 /* Legacy Transmit (single queue) */
351 static int ixgbe_enable_legacy_tx = 0;
352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
353     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
354
355 /* Receive-Side Scaling */
356 static int ixgbe_enable_rss = 1;
357 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
358     "Enable Receive-Side Scaling (RSS)");
359
360 /* Keep running tab on them for sanity check */
361 static int ixgbe_total_ports;
362
363 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
364 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
365
366 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367
368 /************************************************************************
369  * ixgbe_initialize_rss_mapping
370  ************************************************************************/
371 static void
372 ixgbe_initialize_rss_mapping(struct adapter *adapter)
373 {
374         struct ixgbe_hw *hw = &adapter->hw;
375         u32             reta = 0, mrqc, rss_key[10];
376         int             queue_id, table_size, index_mult;
377         int             i, j;
378         u32             rss_hash_config;
379
380         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
381                 /* Fetch the configured RSS key */
382                 rss_getkey((uint8_t *)&rss_key);
383         } else {
384                 /* set up random bits */
385                 arc4rand(&rss_key, sizeof(rss_key), 0);
386         }
387
388         /* Set multiplier for RETA setup and table size based on MAC */
389         index_mult = 0x1;
390         table_size = 128;
391         switch (adapter->hw.mac.type) {
392         case ixgbe_mac_82598EB:
393                 index_mult = 0x11;
394                 break;
395         case ixgbe_mac_X550:
396         case ixgbe_mac_X550EM_x:
397         case ixgbe_mac_X550EM_a:
398                 table_size = 512;
399                 break;
400         default:
401                 break;
402         }
403
404         /* Set up the redirection table */
405         for (i = 0, j = 0; i < table_size; i++, j++) {
406                 if (j == adapter->num_queues)
407                         j = 0;
408
409                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
410                         /*
411                          * Fetch the RSS bucket id for the given indirection
412                          * entry. Cap it at the number of configured buckets
413                          * (which is num_queues.)
414                          */
415                         queue_id = rss_get_indirection_to_bucket(i);
416                         queue_id = queue_id % adapter->num_queues;
417                 } else
418                         queue_id = (j * index_mult);
419
420                 /*
421                  * The low 8 bits are for hash value (n+0);
422                  * The next 8 bits are for hash value (n+1), etc.
423                  */
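                /*
                 * Worked example of the packing below: once the first four
                 * entries have been assigned queues q0..q3, the register
                 * written when i == 3 is
                 *     (q3 << 24) | (q2 << 16) | (q1 << 8) | q0.
                 */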
424                 reta = reta >> 8;
425                 reta = reta | (((uint32_t)queue_id) << 24);
426                 if ((i & 3) == 3) {
427                         if (i < 128)
428                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
429                         else
430                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
431                                     reta);
432                         reta = 0;
433                 }
434         }
435
436         /* Now fill our hash function seeds */
437         for (i = 0; i < 10; i++)
438                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
439
440         /* Perform hash on these packet types */
441         if (adapter->feat_en & IXGBE_FEATURE_RSS)
442                 rss_hash_config = rss_gethashconfig();
443         else {
444                 /*
445                  * Disable UDP - IP fragments aren't currently being handled
446                  * and so we end up with a mix of 2-tuple and 4-tuple
447                  * traffic.
448                  */
449                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
450                                 | RSS_HASHTYPE_RSS_TCP_IPV4
451                                 | RSS_HASHTYPE_RSS_IPV6
452                                 | RSS_HASHTYPE_RSS_TCP_IPV6
453                                 | RSS_HASHTYPE_RSS_IPV6_EX
454                                 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
455         }
456
457         mrqc = IXGBE_MRQC_RSSEN;
458         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
459                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
460         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
461                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
462         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
463                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
464         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
465                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
466         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
467                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
468         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
469                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
470         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
471                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
472         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
473                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
474         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
475                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
476         mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
477         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
478 } /* ixgbe_initialize_rss_mapping */
479
480 /************************************************************************
481  * ixgbe_initialize_receive_units - Setup receive registers and features.
482  ************************************************************************/
483 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
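/*
 * BSIZEPKT_ROUNDUP lets the receive buffer size be rounded up to the next
 * whole SRRCTL BSIZEPKT unit, which is (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT)
 * bytes -- 1 KB, assuming the usual shift of 10.
 */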
484
485 static void
486 ixgbe_initialize_receive_units(struct adapter *adapter)
487 {
488         struct rx_ring  *rxr = adapter->rx_rings;
489         struct ixgbe_hw *hw = &adapter->hw;
490         struct ifnet    *ifp = adapter->ifp;
491         int             i, j;
492         u32             bufsz, fctrl, srrctl, rxcsum;
493         u32             hlreg;
494
495         /*
496          * Make sure receives are disabled while
497          * setting up the descriptor ring
498          */
499         ixgbe_disable_rx(hw);
500
501         /* Enable broadcasts */
502         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
503         fctrl |= IXGBE_FCTRL_BAM;
504         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
505                 fctrl |= IXGBE_FCTRL_DPF;
506                 fctrl |= IXGBE_FCTRL_PMCF;
507         }
508         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
509
510         /* Set for Jumbo Frames? */
511         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
512         if (ifp->if_mtu > ETHERMTU)
513                 hlreg |= IXGBE_HLREG0_JUMBOEN;
514         else
515                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
516
517 #ifdef DEV_NETMAP
518         /* CRC stripping is conditional in Netmap */
519         if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
520             (ifp->if_capenable & IFCAP_NETMAP) &&
521             !ix_crcstrip)
522                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
523         else
524 #endif /* DEV_NETMAP */
525                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
526
527         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
528
529         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
530             IXGBE_SRRCTL_BSIZEPKT_SHIFT;
531
532         for (i = 0; i < adapter->num_queues; i++, rxr++) {
533                 u64 rdba = rxr->rxdma.dma_paddr;
534                 j = rxr->me;
535
536                 /* Setup the Base and Length of the Rx Descriptor Ring */
537                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
538                     (rdba & 0x00000000ffffffffULL));
539                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
540                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
541                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
542
543                 /* Set up the SRRCTL register */
544                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
545                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
546                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
547                 srrctl |= bufsz;
548                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
549
550                 /*
551                  * Set DROP_EN iff we have no flow control and >1 queue.
552                  * Note that srrctl was cleared shortly before during reset,
553                  * so we do not need to clear the bit, but do it just in case
554                  * this code is moved elsewhere.
555                  */
556                 if (adapter->num_queues > 1 &&
557                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
558                         srrctl |= IXGBE_SRRCTL_DROP_EN;
559                 } else {
560                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
561                 }
562
563                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
564
565                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
566                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
567                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
568
569                 /* Set the driver rx tail address */
570                 rxr->tail =  IXGBE_RDT(rxr->me);
571         }
572
573         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
574                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
575                             | IXGBE_PSRTYPE_UDPHDR
576                             | IXGBE_PSRTYPE_IPV4HDR
577                             | IXGBE_PSRTYPE_IPV6HDR;
578                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
579         }
580
581         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
582
583         ixgbe_initialize_rss_mapping(adapter);
584
585         if (adapter->num_queues > 1) {
586                 /* RSS and RX IPP Checksum are mutually exclusive */
587                 rxcsum |= IXGBE_RXCSUM_PCSD;
588         }
589
590         if (ifp->if_capenable & IFCAP_RXCSUM)
591                 rxcsum |= IXGBE_RXCSUM_PCSD;
592
593         /* This is useful for calculating UDP/IP fragment checksums */
594         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
595                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
596
597         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
598
599         return;
600 } /* ixgbe_initialize_receive_units */
601
602 /************************************************************************
603  * ixgbe_initialize_transmit_units - Enable transmit units.
604  ************************************************************************/
605 static void
606 ixgbe_initialize_transmit_units(struct adapter *adapter)
607 {
608         struct tx_ring  *txr = adapter->tx_rings;
609         struct ixgbe_hw *hw = &adapter->hw;
610
611         /* Setup the Base and Length of the Tx Descriptor Ring */
612         for (int i = 0; i < adapter->num_queues; i++, txr++) {
613                 u64 tdba = txr->txdma.dma_paddr;
614                 u32 txctrl = 0;
615                 int j = txr->me;
616
617                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
618                     (tdba & 0x00000000ffffffffULL));
619                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
620                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
621                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
622
623                 /* Setup the HW Tx Head and Tail descriptor pointers */
624                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
625                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
626
627                 /* Cache the tail address */
628                 txr->tail = IXGBE_TDT(j);
629
630                 /* Disable Head Writeback */
631                 /*
632                  * Note: for X550 series devices, these registers are actually
633                  * prefixed with TPH_ instead of DCA_, but the addresses and
634                  * fields remain the same.
635                  */
636                 switch (hw->mac.type) {
637                 case ixgbe_mac_82598EB:
638                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
639                         break;
640                 default:
641                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
642                         break;
643                 }
644                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
645                 switch (hw->mac.type) {
646                 case ixgbe_mac_82598EB:
647                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
648                         break;
649                 default:
650                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
651                         break;
652                 }
653
654         }
655
656         if (hw->mac.type != ixgbe_mac_82598EB) {
657                 u32 dmatxctl, rttdcs;
658
659                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
660                 dmatxctl |= IXGBE_DMATXCTL_TE;
661                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
662                 /* Disable arbiter to set MTQC */
663                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
664                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
665                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
666                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
667                     ixgbe_get_mtqc(adapter->iov_mode));
668                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
669                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
670         }
671
672         return;
673 } /* ixgbe_initialize_transmit_units */
674
675 /************************************************************************
676  * ixgbe_attach - Device initialization routine
677  *
678  *   Called when the driver is being loaded.
679  *   Identifies the type of hardware, allocates all resources
680  *   and initializes the hardware.
681  *
682  *   return 0 on success, positive on failure
683  ************************************************************************/
684 static int
685 ixgbe_attach(device_t dev)
686 {
687         struct adapter  *adapter;
688         struct ixgbe_hw *hw;
689         int             error = 0;
690         u32             ctrl_ext;
691
692         INIT_DEBUGOUT("ixgbe_attach: begin");
693
694         /* Allocate, clear, and link in our adapter structure */
695         adapter = device_get_softc(dev);
696         adapter->hw.back = adapter;
697         adapter->dev = dev;
698         hw = &adapter->hw;
699
700         /* Core Lock Init*/
701         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
702
703         /* Set up the timer callout */
704         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
705
706         /* Determine hardware revision */
707         hw->vendor_id = pci_get_vendor(dev);
708         hw->device_id = pci_get_device(dev);
709         hw->revision_id = pci_get_revid(dev);
710         hw->subsystem_vendor_id = pci_get_subvendor(dev);
711         hw->subsystem_device_id = pci_get_subdevice(dev);
712
713         /*
714          * Make sure BUSMASTER is set
715          */
716         pci_enable_busmaster(dev);
717
718         /* Do base PCI setup - map BAR0 */
719         if (ixgbe_allocate_pci_resources(adapter)) {
720                 device_printf(dev, "Allocation of PCI resources failed\n");
721                 error = ENXIO;
722                 goto err_out;
723         }
724
725         /* let hardware know driver is loaded */
726         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
727         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
728         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
729
730         /*
731          * Initialize the shared code
732          */
733         if (ixgbe_init_shared_code(hw)) {
734                 device_printf(dev, "Unable to initialize the shared code\n");
735                 error = ENXIO;
736                 goto err_out;
737         }
738
739         if (hw->mbx.ops.init_params)
740                 hw->mbx.ops.init_params(hw);
741
742         hw->allow_unsupported_sfp = allow_unsupported_sfp;
743
744         /* Pick up the 82599 settings */
745         if (hw->mac.type != ixgbe_mac_82598EB) {
746                 hw->phy.smart_speed = ixgbe_smart_speed;
747                 adapter->num_segs = IXGBE_82599_SCATTER;
748         } else
749                 adapter->num_segs = IXGBE_82598_SCATTER;
750
751         ixgbe_init_device_features(adapter);
752
753         if (ixgbe_configure_interrupts(adapter)) {
754                 error = ENXIO;
755                 goto err_out;
756         }
757
758         /* Allocate multicast array memory. */
759         adapter->mta = malloc(sizeof(*adapter->mta) *
760             MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
761         if (adapter->mta == NULL) {
762                 device_printf(dev, "Can not allocate multicast setup array\n");
763                 error = ENOMEM;
764                 goto err_out;
765         }
766
767         /* Enable WoL (if supported) */
768         ixgbe_check_wol_support(adapter);
769
770         /* Register for VLAN events */
771         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
772             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
773         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
774             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
775
776         /* Verify adapter fan is still functional (if applicable) */
777         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
778                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
779                 ixgbe_check_fan_failure(adapter, esdp, FALSE);
780         }
781
782         /* Ensure SW/FW semaphore is free */
783         ixgbe_init_swfw_semaphore(hw);
784
785         /* Enable EEE power saving */
786         if (adapter->feat_en & IXGBE_FEATURE_EEE)
787                 hw->mac.ops.setup_eee(hw, TRUE);
788
789         /* Set an initial default flow control value */
790         hw->fc.requested_mode = ixgbe_flow_control;
791
792         /* Sysctls for limiting the amount of work done in the taskqueues */
793         ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
794             "max number of rx packets to process",
795             &adapter->rx_process_limit, ixgbe_rx_process_limit);
796
797         ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
798             "max number of tx packets to process",
799             &adapter->tx_process_limit, ixgbe_tx_process_limit);
800
801         /* Do descriptor calc and sanity checks */
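        /*
         * The ring byte size must be a multiple of DBA_ALIGN (128 bytes in
         * this driver); with 16-byte advanced descriptors that means the
         * descriptor counts themselves must be multiples of 8.
         */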
802         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
803             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
804                 device_printf(dev, "TXD config issue, using default!\n");
805                 adapter->num_tx_desc = DEFAULT_TXD;
806         } else
807                 adapter->num_tx_desc = ixgbe_txd;
808
809         /*
810          * With many RX rings it is easy to exceed the
811          * system mbuf allocation. Tuning nmbclusters
812          * can alleviate this.
813          */
814         if (nmbclusters > 0) {
815                 int s;
816                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
817                 if (s > nmbclusters) {
818                         device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
819                         ixgbe_rxd = DEFAULT_RXD;
820                 }
821         }
822
823         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
824             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
825                 device_printf(dev, "RXD config issue, using default!\n");
826                 adapter->num_rx_desc = DEFAULT_RXD;
827         } else
828                 adapter->num_rx_desc = ixgbe_rxd;
829
830         /* Allocate our TX/RX Queues */
831         if (ixgbe_allocate_queues(adapter)) {
832                 error = ENOMEM;
833                 goto err_out;
834         }
835
836         hw->phy.reset_if_overtemp = TRUE;
837         error = ixgbe_reset_hw(hw);
838         hw->phy.reset_if_overtemp = FALSE;
839         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
840                 /*
841                  * No optics in this port, set up
842                  * so the timer routine will probe
843                  * for later insertion.
844                  */
845                 adapter->sfp_probe = TRUE;
846                 error = IXGBE_SUCCESS;
847         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
848                 device_printf(dev, "Unsupported SFP+ module detected!\n");
849                 error = EIO;
850                 goto err_late;
851         } else if (error) {
852                 device_printf(dev, "Hardware initialization failed\n");
853                 error = EIO;
854                 goto err_late;
855         }
856
857         /* Make sure we have a good EEPROM before we read from it */
858         if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
859                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
860                 error = EIO;
861                 goto err_late;
862         }
863
864         /* Setup OS specific network interface */
865         if (ixgbe_setup_interface(dev, adapter) != 0)
866                 goto err_late;
867
868         if (adapter->feat_en & IXGBE_FEATURE_MSIX)
869                 error = ixgbe_allocate_msix(adapter);
870         else
871                 error = ixgbe_allocate_legacy(adapter);
872         if (error)
873                 goto err_late;
874
875         error = ixgbe_start_hw(hw);
876         switch (error) {
877         case IXGBE_ERR_EEPROM_VERSION:
878                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
879                 break;
880         case IXGBE_ERR_SFP_NOT_SUPPORTED:
881                 device_printf(dev, "Unsupported SFP+ Module\n");
882                 error = EIO;
883                 goto err_late;
884         case IXGBE_ERR_SFP_NOT_PRESENT:
885                 device_printf(dev, "No SFP+ Module found\n");
886                 /* falls thru */
887         default:
888                 break;
889         }
890
891         /* Enable the optics for 82599 SFP+ fiber */
892         ixgbe_enable_tx_laser(hw);
893
894         /* Enable power to the phy. */
895         ixgbe_set_phy_power(hw, TRUE);
896
897         /* Initialize statistics */
898         ixgbe_update_stats_counters(adapter);
899
900         /* Check PCIE slot type/speed/width */
901         ixgbe_get_slot_info(adapter);
902
903         /*
904          * Do time init and sysctl init here, but
905          * only on the first port of a bypass adapter.
906          */
907         ixgbe_bypass_init(adapter);
908
909         /* Set an initial dmac value */
910         adapter->dmac = 0;
911         /* Set initial advertised speeds (if applicable) */
912         adapter->advertise = ixgbe_get_advertise(adapter);
913
914         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
915                 ixgbe_define_iov_schemas(dev, &error);
916
917         /* Add sysctls */
918         ixgbe_add_device_sysctls(adapter);
919         ixgbe_add_hw_stats(adapter);
920
921         /* For Netmap */
922         adapter->init_locked = ixgbe_init_locked;
923         adapter->stop_locked = ixgbe_stop;
924
925         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
926                 ixgbe_netmap_attach(adapter);
927
928         INIT_DEBUGOUT("ixgbe_attach: end");
929
930         return (0);
931
932 err_late:
933         ixgbe_free_transmit_structures(adapter);
934         ixgbe_free_receive_structures(adapter);
935         free(adapter->queues, M_DEVBUF);
936 err_out:
937         if (adapter->ifp != NULL)
938                 if_free(adapter->ifp);
939         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
940         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
941         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
942         ixgbe_free_pci_resources(adapter);
943         free(adapter->mta, M_IXGBE);
944         IXGBE_CORE_LOCK_DESTROY(adapter);
945
946         return (error);
947 } /* ixgbe_attach */
948
949 /************************************************************************
950  * ixgbe_check_wol_support
951  *
952  *   Checks whether the adapter's ports are capable of
953  *   Wake On LAN by reading the adapter's NVM.
954  *
955  *   Sets each port's hw->wol_enabled value depending
956  *   on the value read here.
957  ************************************************************************/
958 static void
959 ixgbe_check_wol_support(struct adapter *adapter)
960 {
961         struct ixgbe_hw *hw = &adapter->hw;
962         u16             dev_caps = 0;
963
964         /* Find out WoL support for port */
965         adapter->wol_support = hw->wol_enabled = 0;
966         ixgbe_get_device_caps(hw, &dev_caps);
967         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
968             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
969              hw->bus.func == 0))
970                 adapter->wol_support = hw->wol_enabled = 1;
971
972         /* Save initial wake up filter configuration */
973         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
974
975         return;
976 } /* ixgbe_check_wol_support */
977
978 /************************************************************************
979  * ixgbe_setup_interface
980  *
981  *   Setup networking device structure and register an interface.
982  ************************************************************************/
983 static int
984 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
985 {
986         struct ifnet *ifp;
987
988         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
989
990         ifp = adapter->ifp = if_alloc(IFT_ETHER);
991         if (ifp == NULL) {
992                 device_printf(dev, "can not allocate ifnet structure\n");
993                 return (-1);
994         }
995         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
996         ifp->if_baudrate = IF_Gbps(10);
997         ifp->if_init = ixgbe_init;
998         ifp->if_softc = adapter;
999         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1000         ifp->if_ioctl = ixgbe_ioctl;
1001 #if __FreeBSD_version >= 1100036
1002         if_setgetcounterfn(ifp, ixgbe_get_counter);
1003 #endif
1004 #if __FreeBSD_version >= 1100045
1005         /* TSO parameters */
1006         ifp->if_hw_tsomax = 65518;
1007         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1008         ifp->if_hw_tsomaxsegsize = 2048;
1009 #endif
1010         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1011                 ifp->if_start = ixgbe_legacy_start;
1012                 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1013                 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1014                 IFQ_SET_READY(&ifp->if_snd);
1015                 ixgbe_start_locked = ixgbe_legacy_start_locked;
1016                 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1017         } else {
1018                 ifp->if_transmit = ixgbe_mq_start;
1019                 ifp->if_qflush = ixgbe_qflush;
1020                 ixgbe_start_locked = ixgbe_mq_start_locked;
1021                 ixgbe_ring_empty = drbr_empty;
1022         }
1023
1024         ether_ifattach(ifp, adapter->hw.mac.addr);
1025
1026         adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1027
1028         /*
1029          * Tell the upper layer(s) we support long frames.
1030          */
1031         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1032
1033         /* Set capability flags */
1034         ifp->if_capabilities |= IFCAP_HWCSUM
1035                              |  IFCAP_HWCSUM_IPV6
1036                              |  IFCAP_TSO
1037                              |  IFCAP_LRO
1038                              |  IFCAP_VLAN_HWTAGGING
1039                              |  IFCAP_VLAN_HWTSO
1040                              |  IFCAP_VLAN_HWCSUM
1041                              |  IFCAP_JUMBO_MTU
1042                              |  IFCAP_VLAN_MTU
1043                              |  IFCAP_HWSTATS;
1044
1045         /* Enable the above capabilities by default */
1046         ifp->if_capenable = ifp->if_capabilities;
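        /*
         * These capabilities can typically be toggled from userland with
         * ifconfig(8), e.g. "ifconfig ix0 -lro" or "ifconfig ix0 txcsum",
         * which reaches this driver through SIOCSIFCAP in ixgbe_ioctl().
         */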
1047
1048         /*
1049          * Don't turn this on by default: if vlans are
1050          * created on another pseudo device (e.g. lagg),
1051          * vlan events are not passed through, breaking
1052          * operation, but with HW FILTER off it works. If
1053          * using vlans directly on the ixgbe driver you can
1054          * enable this and get full hardware tag filtering.
1055          */
1056         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1057
1058         /*
1059          * Specify the media types supported by this adapter and register
1060          * callbacks to update media and link information
1061          */
1062         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1063             ixgbe_media_status);
1064
1065         adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1066         ixgbe_add_media_types(adapter);
1067
1068         /* Set autoselect media by default */
1069         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1070
1071         return (0);
1072 } /* ixgbe_setup_interface */
1073
1074 #if __FreeBSD_version >= 1100036
1075 /************************************************************************
1076  * ixgbe_get_counter
1077  ************************************************************************/
1078 static uint64_t
1079 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1080 {
1081         struct adapter *adapter;
1082         struct tx_ring *txr;
1083         uint64_t       rv;
1084
1085         adapter = if_getsoftc(ifp);
1086
1087         switch (cnt) {
1088         case IFCOUNTER_IPACKETS:
1089                 return (adapter->ipackets);
1090         case IFCOUNTER_OPACKETS:
1091                 return (adapter->opackets);
1092         case IFCOUNTER_IBYTES:
1093                 return (adapter->ibytes);
1094         case IFCOUNTER_OBYTES:
1095                 return (adapter->obytes);
1096         case IFCOUNTER_IMCASTS:
1097                 return (adapter->imcasts);
1098         case IFCOUNTER_OMCASTS:
1099                 return (adapter->omcasts);
1100         case IFCOUNTER_COLLISIONS:
1101                 return (0);
1102         case IFCOUNTER_IQDROPS:
1103                 return (adapter->iqdrops);
1104         case IFCOUNTER_OQDROPS:
1105                 rv = 0;
1106                 txr = adapter->tx_rings;
1107                 for (int i = 0; i < adapter->num_queues; i++, txr++)
1108                         rv += txr->br->br_drops;
1109                 return (rv);
1110         case IFCOUNTER_IERRORS:
1111                 return (adapter->ierrors);
1112         default:
1113                 return (if_get_counter_default(ifp, cnt));
1114         }
1115 } /* ixgbe_get_counter */
1116 #endif
1117
1118 /************************************************************************
1119  * ixgbe_add_media_types
1120  ************************************************************************/
1121 static void
1122 ixgbe_add_media_types(struct adapter *adapter)
1123 {
1124         struct ixgbe_hw *hw = &adapter->hw;
1125         device_t        dev = adapter->dev;
1126         u64             layer;
1127
1128         layer = adapter->phy_layer;
1129
1130         /* Media types with matching FreeBSD media defines */
1131         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1132                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1133         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1134                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1135         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1136                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1137         if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1138                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1139
1140         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1141             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1142                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1143                     NULL);
1144
1145         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1146                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1147                 if (hw->phy.multispeed_fiber)
1148                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1149                             NULL);
1150         }
1151         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1152                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1153                 if (hw->phy.multispeed_fiber)
1154                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1155                             NULL);
1156         } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1157                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1158         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1159                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1160
1161 #ifdef IFM_ETH_XTYPE
1162         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1163                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1164         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1165                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1166         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1167                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1168         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1169                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1170 #else
1171         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1172                 device_printf(dev, "Media supported: 10GbaseKR\n");
1173                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1174                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1175         }
1176         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1177                 device_printf(dev, "Media supported: 10GbaseKX4\n");
1178                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1179                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1180         }
1181         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1182                 device_printf(dev, "Media supported: 1000baseKX\n");
1183                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1184                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1185         }
1186         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1187                 device_printf(dev, "Media supported: 2500baseKX\n");
1188                 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1189                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1190         }
1191 #endif
1192         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1193                 device_printf(dev, "Media supported: 1000baseBX\n");
1194
1195         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1196                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1197                     0, NULL);
1198                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1199         }
1200
1201         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1202 } /* ixgbe_add_media_types */
1203
1204 /************************************************************************
1205  * ixgbe_is_sfp
1206  ************************************************************************/
1207 static inline bool
1208 ixgbe_is_sfp(struct ixgbe_hw *hw)
1209 {
1210         switch (hw->mac.type) {
1211         case ixgbe_mac_82598EB:
1212                 if (hw->phy.type == ixgbe_phy_nl)
1213                         return TRUE;
1214                 return FALSE;
1215         case ixgbe_mac_82599EB:
1216                 switch (hw->mac.ops.get_media_type(hw)) {
1217                 case ixgbe_media_type_fiber:
1218                 case ixgbe_media_type_fiber_qsfp:
1219                         return TRUE;
1220                 default:
1221                         return FALSE;
1222                 }
1223         case ixgbe_mac_X550EM_x:
1224         case ixgbe_mac_X550EM_a:
1225                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1226                         return TRUE;
1227                 return FALSE;
1228         default:
1229                 return FALSE;
1230         }
1231 } /* ixgbe_is_sfp */
1232
1233 /************************************************************************
1234  * ixgbe_config_link
1235  ************************************************************************/
1236 static void
1237 ixgbe_config_link(struct adapter *adapter)
1238 {
1239         struct ixgbe_hw *hw = &adapter->hw;
1240         u32             autoneg, err = 0;
1241         bool            sfp, negotiate;
1242
1243         sfp = ixgbe_is_sfp(hw);
1244
1245         if (sfp) {
1246                 if (hw->phy.multispeed_fiber) {
1247                         hw->mac.ops.setup_sfp(hw);
1248                         ixgbe_enable_tx_laser(hw);
1249                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1250                 } else
1251                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1252         } else {
1253                 if (hw->mac.ops.check_link)
1254                         err = ixgbe_check_link(hw, &adapter->link_speed,
1255                             &adapter->link_up, FALSE);
1256                 if (err)
1257                         goto out;
1258                 autoneg = hw->phy.autoneg_advertised;
1259                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1260                         err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1261                             &negotiate);
1262                 if (err)
1263                         goto out;
1264                 if (hw->mac.ops.setup_link)
1265                         err = hw->mac.ops.setup_link(hw, autoneg,
1266                             adapter->link_up);
1267         }
1268 out:
1269
1270         return;
1271 } /* ixgbe_config_link */
1272
1273 /************************************************************************
1274  * ixgbe_update_stats_counters - Update board statistics counters.
1275  ************************************************************************/
1276 static void
1277 ixgbe_update_stats_counters(struct adapter *adapter)
1278 {
1279         struct ixgbe_hw       *hw = &adapter->hw;
1280         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1281         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1282         u64                   total_missed_rx = 0;
1283
1284         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1285         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1286         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1287         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1288         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1289
1290         for (int i = 0; i < 16; i++) {
1291                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1292                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1293                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1294         }
1295         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1296         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1297         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1298
1299         /* Hardware workaround, gprc counts missed packets */
1300         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1301         stats->gprc -= missed_rx;
1302
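        /*
         * Note (added): on 82599 and newer these octet counters are
         * split across low/high register pairs and assembled into
         * 64-bit values here; the 82598 exposes only the single
         * (high-named) register, so it is handled separately.
         */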
1303         if (hw->mac.type != ixgbe_mac_82598EB) {
1304                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1305                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1306                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1307                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1308                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1309                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1310                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1311                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1312         } else {
1313                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1314                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1315                 /* 82598 only has a counter in the high register */
1316                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1317                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1318                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1319         }
1320
1321         /*
1322          * Workaround: mprc hardware is incorrectly counting
1323          * broadcasts, so for now we subtract those.
1324          */
1325         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1326         stats->bprc += bprc;
1327         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1328         if (hw->mac.type == ixgbe_mac_82598EB)
1329                 stats->mprc -= bprc;
1330
1331         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1332         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1333         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1334         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1335         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1336         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1337
1338         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1339         stats->lxontxc += lxon;
1340         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1341         stats->lxofftxc += lxoff;
1342         total = lxon + lxoff;
1343
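        /*
         * Note (added): the hardware apparently includes transmitted
         * flow-control (XON/XOFF) frames in the good/multicast/64-byte
         * TX counters, so they are backed out below; each pause frame
         * is a minimum-size frame, hence total * ETHER_MIN_LEN octets
         * are also removed from gotc.  For example, 10 XON + 5 XOFF
         * frames remove 15 packets and 15 * 64 = 960 octets.
         */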
1344         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1345         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1346         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1347         stats->gptc -= total;
1348         stats->mptc -= total;
1349         stats->ptc64 -= total;
1350         stats->gotc -= total * ETHER_MIN_LEN;
1351
1352         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1353         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1354         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1355         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1356         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1357         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1358         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1359         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1360         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1361         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1362         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1363         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1364         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1365         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1366         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1367         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1368         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1369         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1370         /* Only read FCOE on 82599 */
1371         if (hw->mac.type != ixgbe_mac_82598EB) {
1372                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1373                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1374                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1375                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1376                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1377         }
1378
1379         /* Fill out the OS statistics structure */
1380         IXGBE_SET_IPACKETS(adapter, stats->gprc);
1381         IXGBE_SET_OPACKETS(adapter, stats->gptc);
1382         IXGBE_SET_IBYTES(adapter, stats->gorc);
1383         IXGBE_SET_OBYTES(adapter, stats->gotc);
1384         IXGBE_SET_IMCASTS(adapter, stats->mprc);
1385         IXGBE_SET_OMCASTS(adapter, stats->mptc);
1386         IXGBE_SET_COLLISIONS(adapter, 0);
1387         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1388         IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1389 } /* ixgbe_update_stats_counters */
1390
1391 /************************************************************************
1392  * ixgbe_add_hw_stats
1393  *
1394  *   Add sysctl variables, one per statistic, to the system.
1395  ************************************************************************/
1396 static void
1397 ixgbe_add_hw_stats(struct adapter *adapter)
1398 {
1399         device_t               dev = adapter->dev;
1400         struct tx_ring         *txr = adapter->tx_rings;
1401         struct rx_ring         *rxr = adapter->rx_rings;
1402         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1403         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1404         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1405         struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1406         struct sysctl_oid      *stat_node, *queue_node;
1407         struct sysctl_oid_list *stat_list, *queue_list;
1408
1409 #define QUEUE_NAME_LEN 32
1410         char                   namebuf[QUEUE_NAME_LEN];
1411
1412         /* Driver Statistics */
1413         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1414             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1415         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1416             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1417         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1418             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1419         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1420             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
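        /*
         * Note (added): everything registered in this function hangs off
         * the device's sysctl tree, so (assuming the device attached as
         * ix0) the nodes show up as, e.g., dev.ix.0.dropped,
         * dev.ix.0.queue0.tx_packets and dev.ix.0.mac_stats.crc_errs.
         */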
1421
1422         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1423                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1424                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1425                     CTLFLAG_RD, NULL, "Queue Name");
1426                 queue_list = SYSCTL_CHILDREN(queue_node);
1427
1428                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1429                     CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1430                     sizeof(&adapter->queues[i]),
1431                     ixgbe_sysctl_interrupt_rate_handler, "IU",
1432                     "Interrupt Rate");
1433                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1434                     CTLFLAG_RD, &(adapter->queues[i].irqs),
1435                     "irqs on this queue");
1436                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1437                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1438                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1439                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1440                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1441                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1442                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1443                     CTLFLAG_RD, &txr->tso_tx, "TSO");
1444                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1445                     CTLFLAG_RD, &txr->no_tx_dma_setup,
1446                     "Driver tx dma failure in xmit");
1447                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1448                     CTLFLAG_RD, &txr->no_desc_avail,
1449                     "Queue No Descriptor Available");
1450                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1451                     CTLFLAG_RD, &txr->total_packets,
1452                     "Queue Packets Transmitted");
1453                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1454                     CTLFLAG_RD, &txr->br->br_drops,
1455                     "Packets dropped in buf_ring");
1456         }
1457
1458         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1459                 struct lro_ctrl *lro = &rxr->lro;
1460
1461                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1462                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1463                     CTLFLAG_RD, NULL, "Queue Name");
1464                 queue_list = SYSCTL_CHILDREN(queue_node);
1465
1466                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1467                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1468                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1469                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1470                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1471                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1472                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1473                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1474                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1475                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1476                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1477                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1478                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1479                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1480                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1481                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1482                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1483                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1484         }
1485
1486         /* MAC stats get their own sub node */
1487
1488         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1489             CTLFLAG_RD, NULL, "MAC Statistics");
1490         stat_list = SYSCTL_CHILDREN(stat_node);
1491
1492         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1493             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1494         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1495             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1496         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1497             CTLFLAG_RD, &stats->errbc, "Byte Errors");
1498         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1499             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1500         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1501             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1502         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1503             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1504         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1505             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1506         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1507             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1508
1509         /* Flow Control stats */
1510         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1511             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1512         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1513             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1514         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1515             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1516         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1517             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1518
1519         /* Packet Reception Stats */
1520         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1521             CTLFLAG_RD, &stats->tor, "Total Octets Received");
1522         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1523             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1524         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1525             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1526         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1527             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1528         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1529             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1530         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1531             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1532         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1533             CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1534         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1535             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1536         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1537             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1538         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1539             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1540         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1541             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1542         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1543             CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1544         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1545             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1546         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1547             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1548         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1549             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1550         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1551             CTLFLAG_RD, &stats->rjc, "Received Jabber");
1552         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1553             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1554         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1555             CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1556         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1557             CTLFLAG_RD, &stats->xec, "Checksum Errors");
1558
1559         /* Packet Transmission Stats */
1560         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1561             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1562         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1563             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1564         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1565             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1566         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1567             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1568         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1569             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1570         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1571             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1572         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1573             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1574         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1575             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1576         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1577             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1578         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1579             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1580         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1581             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1582         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1583             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1584 } /* ixgbe_add_hw_stats */
1585
1586 /************************************************************************
1587  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1588  *
1589  *   Retrieves the TDH value from the hardware
1590  ************************************************************************/
1591 static int
1592 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1593 {
1594         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1595         int            error;
1596         unsigned int   val;
1597
1598         if (!txr)
1599                 return (0);
1600
1601         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1602         error = sysctl_handle_int(oidp, &val, 0, req);
1603         if (error || !req->newptr)
1604                 return error;
1605
1606         return (0);
1607 } /* ixgbe_sysctl_tdh_handler */
1608
1609 /************************************************************************
1610  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1611  *
1612  *   Retrieves the TDT value from the hardware
1613  ************************************************************************/
1614 static int
1615 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1616 {
1617         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1618         int            error;
1619         unsigned int   val;
1620
1621         if (!txr)
1622                 return (0);
1623
1624         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1625         error = sysctl_handle_int(oidp, &val, 0, req);
1626         if (error || !req->newptr)
1627                 return error;
1628
1629         return (0);
1630 } /* ixgbe_sysctl_tdt_handler */
1631
1632 /************************************************************************
1633  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1634  *
1635  *   Retrieves the RDH value from the hardware
1636  ************************************************************************/
1637 static int
1638 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1639 {
1640         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1641         int            error;
1642         unsigned int   val;
1643
1644         if (!rxr)
1645                 return (0);
1646
1647         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1648         error = sysctl_handle_int(oidp, &val, 0, req);
1649         if (error || !req->newptr)
1650                 return error;
1651
1652         return (0);
1653 } /* ixgbe_sysctl_rdh_handler */
1654
1655 /************************************************************************
1656  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1657  *
1658  *   Retrieves the RDT value from the hardware
1659  ************************************************************************/
1660 static int
1661 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1662 {
1663         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1664         int            error;
1665         unsigned int   val;
1666
1667         if (!rxr)
1668                 return (0);
1669
1670         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1671         error = sysctl_handle_int(oidp, &val, 0, req);
1672         if (error || !req->newptr)
1673                 return error;
1674
1675         return (0);
1676 } /* ixgbe_sysctl_rdt_handler */
1677
1678 /************************************************************************
1679  * ixgbe_register_vlan
1680  *
1681  *   Run via the vlan config EVENT; it lets us use the
1682  *   HW filter table since we can get the VLAN id. This
1683  *   only creates the entry in the soft version of the
1684  *   VFTA; init will repopulate the real table.
1685  ************************************************************************/
1686 static void
1687 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1688 {
1689         struct adapter *adapter = ifp->if_softc;
1690         u16            index, bit;
1691
1692         if (ifp->if_softc != arg)   /* Not our event */
1693                 return;
1694
1695         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1696                 return;
1697
1698         IXGBE_CORE_LOCK(adapter);
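        /*
         * Illustrative example (added): the VFTA is an array of 128
         * 32-bit words, so a tag selects word index (vtag >> 5) and bit
         * (vtag & 0x1F) within it; e.g. VLAN 100 maps to word 3, bit 4.
         */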
1699         index = (vtag >> 5) & 0x7F;
1700         bit = vtag & 0x1F;
1701         adapter->shadow_vfta[index] |= (1 << bit);
1702         ++adapter->num_vlans;
1703         ixgbe_setup_vlan_hw_support(adapter);
1704         IXGBE_CORE_UNLOCK(adapter);
1705 } /* ixgbe_register_vlan */
1706
1707 /************************************************************************
1708  * ixgbe_unregister_vlan
1709  *
1710  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1711  ************************************************************************/
1712 static void
1713 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1714 {
1715         struct adapter *adapter = ifp->if_softc;
1716         u16            index, bit;
1717
1718         if (ifp->if_softc != arg)
1719                 return;
1720
1721         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1722                 return;
1723
1724         IXGBE_CORE_LOCK(adapter);
1725         index = (vtag >> 5) & 0x7F;
1726         bit = vtag & 0x1F;
1727         adapter->shadow_vfta[index] &= ~(1 << bit);
1728         --adapter->num_vlans;
1729         /* Re-init to load the changes */
1730         ixgbe_setup_vlan_hw_support(adapter);
1731         IXGBE_CORE_UNLOCK(adapter);
1732 } /* ixgbe_unregister_vlan */
1733
1734 /************************************************************************
1735  * ixgbe_setup_vlan_hw_support
1736  ************************************************************************/
1737 static void
1738 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1739 {
1740         struct ifnet    *ifp = adapter->ifp;
1741         struct ixgbe_hw *hw = &adapter->hw;
1742         struct rx_ring  *rxr;
1743         int             i;
1744         u32             ctrl;
1745
1746
1747         /*
1748          * We get here through init_locked, meaning a soft
1749          * reset; that has already cleared the VFTA and other
1750          * state, so if no VLANs have been registered there
1751          * is nothing to do.
1752          */
1753         if (adapter->num_vlans == 0)
1754                 return;
1755
1756         /* Setup the queues for vlans */
1757         for (i = 0; i < adapter->num_queues; i++) {
1758                 rxr = &adapter->rx_rings[i];
1759                 /* On 82599 and later, VLAN stripping is enabled per-queue in RXDCTL */
1760                 if (hw->mac.type != ixgbe_mac_82598EB) {
1761                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1762                         ctrl |= IXGBE_RXDCTL_VME;
1763                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1764                 }
1765                 rxr->vtag_strip = TRUE;
1766         }
1767
1768         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1769                 return;
1770         /*
1771          * A soft reset zeroes out the VFTA, so
1772          * we need to repopulate it now.
1773          */
1774         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1775                 if (adapter->shadow_vfta[i] != 0)
1776                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1777                             adapter->shadow_vfta[i]);
1778
1779         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1780         /* Enable the Filter Table if enabled */
1781         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1782                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1783                 ctrl |= IXGBE_VLNCTRL_VFE;
1784         }
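        /*
         * Note (added): on the 82598, VLAN tag stripping is a global
         * setting in VLNCTRL (VME); on later MACs it was already enabled
         * per-queue via RXDCTL in the loop above.
         */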
1785         if (hw->mac.type == ixgbe_mac_82598EB)
1786                 ctrl |= IXGBE_VLNCTRL_VME;
1787         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1788 } /* ixgbe_setup_vlan_hw_support */
1789
1790 /************************************************************************
1791  * ixgbe_get_slot_info
1792  *
1793  *   Get the width and transaction speed of
1794  *   the slot this adapter is plugged into.
1795  ************************************************************************/
1796 static void
1797 ixgbe_get_slot_info(struct adapter *adapter)
1798 {
1799         device_t              dev = adapter->dev;
1800         struct ixgbe_hw       *hw = &adapter->hw;
1801         u32                   offset;
1802         u16                   link;
1803         int                   bus_info_valid = TRUE;
1804
1805         /* Some devices are behind an internal bridge */
1806         switch (hw->device_id) {
1807         case IXGBE_DEV_ID_82599_SFP_SF_QP:
1808         case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1809                 goto get_parent_info;
1810         default:
1811                 break;
1812         }
1813
1814         ixgbe_get_bus_info(hw);
1815
1816         /*
1817          * Some devices don't use PCI-E; rather than display
1818          * "Unknown" for bus speed and width, skip the report.
1819          */
1820         switch (hw->mac.type) {
1821         case ixgbe_mac_X550EM_x:
1822         case ixgbe_mac_X550EM_a:
1823                 return;
1824         default:
1825                 goto display;
1826         }
1827
1828 get_parent_info:
1829         /*
1830          * For the Quad port adapter we need to parse back
1831          * up the PCI tree to find the speed of the expansion
1832          * slot into which this adapter is plugged. A bit more work.
1833          */
1834         dev = device_get_parent(device_get_parent(dev));
1835 #ifdef IXGBE_DEBUG
1836         device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1837             pci_get_slot(dev), pci_get_function(dev));
1838 #endif
1839         dev = device_get_parent(device_get_parent(dev));
1840 #ifdef IXGBE_DEBUG
1841         device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1842             pci_get_slot(dev), pci_get_function(dev));
1843 #endif
1844         /* Now get the PCI Express Capabilities offset */
1845         if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1846                 /*
1847                  * Hmm...can't get PCI-Express capabilities.
1848                  * Falling back to default method.
1849                  */
1850                 bus_info_valid = FALSE;
1851                 ixgbe_get_bus_info(hw);
1852                 goto display;
1853         }
1854         /* ...and read the Link Status Register */
1855         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1856         ixgbe_set_pci_config_data_generic(hw, link);
1857
1858 display:
1859         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1860             ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1861              (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1862              (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1863              "Unknown"),
1864             ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1865              (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1866              (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1867              "Unknown"));
1868
1869         if (bus_info_valid) {
1870                 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1871                     ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1872                     (hw->bus.speed == ixgbe_bus_speed_2500))) {
1873                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1874                         device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1875                 }
1876                 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1877                     ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1878                     (hw->bus.speed < ixgbe_bus_speed_8000))) {
1879                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1880                         device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1881                 }
1882         } else
1883                 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1884
1885         return;
1886 } /* ixgbe_get_slot_info */
1887
1888 /************************************************************************
1889  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1890  ************************************************************************/
1891 static inline void
1892 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1893 {
1894         struct ixgbe_hw *hw = &adapter->hw;
1895         u64             queue = (u64)1 << vector;
1896         u32             mask;
1897
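        /*
         * Note (added): the 82598 has a single 32-bit EIMS covering all
         * vectors, while later MACs support up to 64 MSI-X vectors and
         * split the mask across EIMS_EX(0)/EIMS_EX(1); hence the 64-bit
         * queue mask is written one 32-bit half at a time.
         */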
1898         if (hw->mac.type == ixgbe_mac_82598EB) {
1899                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1900                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1901         } else {
1902                 mask = (queue & 0xFFFFFFFF);
1903                 if (mask)
1904                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1905                 mask = (queue >> 32);
1906                 if (mask)
1907                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1908         }
1909 } /* ixgbe_enable_queue */
1910
1911 /************************************************************************
1912  * ixgbe_disable_queue
1913  ************************************************************************/
1914 static inline void
1915 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1916 {
1917         struct ixgbe_hw *hw = &adapter->hw;
1918         u64             queue = (u64)1 << vector;
1919         u32             mask;
1920
1921         if (hw->mac.type == ixgbe_mac_82598EB) {
1922                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1923                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1924         } else {
1925                 mask = (queue & 0xFFFFFFFF);
1926                 if (mask)
1927                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1928                 mask = (queue >> 32);
1929                 if (mask)
1930                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1931         }
1932 } /* ixgbe_disable_queue */
1933
1934 /************************************************************************
1935  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1936  ************************************************************************/
1937 void
1938 ixgbe_msix_que(void *arg)
1939 {
1940         struct ix_queue *que = arg;
1941         struct adapter  *adapter = que->adapter;
1942         struct ifnet    *ifp = adapter->ifp;
1943         struct tx_ring  *txr = que->txr;
1944         struct rx_ring  *rxr = que->rxr;
1945         bool            more;
1946         u32             newitr = 0;
1947
1948
1949         /* Protect against spurious interrupts */
1950         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1951                 return;
1952
1953         ixgbe_disable_queue(adapter, que->msix);
1954         ++que->irqs;
1955
1956         more = ixgbe_rxeof(que);
1957
1958         IXGBE_TX_LOCK(txr);
1959         ixgbe_txeof(txr);
1960         if (!ixgbe_ring_empty(ifp, txr->br))
1961                 ixgbe_start_locked(ifp, txr);
1962         IXGBE_TX_UNLOCK(txr);
1963
1964         /* Do AIM now? */
1965
1966         if (adapter->enable_aim == FALSE)
1967                 goto no_calc;
1968         /*
1969          * Do Adaptive Interrupt Moderation:
1970          *  - Write out last calculated setting
1971          *  - Calculate based on average size over
1972          *    the last interval.
1973          */
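        /*
         * Rough worked example (added): with an average frame of about
         * 1500 bytes, newitr = 1500 + 24 = 1524, which falls outside the
         * 300-1200 "mid range" below and is therefore halved to 762
         * before being written to EITR on the next interrupt.
         */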
1974         if (que->eitr_setting)
1975                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1976                     que->eitr_setting);
1977
1978         que->eitr_setting = 0;
1979
1980         /* Idle, do nothing */
1981         if ((txr->bytes == 0) && (rxr->bytes == 0))
1982                 goto no_calc;
1983
1984         if ((txr->bytes) && (txr->packets))
1985                 newitr = txr->bytes/txr->packets;
1986         if ((rxr->bytes) && (rxr->packets))
1987                 newitr = max(newitr, (rxr->bytes / rxr->packets));
1988         newitr += 24; /* account for hardware frame, crc */
1989
1990         /* set an upper boundary */
1991         newitr = min(newitr, 3000);
1992
1993         /* Be nice to the mid range */
1994         if ((newitr > 300) && (newitr < 1200))
1995                 newitr = (newitr / 3);
1996         else
1997                 newitr = (newitr / 2);
1998
1999         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2000                 newitr |= newitr << 16;
2001         else
2002                 newitr |= IXGBE_EITR_CNT_WDIS;
2003
2004         /* save for next interrupt */
2005         que->eitr_setting = newitr;
2006
2007         /* Reset state */
2008         txr->bytes = 0;
2009         txr->packets = 0;
2010         rxr->bytes = 0;
2011         rxr->packets = 0;
2012
2013 no_calc:
2014         if (more)
2015                 taskqueue_enqueue(que->tq, &que->que_task);
2016         else
2017                 ixgbe_enable_queue(adapter, que->msix);
2018
2019         return;
2020 } /* ixgbe_msix_que */
2021
2022 /************************************************************************
2023  * ixgbe_media_status - Media Ioctl callback
2024  *
2025  *   Called whenever the user queries the status of
2026  *   the interface using ifconfig.
2027  ************************************************************************/
2028 static void
2029 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2030 {
2031         struct adapter  *adapter = ifp->if_softc;
2032         struct ixgbe_hw *hw = &adapter->hw;
2033         int             layer;
2034
2035         INIT_DEBUGOUT("ixgbe_media_status: begin");
2036         IXGBE_CORE_LOCK(adapter);
2037         ixgbe_update_link_status(adapter);
2038
2039         ifmr->ifm_status = IFM_AVALID;
2040         ifmr->ifm_active = IFM_ETHER;
2041
2042         if (!adapter->link_active) {
2043                 IXGBE_CORE_UNLOCK(adapter);
2044                 return;
2045         }
2046
2047         ifmr->ifm_status |= IFM_ACTIVE;
2048         layer = adapter->phy_layer;
2049
2050         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2051             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2052             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2053             layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2054                 switch (adapter->link_speed) {
2055                 case IXGBE_LINK_SPEED_10GB_FULL:
2056                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2057                         break;
2058                 case IXGBE_LINK_SPEED_1GB_FULL:
2059                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2060                         break;
2061                 case IXGBE_LINK_SPEED_100_FULL:
2062                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2063                         break;
2064                 case IXGBE_LINK_SPEED_10_FULL:
2065                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2066                         break;
2067                 }
2068         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2069             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2070                 switch (adapter->link_speed) {
2071                 case IXGBE_LINK_SPEED_10GB_FULL:
2072                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2073                         break;
2074                 }
2075         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2076                 switch (adapter->link_speed) {
2077                 case IXGBE_LINK_SPEED_10GB_FULL:
2078                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2079                         break;
2080                 case IXGBE_LINK_SPEED_1GB_FULL:
2081                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2082                         break;
2083                 }
2084         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2085                 switch (adapter->link_speed) {
2086                 case IXGBE_LINK_SPEED_10GB_FULL:
2087                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2088                         break;
2089                 case IXGBE_LINK_SPEED_1GB_FULL:
2090                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2091                         break;
2092                 }
2093         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2094             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2095                 switch (adapter->link_speed) {
2096                 case IXGBE_LINK_SPEED_10GB_FULL:
2097                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2098                         break;
2099                 case IXGBE_LINK_SPEED_1GB_FULL:
2100                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2101                         break;
2102                 }
2103         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2104                 switch (adapter->link_speed) {
2105                 case IXGBE_LINK_SPEED_10GB_FULL:
2106                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2107                         break;
2108                 }
2109         /*
2110          * XXX: These need to use the proper media types once
2111          * they're added.
2112          */
2113 #ifndef IFM_ETH_XTYPE
2114         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2115                 switch (adapter->link_speed) {
2116                 case IXGBE_LINK_SPEED_10GB_FULL:
2117                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2118                         break;
2119                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2120                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2121                         break;
2122                 case IXGBE_LINK_SPEED_1GB_FULL:
2123                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2124                         break;
2125                 }
2126         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2127             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2128             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2129                 switch (adapter->link_speed) {
2130                 case IXGBE_LINK_SPEED_10GB_FULL:
2131                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2132                         break;
2133                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2134                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2135                         break;
2136                 case IXGBE_LINK_SPEED_1GB_FULL:
2137                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2138                         break;
2139                 }
2140 #else
2141         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2142                 switch (adapter->link_speed) {
2143                 case IXGBE_LINK_SPEED_10GB_FULL:
2144                         ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2145                         break;
2146                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2147                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2148                         break;
2149                 case IXGBE_LINK_SPEED_1GB_FULL:
2150                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2151                         break;
2152                 }
2153         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2154             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2155             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2156                 switch (adapter->link_speed) {
2157                 case IXGBE_LINK_SPEED_10GB_FULL:
2158                         ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2159                         break;
2160                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2161                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2162                         break;
2163                 case IXGBE_LINK_SPEED_1GB_FULL:
2164                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2165                         break;
2166                 }
2167 #endif
2168
2169         /* If nothing is recognized... */
2170         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2171                 ifmr->ifm_active |= IFM_UNKNOWN;
2172
2173 #if __FreeBSD_version >= 900025
2174         /* Display current flow control setting used on link */
2175         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2176             hw->fc.current_mode == ixgbe_fc_full)
2177                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2178         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2179             hw->fc.current_mode == ixgbe_fc_full)
2180                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2181 #endif
2182
2183         IXGBE_CORE_UNLOCK(adapter);
2184
2185         return;
2186 } /* ixgbe_media_status */
2187
2188 /************************************************************************
2189  * ixgbe_media_change - Media Ioctl callback
2190  *
2191  *   Called when the user changes speed/duplex using
2192  *   media/mediopt option with ifconfig.
2193  ************************************************************************/
2194 static int
2195 ixgbe_media_change(struct ifnet *ifp)
2196 {
2197         struct adapter   *adapter = ifp->if_softc;
2198         struct ifmedia   *ifm = &adapter->media;
2199         struct ixgbe_hw  *hw = &adapter->hw;
2200         ixgbe_link_speed speed = 0;
2201
2202         INIT_DEBUGOUT("ixgbe_media_change: begin");
2203
2204         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2205                 return (EINVAL);
2206
2207         if (hw->phy.media_type == ixgbe_media_type_backplane)
2208                 return (ENODEV);
2209
2210         /*
2211          * We don't actually need to check against the supported
2212          * media types of the adapter; ifmedia will take care of
2213          * that for us.
2214          */
2215         switch (IFM_SUBTYPE(ifm->ifm_media)) {
2216                 case IFM_AUTO:
2217                 case IFM_10G_T:
2218                         speed |= IXGBE_LINK_SPEED_100_FULL;
2219                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2220                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2221                         break;
2222                 case IFM_10G_LRM:
2223                 case IFM_10G_LR:
2224 #ifndef IFM_ETH_XTYPE
2225                 case IFM_10G_SR: /* KR, too */
2226                 case IFM_10G_CX4: /* KX4 */
2227 #else
2228                 case IFM_10G_KR:
2229                 case IFM_10G_KX4:
2230 #endif
2231                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2232                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2233                         break;
2234 #ifndef IFM_ETH_XTYPE
2235                 case IFM_1000_CX: /* KX */
2236 #else
2237                 case IFM_1000_KX:
2238 #endif
2239                 case IFM_1000_LX:
2240                 case IFM_1000_SX:
2241                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2242                         break;
2243                 case IFM_1000_T:
2244                         speed |= IXGBE_LINK_SPEED_100_FULL;
2245                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2246                         break;
2247                 case IFM_10G_TWINAX:
2248                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2249                         break;
2250                 case IFM_100_TX:
2251                         speed |= IXGBE_LINK_SPEED_100_FULL;
2252                         break;
2253                 case IFM_10_T:
2254                         speed |= IXGBE_LINK_SPEED_10_FULL;
2255                         break;
2256                 default:
2257                         goto invalid;
2258         }
2259
2260         hw->mac.autotry_restart = TRUE;
2261         hw->mac.ops.setup_link(hw, speed, TRUE);
2262         adapter->advertise =
2263             ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2264             ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2265             ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2266             ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
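        /*
         * Note (added): adapter->advertise uses the same bit encoding as
         * the advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
         * 0x8 = 10M.  Selecting IFM_AUTO on a 10GBase-T port, for
         * example, yields 0x7 (10G + 1G + 100M).
         */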
2267
2268         return (0);
2269
2270 invalid:
2271         device_printf(adapter->dev, "Invalid media type!\n");
2272
2273         return (EINVAL);
2274 } /* ixgbe_media_change */
2275
2276 /************************************************************************
2277  * ixgbe_set_promisc
2278  ************************************************************************/
2279 static void
2280 ixgbe_set_promisc(struct adapter *adapter)
2281 {
2282         struct ifnet *ifp = adapter->ifp;
2283         int          mcnt = 0;
2284         u32          rctl;
2285
2286         rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2287         rctl &= (~IXGBE_FCTRL_UPE);
2288         if (ifp->if_flags & IFF_ALLMULTI)
2289                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2290         else {
2291                 struct ifmultiaddr *ifma;
2292 #if __FreeBSD_version < 800000
2293                 IF_ADDR_LOCK(ifp);
2294 #else
2295                 if_maddr_rlock(ifp);
2296 #endif
2297                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2298                         if (ifma->ifma_addr->sa_family != AF_LINK)
2299                                 continue;
2300                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2301                                 break;
2302                         mcnt++;
2303                 }
2304 #if __FreeBSD_version < 800000
2305                 IF_ADDR_UNLOCK(ifp);
2306 #else
2307                 if_maddr_runlock(ifp);
2308 #endif
2309         }
2310         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2311                 rctl &= (~IXGBE_FCTRL_MPE);
2312         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2313
2314         if (ifp->if_flags & IFF_PROMISC) {
2315                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2316                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2317         } else if (ifp->if_flags & IFF_ALLMULTI) {
2318                 rctl |= IXGBE_FCTRL_MPE;
2319                 rctl &= ~IXGBE_FCTRL_UPE;
2320                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2321         }
2322 } /* ixgbe_set_promisc */
2323
2324 /************************************************************************
2325  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2326  ************************************************************************/
2327 static void
2328 ixgbe_msix_link(void *arg)
2329 {
2330         struct adapter  *adapter = arg;
2331         struct ixgbe_hw *hw = &adapter->hw;
2332         u32             eicr, eicr_mask;
2333         s32             retval;
2334
2335         ++adapter->link_irq;
2336
2337         /* Pause other interrupts */
2338         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2339
2340         /* First get the cause */
2341         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2342         /* Be sure the queue bits are not cleared */
2343         eicr &= ~IXGBE_EICR_RTX_QUEUE;
2344         /* Clear interrupt with write */
2345         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2346
2347         /* Link status change */
2348         if (eicr & IXGBE_EICR_LSC) {
2349                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2350                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
2351         }
2352
2353         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2354                 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2355                     (eicr & IXGBE_EICR_FLOW_DIR)) {
2356                         /* This is probably overkill :) */
2357                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2358                                 return;
2359                         /* Disable the interrupt */
2360                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2361                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
2362                 }
2363
2364                 if (eicr & IXGBE_EICR_ECC) {
2365                         device_printf(adapter->dev,
2366                             "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
2367                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2368                 }
2369
2370                 /* Check for over temp condition */
2371                 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2372                         switch (adapter->hw.mac.type) {
2373                         case ixgbe_mac_X550EM_a:
2374                                 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2375                                         break;
2376                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2377                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2378                                 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2379                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2380                                 retval = hw->phy.ops.check_overtemp(hw);
2381                                 if (retval != IXGBE_ERR_OVERTEMP)
2382                                         break;
2383                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2384                                 device_printf(adapter->dev, "System shutdown required!\n");
2385                                 break;
2386                         default:
2387                                 if (!(eicr & IXGBE_EICR_TS))
2388                                         break;
2389                                 retval = hw->phy.ops.check_overtemp(hw);
2390                                 if (retval != IXGBE_ERR_OVERTEMP)
2391                                         break;
2392                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2393                                 device_printf(adapter->dev, "System shutdown required!\n");
2394                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2395                                 break;
2396                         }
2397                 }
2398
2399                 /* Check for VF message */
2400                 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2401                     (eicr & IXGBE_EICR_MAILBOX))
2402                         taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
2403         }
2404
2405         if (ixgbe_is_sfp(hw)) {
2406                 /* Pluggable optics-related interrupt */
2407                 if (hw->mac.type >= ixgbe_mac_X540)
2408                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2409                 else
2410                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2411
2412                 if (eicr & eicr_mask) {
2413                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2414                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2415                 }
2416
2417                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2418                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2419                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2420                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2421                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2422                 }
2423         }
2424
2425         /* Check for fan failure */
2426         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2427                 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2428                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2429         }
2430
2431         /* External PHY interrupt */
2432         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2433             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2434                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2435                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
2436         }
2437
2438         /* Re-enable other interrupts */
2439         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2440 } /* ixgbe_msix_link */
2441
2442 /************************************************************************
2443  * ixgbe_sysctl_interrupt_rate_handler
2444  ************************************************************************/
2445 static int
2446 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2447 {
2448         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2449         int             error;
2450         unsigned int    reg, usec, rate;
2451
2452         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2453         usec = ((reg & 0x0FF8) >> 3);
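        /*
         * Note (added): as the masking above implies, the EITR interval
         * field occupies bits 3-11 and is expressed in 2 usec units, so
         * 500000 interrupts/sec is the ceiling.  Writing a rate of 8000
         * below, for example, stores 4000000/8000 = 500 masked to 0x1F0,
         * i.e. a ~124 usec interval (about 8065 interrupts/sec).
         */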
2454         if (usec > 0)
2455                 rate = 500000 / usec;
2456         else
2457                 rate = 0;
2458         error = sysctl_handle_int(oidp, &rate, 0, req);
2459         if (error || !req->newptr)
2460                 return error;
2461         reg &= ~0xfff; /* default, no limitation */
2462         ixgbe_max_interrupt_rate = 0;
2463         if (rate > 0 && rate < 500000) {
2464                 if (rate < 1000)
2465                         rate = 1000;
2466                 ixgbe_max_interrupt_rate = rate;
2467                 reg |= ((4000000/rate) & 0xff8);
2468         }
2469         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2470
2471         return (0);
2472 } /* ixgbe_sysctl_interrupt_rate_handler */
2473
2474 /************************************************************************
2475  * ixgbe_add_device_sysctls
2476  ************************************************************************/
2477 static void
2478 ixgbe_add_device_sysctls(struct adapter *adapter)
2479 {
2480         device_t               dev = adapter->dev;
2481         struct ixgbe_hw        *hw = &adapter->hw;
2482         struct sysctl_oid_list *child;
2483         struct sysctl_ctx_list *ctx;
2484
2485         ctx = device_get_sysctl_ctx(dev);
2486         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2487
2488         /* Sysctls for all devices */
2489         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2490             adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
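        /*
         * Hypothetical usage example (added; assumes the device attached
         * as ix0 and that value 3 selects full flow control):
         *      sysctl dev.ix.0.fc=3
         *      sysctl dev.ix.0.advertise_speed
         */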
2491
2492         adapter->enable_aim = ixgbe_enable_aim;
2493         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2494             &adapter->enable_aim, 1, "Interrupt Moderation");
2495
2496         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2497             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2498             IXGBE_SYSCTL_DESC_ADV_SPEED);
2499
2500 #ifdef IXGBE_DEBUG
2501         /* testing sysctls (for all devices) */
2502         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2503             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2504             "I", "PCI Power State");
2505
2506         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2507             CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2508             ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2509 #endif
2510         /* for X550 series devices */
2511         if (hw->mac.type >= ixgbe_mac_X550)
2512                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2513                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2514                     "I", "DMA Coalesce");
2515
2516         /* for WoL-capable devices */
2517         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2518                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2519                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2520                     ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2521
2522                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2523                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2524                     "I", "Enable/Disable Wake Up Filters");
2525         }
2526
2527         /* for X552/X557-AT devices */
2528         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2529                 struct sysctl_oid *phy_node;
2530                 struct sysctl_oid_list *phy_list;
2531
2532                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2533                     CTLFLAG_RD, NULL, "External PHY sysctls");
2534                 phy_list = SYSCTL_CHILDREN(phy_node);
2535
2536                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2537                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2538                     "I", "Current External PHY Temperature (Celsius)");
2539
2540                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2541                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2542                     ixgbe_sysctl_phy_overtemp_occurred, "I",
2543                     "External PHY High Temperature Event Occurred");
2544         }
2545
2546         if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2547                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2548                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2549                     ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2550         }
2551 } /* ixgbe_add_device_sysctls */
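/*
 * Illustrative sketch (userland, not part of the driver): reading and writing
 * one of the sysctls registered above via sysctlbyname(3).  The node name
 * "dev.ix.0.fc" assumes unit 0 of this driver; writes are routed back into
 * ixgbe_sysctl_flowcntl() below.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int    fc, newfc = 3;          /* 3 = full flow control */
        size_t len = sizeof(fc);

        if (sysctlbyname("dev.ix.0.fc", &fc, &len, NULL, 0) == 0)
                printf("current fc: %d\n", fc);
        if (sysctlbyname("dev.ix.0.fc", NULL, NULL, &newfc, sizeof(newfc)) != 0)
                perror("dev.ix.0.fc");
        return (0);
}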
2552
2553 /************************************************************************
2554  * ixgbe_allocate_pci_resources
2555  ************************************************************************/
2556 static int
2557 ixgbe_allocate_pci_resources(struct adapter *adapter)
2558 {
2559         device_t dev = adapter->dev;
2560         int      rid;
2561
2562         rid = PCIR_BAR(0);
2563         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2564             RF_ACTIVE);
2565
2566         if (!(adapter->pci_mem)) {
2567                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2568                 return (ENXIO);
2569         }
2570
2571         /* Save bus_space values for READ/WRITE_REG macros */
2572         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2573         adapter->osdep.mem_bus_space_handle =
2574             rman_get_bushandle(adapter->pci_mem);
2575         /* Set hw values for shared code */
2576         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2577
2578         return (0);
2579 } /* ixgbe_allocate_pci_resources */
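/*
 * Illustrative sketch, not part of the driver: how the bus_space tag/handle
 * saved above are typically consumed by the IXGBE_READ_REG/IXGBE_WRITE_REG
 * macros.  The real definitions live in ixgbe_osdep.h; this helper only
 * shows the shape of the access.
 */
static inline u32
ix_read_reg_sketch(struct adapter *adapter, bus_size_t reg)
{
        return (bus_space_read_4(adapter->osdep.mem_bus_space_tag,
            adapter->osdep.mem_bus_space_handle, reg));
}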
2580
2581 /************************************************************************
2582  * ixgbe_detach - Device removal routine
2583  *
2584  *   Called when the driver is being removed.
2585  *   Stops the adapter and deallocates all the resources
2586  *   that were allocated for driver operation.
2587  *
2588  *   return 0 on success, positive on failure
2589  ************************************************************************/
2590 static int
2591 ixgbe_detach(device_t dev)
2592 {
2593         struct adapter  *adapter = device_get_softc(dev);
2594         struct ix_queue *que = adapter->queues;
2595         struct tx_ring  *txr = adapter->tx_rings;
2596         u32             ctrl_ext;
2597
2598         INIT_DEBUGOUT("ixgbe_detach: begin");
2599
2600         /* Make sure VLANs are not using the driver */
2601         if (adapter->ifp->if_vlantrunk != NULL) {
2602                 device_printf(dev, "VLAN in use; detach first.\n");
2603                 return (EBUSY);
2604         }
2605
2606         if (ixgbe_pci_iov_detach(dev) != 0) {
2607                 device_printf(dev, "SR-IOV in use; detach first.\n");
2608                 return (EBUSY);
2609         }
2610
2611         ether_ifdetach(adapter->ifp);
2612         /* Stop the adapter */
2613         IXGBE_CORE_LOCK(adapter);
2614         ixgbe_setup_low_power_mode(adapter);
2615         IXGBE_CORE_UNLOCK(adapter);
2616
2617         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2618                 if (que->tq) {
2619                         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2620                                 taskqueue_drain(que->tq, &txr->txq_task);
2621                         taskqueue_drain(que->tq, &que->que_task);
2622                         taskqueue_free(que->tq);
2623                 }
2624         }
2625
2626         /* Drain the Link queue */
2627         if (adapter->tq) {
2628                 taskqueue_drain(adapter->tq, &adapter->link_task);
2629                 taskqueue_drain(adapter->tq, &adapter->mod_task);
2630                 taskqueue_drain(adapter->tq, &adapter->msf_task);
2631                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2632                         taskqueue_drain(adapter->tq, &adapter->mbx_task);
2633                 taskqueue_drain(adapter->tq, &adapter->phy_task);
2634                 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2635                         taskqueue_drain(adapter->tq, &adapter->fdir_task);
2636                 taskqueue_free(adapter->tq);
2637         }
2638
2639         /* let hardware know driver is unloading */
2640         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2641         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2642         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2643
2644         /* Unregister VLAN events */
2645         if (adapter->vlan_attach != NULL)
2646                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2647         if (adapter->vlan_detach != NULL)
2648                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2649
2650         callout_drain(&adapter->timer);
2651
2652         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2653                 netmap_detach(adapter->ifp);
2654
2655         ixgbe_free_pci_resources(adapter);
2656         bus_generic_detach(dev);
2657         if_free(adapter->ifp);
2658
2659         ixgbe_free_transmit_structures(adapter);
2660         ixgbe_free_receive_structures(adapter);
2661         free(adapter->queues, M_DEVBUF);
2662         free(adapter->mta, M_IXGBE);
2663
2664         IXGBE_CORE_LOCK_DESTROY(adapter);
2665
2666         return (0);
2667 } /* ixgbe_detach */
2668
2669 /************************************************************************
2670  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2671  *
2672  *   Prepare the adapter/port for LPLU and/or WoL
2673  ************************************************************************/
2674 static int
2675 ixgbe_setup_low_power_mode(struct adapter *adapter)
2676 {
2677         struct ixgbe_hw *hw = &adapter->hw;
2678         device_t        dev = adapter->dev;
2679         s32             error = 0;
2680
2681         mtx_assert(&adapter->core_mtx, MA_OWNED);
2682
2683         /* Limit power management flow to X550EM baseT */
2684         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2685             hw->phy.ops.enter_lplu) {
2686                 /* Turn off support for APM wakeup. (Using ACPI instead) */
2687                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2688                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2689
2690                 /*
2691                  * Clear Wake Up Status register to prevent any previous wakeup
2692                  * events from waking us up immediately after we suspend.
2693                  */
2694                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2695
2696                 /*
2697                  * Program the Wakeup Filter Control register with user filter
2698                  * settings
2699                  */
2700                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2701
2702                 /* Enable wakeups and power management in Wakeup Control */
2703                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2704                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2705
2706                 /* X550EM baseT adapters need a special LPLU flow */
2707                 hw->phy.reset_disable = true;
2708                 ixgbe_stop(adapter);
2709                 error = hw->phy.ops.enter_lplu(hw);
2710                 if (error)
2711                         device_printf(dev, "Error entering LPLU: %d\n", error);
2712                 hw->phy.reset_disable = false;
2713         } else {
2714                 /* Just stop for other adapters */
2715                 ixgbe_stop(adapter);
2716         }
2717
2718         return error;
2719 } /* ixgbe_setup_low_power_mode */
2720
2721 /************************************************************************
2722  * ixgbe_shutdown - Shutdown entry point
2723  ************************************************************************/
2724 static int
2725 ixgbe_shutdown(device_t dev)
2726 {
2727         struct adapter *adapter = device_get_softc(dev);
2728         int            error = 0;
2729
2730         INIT_DEBUGOUT("ixgbe_shutdown: begin");
2731
2732         IXGBE_CORE_LOCK(adapter);
2733         error = ixgbe_setup_low_power_mode(adapter);
2734         IXGBE_CORE_UNLOCK(adapter);
2735
2736         return (error);
2737 } /* ixgbe_shutdown */
2738
2739 /************************************************************************
2740  * ixgbe_suspend
2741  *
2742  *   From D0 to D3
2743  ************************************************************************/
2744 static int
2745 ixgbe_suspend(device_t dev)
2746 {
2747         struct adapter *adapter = device_get_softc(dev);
2748         int            error = 0;
2749
2750         INIT_DEBUGOUT("ixgbe_suspend: begin");
2751
2752         IXGBE_CORE_LOCK(adapter);
2753
2754         error = ixgbe_setup_low_power_mode(adapter);
2755
2756         IXGBE_CORE_UNLOCK(adapter);
2757
2758         return (error);
2759 } /* ixgbe_suspend */
2760
2761 /************************************************************************
2762  * ixgbe_resume
2763  *
2764  *   From D3 to D0
2765  ************************************************************************/
2766 static int
2767 ixgbe_resume(device_t dev)
2768 {
2769         struct adapter  *adapter = device_get_softc(dev);
2770         struct ifnet    *ifp = adapter->ifp;
2771         struct ixgbe_hw *hw = &adapter->hw;
2772         u32             wus;
2773
2774         INIT_DEBUGOUT("ixgbe_resume: begin");
2775
2776         IXGBE_CORE_LOCK(adapter);
2777
2778         /* Read & clear WUS register */
2779         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2780         if (wus)
2781                 device_printf(dev, "Woken up by (WUS): %#010x\n",
2782                     wus);
2783         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2784         /* And clear WUFC until next low-power transition */
2785         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2786
2787         /*
2788          * Required after D3->D0 transition;
2789          * will re-advertise all previous advertised speeds
2790          */
2791         if (ifp->if_flags & IFF_UP)
2792                 ixgbe_init_locked(adapter);
2793
2794         IXGBE_CORE_UNLOCK(adapter);
2795
2796         return (0);
2797 } /* ixgbe_resume */
2798
2799 /************************************************************************
2800  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2801  *
2802  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2803  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2804  *   field what mbuf offload flags the driver will understand.
2805  ************************************************************************/
2806 static void
2807 ixgbe_set_if_hwassist(struct adapter *adapter)
2808 {
2809         struct ifnet *ifp = adapter->ifp;
2810
2811         ifp->if_hwassist = 0;
2812 #if __FreeBSD_version >= 1000000
2813         if (ifp->if_capenable & IFCAP_TSO4)
2814                 ifp->if_hwassist |= CSUM_IP_TSO;
2815         if (ifp->if_capenable & IFCAP_TSO6)
2816                 ifp->if_hwassist |= CSUM_IP6_TSO;
2817         if (ifp->if_capenable & IFCAP_TXCSUM) {
2818                 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2819                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2820                         ifp->if_hwassist |= CSUM_IP_SCTP;
2821         }
2822         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2823                 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2824                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2825                         ifp->if_hwassist |= CSUM_IP6_SCTP;
2826         }
2827 #else
2828         if (ifp->if_capenable & IFCAP_TSO)
2829                 ifp->if_hwassist |= CSUM_TSO;
2830         if (ifp->if_capenable & IFCAP_TXCSUM) {
2831                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2832                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2833                         ifp->if_hwassist |= CSUM_SCTP;
2834         }
2835 #endif
2836 } /* ixgbe_set_if_hwassist */
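/*
 * Illustrative sketch, not part of the driver: the stack sets these mbuf
 * flags only for offloads advertised in if_hwassist above, and the transmit
 * path keys its context-descriptor setup off of them.  Flag names are the
 * same ones used in the __FreeBSD_version >= 1000000 branch of
 * ixgbe_set_if_hwassist().
 */
static inline int
ix_tx_wants_offload_sketch(struct mbuf *m)
{
        if (m->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
                return (1);     /* needs a TSO context descriptor */
        if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP |
            CSUM_IP6_TCP | CSUM_IP6_UDP))
                return (1);     /* needs a checksum offload context */
        return (0);
}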
2837
2838 /************************************************************************
2839  * ixgbe_init_locked - Init entry point
2840  *
2841  *   Used in two ways: It is used by the stack as an init
2842  *   entry point in network interface structure. It is also
2843  *   used by the driver as a hw/sw initialization routine to
2844  *   get to a consistent state.
2845  *
2846  *   return 0 on success, positive on failure
2847  ************************************************************************/
2848 void
2849 ixgbe_init_locked(struct adapter *adapter)
2850 {
2851         struct ifnet    *ifp = adapter->ifp;
2852         device_t        dev = adapter->dev;
2853         struct ixgbe_hw *hw = &adapter->hw;
2854         struct tx_ring  *txr;
2855         struct rx_ring  *rxr;
2856         u32             txdctl, mhadd;
2857         u32             rxdctl, rxctrl;
2858         u32             ctrl_ext;
2859         int             err = 0;
2860
2861         mtx_assert(&adapter->core_mtx, MA_OWNED);
2862         INIT_DEBUGOUT("ixgbe_init_locked: begin");
2863
2864         hw->adapter_stopped = FALSE;
2865         ixgbe_stop_adapter(hw);
2866         callout_stop(&adapter->timer);
2867
2868         /* Queue indices may change with IOV mode */
2869         ixgbe_align_all_queue_indices(adapter);
2870
2871         /* reprogram the RAR[0] in case user changed it. */
2872         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2873
2874         /* Get the latest MAC address; the user may have set a LAA */
2875         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2876         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2877         hw->addr_ctrl.rar_used_count = 1;
2878
2879         /* Set hardware offload abilities from ifnet flags */
2880         ixgbe_set_if_hwassist(adapter);
2881
2882         /* Prepare transmit descriptors and buffers */
2883         if (ixgbe_setup_transmit_structures(adapter)) {
2884                 device_printf(dev, "Could not setup transmit structures\n");
2885                 ixgbe_stop(adapter);
2886                 return;
2887         }
2888
2889         ixgbe_init_hw(hw);
2890         ixgbe_initialize_iov(adapter);
2891         ixgbe_initialize_transmit_units(adapter);
2892
2893         /* Setup Multicast table */
2894         ixgbe_set_multi(adapter);
2895
2896         /* Determine the correct mbuf pool, based on frame size */
2897         if (adapter->max_frame_size <= MCLBYTES)
2898                 adapter->rx_mbuf_sz = MCLBYTES;
2899         else
2900                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2901
2902         /* Prepare receive descriptors and buffers */
2903         if (ixgbe_setup_receive_structures(adapter)) {
2904                 device_printf(dev, "Could not setup receive structures\n");
2905                 ixgbe_stop(adapter);
2906                 return;
2907         }
2908
2909         /* Configure RX settings */
2910         ixgbe_initialize_receive_units(adapter);
2911
2912         /* Enable SDP & MSI-X interrupts based on adapter */
2913         ixgbe_config_gpie(adapter);
2914
2915         /* Set MTU size */
2916         if (ifp->if_mtu > ETHERMTU) {
2917                 /* aka IXGBE_MAXFRS on 82599 and newer */
2918                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2919                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2920                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2921                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2922         }
2923
2924         /* Now enable all the queues */
2925         for (int i = 0; i < adapter->num_queues; i++) {
2926                 txr = &adapter->tx_rings[i];
2927                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2928                 txdctl |= IXGBE_TXDCTL_ENABLE;
2929                 /* Set WTHRESH to 8, burst writeback */
2930                 txdctl |= (8 << 16);
2931                 /*
2932                  * When the internal queue falls below PTHRESH (32),
2933                  * start prefetching as long as there are at least
2934                  * HTHRESH (1) buffers ready. The values are taken
2935                  * from the Intel Linux driver 3.8.21.
2936                  * Prefetching enables tx line rate even with 1 queue.
2937                  */
2938                 txdctl |= (32 << 0) | (1 << 8);
2939                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2940         }
2941
2942         for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2943                 rxr = &adapter->rx_rings[i];
2944                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2945                 if (hw->mac.type == ixgbe_mac_82598EB) {
2946                         /*
2947                          * PTHRESH = 21
2948                          * HTHRESH = 4
2949                          * WTHRESH = 8
2950                          */
2951                         rxdctl &= ~0x3FFFFF;
2952                         rxdctl |= 0x080420;
2953                 }
2954                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2955                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2956                 for (; j < 10; j++) {
2957                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2958                             IXGBE_RXDCTL_ENABLE)
2959                                 break;
2960                         else
2961                                 msec_delay(1);
2962                 }
2963                 wmb();
2964
2965                 /*
2966                  * In netmap mode, we must preserve the buffers made
2967                  * available to userspace before the if_init()
2968                  * (this is true by default on the TX side, because
2969                  * init makes all buffers available to userspace).
2970                  *
2971                  * netmap_reset() and the device specific routines
2972                  * (e.g. ixgbe_setup_receive_rings()) map these
2973                  * buffers at the end of the NIC ring, so here we
2974                  * must set the RDT (tail) register to make sure
2975                  * they are not overwritten.
2976                  *
2977                  * In this driver the NIC ring starts at RDH = 0,
2978                  * RDT points to the last slot available for reception (?),
2979                  * so RDT = num_rx_desc - 1 means the whole ring is available.
2980                  */
2981 #ifdef DEV_NETMAP
2982                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
2983                     (ifp->if_capenable & IFCAP_NETMAP)) {
2984                         struct netmap_adapter *na = NA(adapter->ifp);
2985                         struct netmap_kring *kring = &na->rx_rings[i];
2986                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2987
2988                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
2989                 } else
2990 #endif /* DEV_NETMAP */
2991                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
2992                             adapter->num_rx_desc - 1);
2993         }
2994
2995         /* Enable Receive engine */
2996         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2997         if (hw->mac.type == ixgbe_mac_82598EB)
2998                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2999         rxctrl |= IXGBE_RXCTRL_RXEN;
3000         ixgbe_enable_rx_dma(hw, rxctrl);
3001
3002         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3003
3004         /* Set up MSI-X routing */
3005         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3006                 ixgbe_configure_ivars(adapter);
3007                 /* Set up auto-mask */
3008                 if (hw->mac.type == ixgbe_mac_82598EB)
3009                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3010                 else {
3011                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3012                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3013                 }
3014         } else {  /* Simple settings for Legacy/MSI */
3015                 ixgbe_set_ivar(adapter, 0, 0, 0);
3016                 ixgbe_set_ivar(adapter, 0, 0, 1);
3017                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3018         }
3019
3020         ixgbe_init_fdir(adapter);
3021
3022         /*
3023          * Check on any SFP devices that
3024          * need to be kick-started
3025          */
3026         if (hw->phy.type == ixgbe_phy_none) {
3027                 err = hw->phy.ops.identify(hw);
3028                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3029                         device_printf(dev,
3030                             "Unsupported SFP+ module type was detected.\n");
3031                         return;
3032                 }
3033         }
3034
3035         /* Set moderation on the Link interrupt */
3036         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3037
3038         /* Config/Enable Link */
3039         ixgbe_config_link(adapter);
3040
3041         /* Hardware Packet Buffer & Flow Control setup */
3042         ixgbe_config_delay_values(adapter);
3043
3044         /* Initialize the FC settings */
3045         ixgbe_start_hw(hw);
3046
3047         /* Set up VLAN support and filter */
3048         ixgbe_setup_vlan_hw_support(adapter);
3049
3050         /* Setup DMA Coalescing */
3051         ixgbe_config_dmac(adapter);
3052
3053         /* And now turn on interrupts */
3054         ixgbe_enable_intr(adapter);
3055
3056         /* Enable the use of the MBX by the VFs */
3057         if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3058                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3059                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3060                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3061         }
3062
3063         /* Now inform the stack we're ready */
3064         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3065
3066         return;
3067 } /* ixgbe_init_locked */
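/*
 * Illustrative sketch, not part of the driver: how the queue-enable loop
 * above packs its thresholds into TXDCTL.  The shifts (bits 0, 8 and 16)
 * come from the code itself; the 7-bit field width is an assumption taken
 * from the 82599 datasheet.  Calling this with (32, 1, 8) reproduces the
 * value programmed above, before the ENABLE bit is OR'd in.
 */
static inline u32
ix_txdctl_thresh_sketch(u32 pthresh, u32 hthresh, u32 wthresh)
{
        return (((pthresh & 0x7F) << 0) |   /* PTHRESH: prefetch threshold   */
                ((hthresh & 0x7F) << 8) |   /* HTHRESH: host threshold       */
                ((wthresh & 0x7F) << 16));  /* WTHRESH: write-back threshold */
}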
3068
3069 /************************************************************************
3070  * ixgbe_init
3071  ************************************************************************/
3072 static void
3073 ixgbe_init(void *arg)
3074 {
3075         struct adapter *adapter = arg;
3076
3077         IXGBE_CORE_LOCK(adapter);
3078         ixgbe_init_locked(adapter);
3079         IXGBE_CORE_UNLOCK(adapter);
3080
3081         return;
3082 } /* ixgbe_init */
3083
3084 /************************************************************************
3085  * ixgbe_set_ivar
3086  *
3087  *   Setup the correct IVAR register for a particular MSI-X interrupt
3088  *     (yes this is all very magic and confusing :)
3089  *    - entry is the register array entry
3090  *    - vector is the MSI-X vector for this queue
3091  *    - type is RX/TX/MISC
3092  ************************************************************************/
3093 static void
3094 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3095 {
3096         struct ixgbe_hw *hw = &adapter->hw;
3097         u32 ivar, index;
3098
3099         vector |= IXGBE_IVAR_ALLOC_VAL;
3100
3101         switch (hw->mac.type) {
3102
3103         case ixgbe_mac_82598EB:
3104                 if (type == -1)
3105                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3106                 else
3107                         entry += (type * 64);
3108                 index = (entry >> 2) & 0x1F;
3109                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3110                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3111                 ivar |= (vector << (8 * (entry & 0x3)));
3112                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3113                 break;
3114
3115         case ixgbe_mac_82599EB:
3116         case ixgbe_mac_X540:
3117         case ixgbe_mac_X550:
3118         case ixgbe_mac_X550EM_x:
3119         case ixgbe_mac_X550EM_a:
3120                 if (type == -1) { /* MISC IVAR */
3121                         index = (entry & 1) * 8;
3122                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3123                         ivar &= ~(0xFF << index);
3124                         ivar |= (vector << index);
3125                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3126                 } else {          /* RX/TX IVARS */
3127                         index = (16 * (entry & 1)) + (8 * type);
3128                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3129                         ivar &= ~(0xFF << index);
3130                         ivar |= (vector << index);
3131                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3132                 }
3133                 break;
3134         default:
3135                 break;
3136         }
3137 } /* ixgbe_set_ivar */
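/*
 * Illustrative sketch, not part of the driver: where a given (entry, type)
 * pair lands for the 82599/X540/X550 branch above.  Example: queue 3 uses
 * IVAR(3 >> 1) = IVAR(1); its RX vector (type 0) occupies bits 23:16
 * (16*1 + 8*0) and its TX vector (type 1) bits 31:24 (16*1 + 8*1).
 */
static inline void
ix_ivar_slot_sketch(u8 entry, s8 type, u32 *reg, u32 *shift)
{
        *reg = entry >> 1;                        /* which IVAR register    */
        *shift = (16 * (entry & 1)) + (8 * type); /* bit offset of the byte */
}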
3138
3139 /************************************************************************
3140  * ixgbe_configure_ivars
3141  ************************************************************************/
3142 static void
3143 ixgbe_configure_ivars(struct adapter *adapter)
3144 {
3145         struct ix_queue *que = adapter->queues;
3146         u32             newitr;
3147
3148         if (ixgbe_max_interrupt_rate > 0)
3149                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3150         else {
3151                 /*
3152                  * Disable DMA coalescing if interrupt moderation is
3153                  * disabled.
3154                  */
3155                 adapter->dmac = 0;
3156                 newitr = 0;
3157         }
3158
3159         for (int i = 0; i < adapter->num_queues; i++, que++) {
3160                 struct rx_ring *rxr = &adapter->rx_rings[i];
3161                 struct tx_ring *txr = &adapter->tx_rings[i];
3162                 /* First the RX queue entry */
3163                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3164                 /* ... and the TX */
3165                 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3166                 /* Set an Initial EITR value */
3167                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3168         }
3169
3170         /* For the Link interrupt */
3171         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3172 } /* ixgbe_configure_ivars */
3173
3174 /************************************************************************
3175  * ixgbe_config_gpie
3176  ************************************************************************/
3177 static void
3178 ixgbe_config_gpie(struct adapter *adapter)
3179 {
3180         struct ixgbe_hw *hw = &adapter->hw;
3181         u32             gpie;
3182
3183         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3184
3185         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3186                 /* Enable Enhanced MSI-X mode */
3187                 gpie |= IXGBE_GPIE_MSIX_MODE
3188                      |  IXGBE_GPIE_EIAME
3189                      |  IXGBE_GPIE_PBA_SUPPORT
3190                      |  IXGBE_GPIE_OCD;
3191         }
3192
3193         /* Fan Failure Interrupt */
3194         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3195                 gpie |= IXGBE_SDP1_GPIEN;
3196
3197         /* Thermal Sensor Interrupt */
3198         if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3199                 gpie |= IXGBE_SDP0_GPIEN_X540;
3200
3201         /* Link detection */
3202         switch (hw->mac.type) {
3203         case ixgbe_mac_82599EB:
3204                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3205                 break;
3206         case ixgbe_mac_X550EM_x:
3207         case ixgbe_mac_X550EM_a:
3208                 gpie |= IXGBE_SDP0_GPIEN_X540;
3209                 break;
3210         default:
3211                 break;
3212         }
3213
3214         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3215
3216         return;
3217 } /* ixgbe_config_gpie */
3218
3219 /************************************************************************
3220  * ixgbe_config_delay_values
3221  *
3222  *   Requires adapter->max_frame_size to be set.
3223  ************************************************************************/
3224 static void
3225 ixgbe_config_delay_values(struct adapter *adapter)
3226 {
3227         struct ixgbe_hw *hw = &adapter->hw;
3228         u32             rxpb, frame, size, tmp;
3229
3230         frame = adapter->max_frame_size;
3231
3232         /* Calculate High Water */
3233         switch (hw->mac.type) {
3234         case ixgbe_mac_X540:
3235         case ixgbe_mac_X550:
3236         case ixgbe_mac_X550EM_x:
3237         case ixgbe_mac_X550EM_a:
3238                 tmp = IXGBE_DV_X540(frame, frame);
3239                 break;
3240         default:
3241                 tmp = IXGBE_DV(frame, frame);
3242                 break;
3243         }
3244         size = IXGBE_BT2KB(tmp);
3245         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3246         hw->fc.high_water[0] = rxpb - size;
3247
3248         /* Now calculate Low Water */
3249         switch (hw->mac.type) {
3250         case ixgbe_mac_X540:
3251         case ixgbe_mac_X550:
3252         case ixgbe_mac_X550EM_x:
3253         case ixgbe_mac_X550EM_a:
3254                 tmp = IXGBE_LOW_DV_X540(frame);
3255                 break;
3256         default:
3257                 tmp = IXGBE_LOW_DV(frame);
3258                 break;
3259         }
3260         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3261
3262         hw->fc.pause_time = IXGBE_FC_PAUSE;
3263         hw->fc.send_xon = TRUE;
3264 } /* ixgbe_config_delay_values */
3265
3266 /************************************************************************
3267  * ixgbe_set_multi - Multicast Update
3268  *
3269  *   Called whenever multicast address list is updated.
3270  ************************************************************************/
3271 static void
3272 ixgbe_set_multi(struct adapter *adapter)
3273 {
3274         struct ifmultiaddr   *ifma;
3275         struct ixgbe_mc_addr *mta;
3276         struct ifnet         *ifp = adapter->ifp;
3277         u8                   *update_ptr;
3278         int                  mcnt = 0;
3279         u32                  fctrl;
3280
3281         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3282
3283         mta = adapter->mta;
3284         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3285
3286 #if __FreeBSD_version < 800000
3287         IF_ADDR_LOCK(ifp);
3288 #else
3289         if_maddr_rlock(ifp);
3290 #endif
3291         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3292                 if (ifma->ifma_addr->sa_family != AF_LINK)
3293                         continue;
3294                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3295                         break;
3296                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3297                     mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3298                 mta[mcnt].vmdq = adapter->pool;
3299                 mcnt++;
3300         }
3301 #if __FreeBSD_version < 800000
3302         IF_ADDR_UNLOCK(ifp);
3303 #else
3304         if_maddr_runlock(ifp);
3305 #endif
3306
3307         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3308         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3309         if (ifp->if_flags & IFF_PROMISC)
3310                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3311         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3312             ifp->if_flags & IFF_ALLMULTI) {
3313                 fctrl |= IXGBE_FCTRL_MPE;
3314                 fctrl &= ~IXGBE_FCTRL_UPE;
3315         } else
3316                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3317
3318         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3319
3320         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3321                 update_ptr = (u8 *)mta;
3322                 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3323                     ixgbe_mc_array_itr, TRUE);
3324         }
3325
3326         return;
3327 } /* ixgbe_set_multi */
3328
3329 /************************************************************************
3330  * ixgbe_mc_array_itr
3331  *
3332  *   An iterator function needed by the multicast shared code.
3333  *   It feeds the shared code routine the addresses in the
3334  *   array of ixgbe_set_multi() one by one.
3335  ************************************************************************/
3336 static u8 *
3337 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3338 {
3339         struct ixgbe_mc_addr *mta;
3340
3341         mta = (struct ixgbe_mc_addr *)*update_ptr;
3342         *vmdq = mta->vmdq;
3343
3344         *update_ptr = (u8*)(mta + 1);
3345
3346         return (mta->addr);
3347 } /* ixgbe_mc_array_itr */
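/*
 * Illustrative sketch, not part of the driver: how the shared code
 * presumably walks the address array using the iterator above; the callback
 * advances the driver's cursor and supplies the VMDq pool for each address.
 * The real loop lives in the shared ixgbe code behind
 * ixgbe_update_mc_addr_list().
 */
static void
ix_walk_mc_list_sketch(struct ixgbe_hw *hw, u8 *update_ptr, u32 mcnt)
{
        u32 vmdq;
        u8  *addr;

        for (u32 i = 0; i < mcnt; i++) {
                addr = ixgbe_mc_array_itr(hw, &update_ptr, &vmdq);
                /* ...the shared code would program addr/vmdq into the MTA... */
                (void)addr;
        }
}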
3348
3349 /************************************************************************
3350  * ixgbe_local_timer - Timer routine
3351  *
3352  *   Checks for link status, updates statistics,
3353  *   and runs the watchdog check.
3354  ************************************************************************/
3355 static void
3356 ixgbe_local_timer(void *arg)
3357 {
3358         struct adapter  *adapter = arg;
3359         device_t        dev = adapter->dev;
3360         struct ix_queue *que = adapter->queues;
3361         u64             queues = 0;
3362         int             hung = 0;
3363
3364         mtx_assert(&adapter->core_mtx, MA_OWNED);
3365
3366         /* Check for pluggable optics */
3367         if (adapter->sfp_probe)
3368                 if (!ixgbe_sfp_probe(adapter))
3369                         goto out; /* Nothing to do */
3370
3371         ixgbe_update_link_status(adapter);
3372         ixgbe_update_stats_counters(adapter);
3373
3374         /*
3375          * Check the TX queues status
3376          *      - mark hung queues so we don't schedule on them
3377          *      - watchdog only if all queues show hung
3378          */
3379         for (int i = 0; i < adapter->num_queues; i++, que++) {
3380                 /* Keep track of queues with work for soft irq */
3381                 if (que->txr->busy)
3382                         queues |= ((u64)1 << que->me);
3383                 /*
3384                  * Each time txeof runs without cleaning, but there
3385                  * are uncleaned descriptors it increments busy. If
3386                  * we get to the MAX we declare it hung.
3387                  */
3388                 if (que->busy == IXGBE_QUEUE_HUNG) {
3389                         ++hung;
3390                         /* Mark the queue as inactive */
3391                         adapter->active_queues &= ~((u64)1 << que->me);
3392                         continue;
3393                 } else {
3394                         /* Check if we've come back from hung */
3395                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3396                                 adapter->active_queues |= ((u64)1 << que->me);
3397                 }
3398                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3399                         device_printf(dev,
3400                             "Warning queue %d appears to be hung!\n", i);
3401                         que->txr->busy = IXGBE_QUEUE_HUNG;
3402                         ++hung;
3403                 }
3404         }
3405
3406         /* Only truly watchdog if all queues show hung */
3407         if (hung == adapter->num_queues)
3408                 goto watchdog;
3409         else if (queues != 0) { /* Force an IRQ on queues with work */
3410                 ixgbe_rearm_queues(adapter, queues);
3411         }
3412
3413 out:
3414         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3415         return;
3416
3417 watchdog:
3418         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3419         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3420         adapter->watchdog_events++;
3421         ixgbe_init_locked(adapter);
3422 } /* ixgbe_local_timer */
3423
3424 /************************************************************************
3425  * ixgbe_sfp_probe
3426  *
3427  *   Determine if a port had optics inserted.
3428  ************************************************************************/
3429 static bool
3430 ixgbe_sfp_probe(struct adapter *adapter)
3431 {
3432         struct ixgbe_hw *hw = &adapter->hw;
3433         device_t        dev = adapter->dev;
3434         bool            result = FALSE;
3435
3436         if ((hw->phy.type == ixgbe_phy_nl) &&
3437             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3438                 s32 ret = hw->phy.ops.identify_sfp(hw);
3439                 if (ret)
3440                         goto out;
3441                 ret = hw->phy.ops.reset(hw);
3442                 adapter->sfp_probe = FALSE;
3443                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3444                         device_printf(dev, "Unsupported SFP+ module detected!\n");
3445                         device_printf(dev,
3446                             "Reload driver with supported module.\n");
3447                         goto out;
3448                 } else
3449                         device_printf(dev, "SFP+ module detected!\n");
3450                 /* We now have supported optics */
3451                 result = TRUE;
3452         }
3453 out:
3454
3455         return (result);
3456 } /* ixgbe_sfp_probe */
3457
3458 /************************************************************************
3459  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3460  ************************************************************************/
3461 static void
3462 ixgbe_handle_mod(void *context, int pending)
3463 {
3464         struct adapter  *adapter = context;
3465         struct ixgbe_hw *hw = &adapter->hw;
3466         device_t        dev = adapter->dev;
3467         u32             err, cage_full = 0;
3468
3469         if (adapter->hw.need_crosstalk_fix) {
3470                 switch (hw->mac.type) {
3471                 case ixgbe_mac_82599EB:
3472                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3473                             IXGBE_ESDP_SDP2;
3474                         break;
3475                 case ixgbe_mac_X550EM_x:
3476                 case ixgbe_mac_X550EM_a:
3477                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3478                             IXGBE_ESDP_SDP0;
3479                         break;
3480                 default:
3481                         break;
3482                 }
3483
3484                 if (!cage_full)
3485                         return;
3486         }
3487
3488         err = hw->phy.ops.identify_sfp(hw);
3489         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3490                 device_printf(dev,
3491                     "Unsupported SFP+ module type was detected.\n");
3492                 return;
3493         }
3494
3495         err = hw->mac.ops.setup_sfp(hw);
3496         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3497                 device_printf(dev,
3498                     "Setup failure - unsupported SFP+ module type.\n");
3499                 return;
3500         }
3501         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3502 } /* ixgbe_handle_mod */
3503
3504
3505 /************************************************************************
3506  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3507  ************************************************************************/
3508 static void
3509 ixgbe_handle_msf(void *context, int pending)
3510 {
3511         struct adapter  *adapter = context;
3512         struct ixgbe_hw *hw = &adapter->hw;
3513         u32             autoneg;
3514         bool            negotiate;
3515
3516         /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3517         adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3518
3519         autoneg = hw->phy.autoneg_advertised;
3520         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3521                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3522         if (hw->mac.ops.setup_link)
3523                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3524
3525         /* Adjust media types shown in ifconfig */
3526         ifmedia_removeall(&adapter->media);
3527         ixgbe_add_media_types(adapter);
3528         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3529 } /* ixgbe_handle_msf */
3530
3531 /************************************************************************
3532  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3533  ************************************************************************/
3534 static void
3535 ixgbe_handle_phy(void *context, int pending)
3536 {
3537         struct adapter  *adapter = context;
3538         struct ixgbe_hw *hw = &adapter->hw;
3539         int             error;
3540
3541         error = hw->phy.ops.handle_lasi(hw);
3542         if (error == IXGBE_ERR_OVERTEMP)
3543                 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3544         else if (error)
3545                 device_printf(adapter->dev,
3546                     "Error handling LASI interrupt: %d\n", error);
3547 } /* ixgbe_handle_phy */
3548
3549 /************************************************************************
3550  * ixgbe_stop - Stop the hardware
3551  *
3552  *   Disables all traffic on the adapter by issuing a
3553  *   global reset on the MAC and deallocates TX/RX buffers.
3554  ************************************************************************/
3555 static void
3556 ixgbe_stop(void *arg)
3557 {
3558         struct ifnet    *ifp;
3559         struct adapter  *adapter = arg;
3560         struct ixgbe_hw *hw = &adapter->hw;
3561
3562         ifp = adapter->ifp;
3563
3564         mtx_assert(&adapter->core_mtx, MA_OWNED);
3565
3566         INIT_DEBUGOUT("ixgbe_stop: begin\n");
3567         ixgbe_disable_intr(adapter);
3568         callout_stop(&adapter->timer);
3569
3570         /* Let the stack know...*/
3571         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3572
3573         ixgbe_reset_hw(hw);
3574         hw->adapter_stopped = FALSE;
3575         ixgbe_stop_adapter(hw);
3576         if (hw->mac.type == ixgbe_mac_82599EB)
3577                 ixgbe_stop_mac_link_on_d3_82599(hw);
3578         /* Turn off the laser - noop with no optics */
3579         ixgbe_disable_tx_laser(hw);
3580
3581         /* Update the stack */
3582         adapter->link_up = FALSE;
3583         ixgbe_update_link_status(adapter);
3584
3585         /* reprogram the RAR[0] in case user changed it. */
3586         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3587
3588         return;
3589 } /* ixgbe_stop */
3590
3591 /************************************************************************
3592  * ixgbe_update_link_status - Update OS on link state
3593  *
3594  * Note: Only updates the OS on the cached link state.
3595  *       The real check of the hardware only happens with
3596  *       a link interrupt.
3597  ************************************************************************/
3598 static void
3599 ixgbe_update_link_status(struct adapter *adapter)
3600 {
3601         struct ifnet *ifp = adapter->ifp;
3602         device_t     dev = adapter->dev;
3603
3604         if (adapter->link_up) {
3605                 if (adapter->link_active == FALSE) {
3606                         if (bootverbose)
3607                                 device_printf(dev, "Link is up %d Gbps %s\n",
3608                                     ((adapter->link_speed == 128) ? 10 : 1),
3609                                     "Full Duplex");
3610                         adapter->link_active = TRUE;
3611                         /* Update any Flow Control changes */
3612                         ixgbe_fc_enable(&adapter->hw);
3613                         /* Update DMA coalescing config */
3614                         ixgbe_config_dmac(adapter);
3615                         if_link_state_change(ifp, LINK_STATE_UP);
3616                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3617                                 ixgbe_ping_all_vfs(adapter);
3618                 }
3619         } else { /* Link down */
3620                 if (adapter->link_active == TRUE) {
3621                         if (bootverbose)
3622                                 device_printf(dev, "Link is Down\n");
3623                         if_link_state_change(ifp, LINK_STATE_DOWN);
3624                         adapter->link_active = FALSE;
3625                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3626                                 ixgbe_ping_all_vfs(adapter);
3627                 }
3628         }
3629
3630         return;
3631 } /* ixgbe_update_link_status */
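/*
 * Illustrative sketch, not part of the driver: the "128" tested above is
 * IXGBE_LINK_SPEED_10GB_FULL (0x80) from ixgbe_type.h; a more explicit
 * decode could look like this.  The macro values are assumptions to verify
 * against ixgbe_type.h.
 */
static const char *
ix_speed_str_sketch(u32 link_speed)
{
        switch (link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
                return ("10 Gbps");
        case IXGBE_LINK_SPEED_1GB_FULL:
                return ("1 Gbps");
        case IXGBE_LINK_SPEED_100_FULL:
                return ("100 Mbps");
        default:
                return ("unknown");
        }
}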
3632
3633 /************************************************************************
3634  * ixgbe_config_dmac - Configure DMA Coalescing
3635  ************************************************************************/
3636 static void
3637 ixgbe_config_dmac(struct adapter *adapter)
3638 {
3639         struct ixgbe_hw          *hw = &adapter->hw;
3640         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3641
3642         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3643                 return;
3644
3645         if (dcfg->watchdog_timer ^ adapter->dmac ||
3646             dcfg->link_speed ^ adapter->link_speed) {
3647                 dcfg->watchdog_timer = adapter->dmac;
3648                 dcfg->fcoe_en = false;
3649                 dcfg->link_speed = adapter->link_speed;
3650                 dcfg->num_tcs = 1;
3651
3652                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3653                     dcfg->watchdog_timer, dcfg->link_speed);
3654
3655                 hw->mac.ops.dmac_config(hw);
3656         }
3657 } /* ixgbe_config_dmac */
3658
3659 /************************************************************************
3660  * ixgbe_enable_intr
3661  ************************************************************************/
3662 static void
3663 ixgbe_enable_intr(struct adapter *adapter)
3664 {
3665         struct ixgbe_hw *hw = &adapter->hw;
3666         struct ix_queue *que = adapter->queues;
3667         u32             mask, fwsm;
3668
3669         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3670
3671         switch (adapter->hw.mac.type) {
3672         case ixgbe_mac_82599EB:
3673                 mask |= IXGBE_EIMS_ECC;
3674                 /* Temperature sensor on some adapters */
3675                 mask |= IXGBE_EIMS_GPI_SDP0;
3676                 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3677                 mask |= IXGBE_EIMS_GPI_SDP1;
3678                 mask |= IXGBE_EIMS_GPI_SDP2;
3679                 break;
3680         case ixgbe_mac_X540:
3681                 /* Detect if Thermal Sensor is enabled */
3682                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3683                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3684                         mask |= IXGBE_EIMS_TS;
3685                 mask |= IXGBE_EIMS_ECC;
3686                 break;
3687         case ixgbe_mac_X550:
3688                 /* MAC thermal sensor is automatically enabled */
3689                 mask |= IXGBE_EIMS_TS;
3690                 mask |= IXGBE_EIMS_ECC;
3691                 break;
3692         case ixgbe_mac_X550EM_x:
3693         case ixgbe_mac_X550EM_a:
3694                 /* Some devices use SDP0 for important information */
3695                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3696                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3697                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3698                     hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3699                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3700                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3701                         mask |= IXGBE_EICR_GPI_SDP0_X540;
3702                 mask |= IXGBE_EIMS_ECC;
3703                 break;
3704         default:
3705                 break;
3706         }
3707
3708         /* Enable Fan Failure detection */
3709         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3710                 mask |= IXGBE_EIMS_GPI_SDP1;
3711         /* Enable SR-IOV */
3712         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3713                 mask |= IXGBE_EIMS_MAILBOX;
3714         /* Enable Flow Director */
3715         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3716                 mask |= IXGBE_EIMS_FLOW_DIR;
3717
3718         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3719
3720         /* With MSI-X we use auto clear */
3721         if (adapter->msix_mem) {
3722                 mask = IXGBE_EIMS_ENABLE_MASK;
3723                 /* Don't autoclear Link */
3724                 mask &= ~IXGBE_EIMS_OTHER;
3725                 mask &= ~IXGBE_EIMS_LSC;
3726                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3727                         mask &= ~IXGBE_EIMS_MAILBOX;
3728                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3729         }
3730
3731         /*
3732          * Now enable all queues, this is done separately to
3733          * allow for handling the extended (beyond 32) MSI-X
3734          * vectors that can be used by 82599
3735          */
3736         for (int i = 0; i < adapter->num_queues; i++, que++)
3737                 ixgbe_enable_queue(adapter, que->msix);
3738
3739         IXGBE_WRITE_FLUSH(hw);
3740
3741         return;
3742 } /* ixgbe_enable_intr */
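/*
 * Illustrative sketch, not part of the driver, of the per-queue unmask done
 * by ixgbe_enable_queue() in the loop above: on 82599 and later, vectors
 * 0-31 presumably map to EIMS_EX(0) and 32-63 to EIMS_EX(1), while the
 * 82598 keeps everything in EIMS.  Treat this as a shape sketch of the
 * register split, not the driver's exact routine.
 */
static void
ix_enable_queue_sketch(struct ixgbe_hw *hw, u32 vector)
{
        u64 queue = (u64)1 << vector;

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, queue & IXGBE_EIMS_RTX_QUEUE);
        } else {
                if (queue & 0xFFFFFFFF)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0),
                            queue & 0xFFFFFFFF);
                if (queue >> 32)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), queue >> 32);
        }
}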
3743
3744 /************************************************************************
3745  * ixgbe_disable_intr
3746  ************************************************************************/
3747 static void
3748 ixgbe_disable_intr(struct adapter *adapter)
3749 {
3750         if (adapter->msix_mem)
3751                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3752         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3753                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3754         } else {
3755                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3756                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3757                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3758         }
3759         IXGBE_WRITE_FLUSH(&adapter->hw);
3760
3761         return;
3762 } /* ixgbe_disable_intr */
3763
3764 /************************************************************************
3765  * ixgbe_legacy_irq - Legacy Interrupt Service routine
3766  ************************************************************************/
3767 static void
3768 ixgbe_legacy_irq(void *arg)
3769 {
3770         struct ix_queue *que = arg;
3771         struct adapter  *adapter = que->adapter;
3772         struct ixgbe_hw *hw = &adapter->hw;
3773         struct ifnet    *ifp = adapter->ifp;
3774         struct tx_ring  *txr = adapter->tx_rings;
3775         bool            more = false;
3776         u32             eicr, eicr_mask;
3777
3778         /* Silicon errata #26 on 82598 */
3779         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3780
3781         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3782
3783         ++que->irqs;
3784         if (eicr == 0) {
3785                 ixgbe_enable_intr(adapter);
3786                 return;
3787         }
3788
3789         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3790                 more = ixgbe_rxeof(que);
3791
3792                 IXGBE_TX_LOCK(txr);
3793                 ixgbe_txeof(txr);
3794                 if (!ixgbe_ring_empty(ifp, txr->br))
3795                         ixgbe_start_locked(ifp, txr);
3796                 IXGBE_TX_UNLOCK(txr);
3797         }
3798
3799         /* Check for fan failure */
3800         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3801                 ixgbe_check_fan_failure(adapter, eicr, true);
3802                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3803         }
3804
3805         /* Link status change */
3806         if (eicr & IXGBE_EICR_LSC)
3807                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
3808
3809         if (ixgbe_is_sfp(hw)) {
3810                 /* Pluggable optics-related interrupt */
3811                 if (hw->mac.type >= ixgbe_mac_X540)
3812                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3813                 else
3814                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3815
3816                 if (eicr & eicr_mask) {
3817                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3818                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3819                 }
3820
3821                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3822                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3823                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
3824                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3825                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3826                 }
3827         }
3828
3829         /* External PHY interrupt */
3830         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3831             (eicr & IXGBE_EICR_GPI_SDP0_X540))
3832                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
3833
3834         if (more)
3835                 taskqueue_enqueue(que->tq, &que->que_task);
3836         else
3837                 ixgbe_enable_intr(adapter);
3838
3839         return;
3840 } /* ixgbe_legacy_irq */
3841
3842 /************************************************************************
3843  * ixgbe_free_pci_resources
3844  ************************************************************************/
3845 static void
3846 ixgbe_free_pci_resources(struct adapter *adapter)
3847 {
3848         struct ix_queue *que = adapter->queues;
3849         device_t        dev = adapter->dev;
3850         int             rid, memrid;
3851
3852         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3853                 memrid = PCIR_BAR(MSIX_82598_BAR);
3854         else
3855                 memrid = PCIR_BAR(MSIX_82599_BAR);
3856
3857         /*
3858          * There is a slight possibility of a failure mode
3859          * in attach that will result in entering this function
3860          * before interrupt resources have been initialized, and
3861          * in that case we do not want to execute the loops below.
3862          * We can detect this reliably by the state of the adapter's
3863          * res pointer.
3864          */
3865         if (adapter->res == NULL)
3866                 goto mem;
3867
3868         /*
3869          * Release all msix queue resources:
3870          */
3871         for (int i = 0; i < adapter->num_queues; i++, que++) {
3872                 rid = que->msix + 1;
3873                 if (que->tag != NULL) {
3874                         bus_teardown_intr(dev, que->res, que->tag);
3875                         que->tag = NULL;
3876                 }
3877                 if (que->res != NULL)
3878                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3879         }
3880
3881
3882         if (adapter->tag != NULL) {
3883                 bus_teardown_intr(dev, adapter->res, adapter->tag);
3884                 adapter->tag = NULL;
3885         }
3886
3887         /* Clean the Legacy or Link interrupt last */
3888         if (adapter->res != NULL)
3889                 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3890                     adapter->res);
3891
3892 mem:
3893         if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3894             (adapter->feat_en & IXGBE_FEATURE_MSIX))
3895                 pci_release_msi(dev);
3896
3897         if (adapter->msix_mem != NULL)
3898                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3899                     adapter->msix_mem);
3900
3901         if (adapter->pci_mem != NULL)
3902                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3903                     adapter->pci_mem);
3904
3905         return;
3906 } /* ixgbe_free_pci_resources */
3907
3908 /************************************************************************
3909  * ixgbe_set_sysctl_value
3910  ************************************************************************/
3911 static void
3912 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3913     const char *description, int *limit, int value)
3914 {
3915         *limit = value;
3916         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3917             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3918             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3919 } /* ixgbe_set_sysctl_value */
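
/*
 * Example use of the helper above (illustrative sketch only; the node
 * name and limit variable shown here are assumptions, not necessarily
 * what the driver registers elsewhere):
 *
 *     ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
 *         "max number of rx packets to process",
 *         &adapter->rx_process_limit, 256);
 *
 * This hangs a CTLFLAG_RW integer node off the device's sysctl tree
 * (dev.ix.<unit>) whose backing storage is the passed-in limit pointer.
 */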
3920
3921 /************************************************************************
3922  * ixgbe_sysctl_flowcntl
3923  *
3924  *   SYSCTL wrapper around setting Flow Control
3925  ************************************************************************/
3926 static int
3927 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3928 {
3929         struct adapter *adapter;
3930         int            error, fc;
3931
3932         adapter = (struct adapter *)arg1;
3933         fc = adapter->hw.fc.current_mode;
3934
3935         error = sysctl_handle_int(oidp, &fc, 0, req);
3936         if ((error) || (req->newptr == NULL))
3937                 return (error);
3938
3939         /* Don't bother if it's not changed */
3940         if (fc == adapter->hw.fc.current_mode)
3941                 return (0);
3942
3943         return ixgbe_set_flowcntl(adapter, fc);
3944 } /* ixgbe_sysctl_flowcntl */
3945
3946 /************************************************************************
3947  * ixgbe_set_flowcntl - Set flow control
3948  *
3949  *   Flow control values:
3950  *     0 - off
3951  *     1 - rx pause
3952  *     2 - tx pause
3953  *     3 - full
3954  ************************************************************************/
3955 static int
3956 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3957 {
3958         switch (fc) {
3959         case ixgbe_fc_rx_pause:
3960         case ixgbe_fc_tx_pause:
3961         case ixgbe_fc_full:
3962                 adapter->hw.fc.requested_mode = fc;
3963                 if (adapter->num_queues > 1)
3964                         ixgbe_disable_rx_drop(adapter);
3965                 break;
3966         case ixgbe_fc_none:
3967                 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3968                 if (adapter->num_queues > 1)
3969                         ixgbe_enable_rx_drop(adapter);
3970                 break;
3971         default:
3972                 return (EINVAL);
3973         }
3974
3975         /* Don't autoneg if forcing a value */
3976         adapter->hw.fc.disable_fc_autoneg = TRUE;
3977         ixgbe_fc_enable(&adapter->hw);
3978
3979         return (0);
3980 } /* ixgbe_set_flowcntl */
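
/*
 * Example (hedged): assuming the handler above backs a node named
 * dev.ix.<unit>.fc, the value-to-mode mapping from userland is:
 *
 *     sysctl dev.ix.0.fc=0      # flow control off (RX drop enabled
 *                               #   when running multiqueue)
 *     sysctl dev.ix.0.fc=3      # full RX/TX pause
 *
 * Anything outside 0..3 is rejected with EINVAL, and flow control
 * autonegotiation is disabled whenever a value is forced this way.
 */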
3981
3982 /************************************************************************
3983  * ixgbe_enable_rx_drop
3984  *
3985  *   Enable the hardware to drop packets when the buffer is
3986  *   full. This is useful with multiqueue, so that no single
3987  *   queue being full stalls the entire RX engine. We only
3988  *   enable this when Multiqueue is enabled AND Flow Control
3989  *   is disabled.
3990  ************************************************************************/
3991 static void
3992 ixgbe_enable_rx_drop(struct adapter *adapter)
3993 {
3994         struct ixgbe_hw *hw = &adapter->hw;
3995         struct rx_ring  *rxr;
3996         u32             srrctl;
3997
3998         for (int i = 0; i < adapter->num_queues; i++) {
3999                 rxr = &adapter->rx_rings[i];
4000                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4001                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4002                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4003         }
4004
4005         /* enable drop for each vf */
4006         for (int i = 0; i < adapter->num_vfs; i++) {
4007                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4008                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4009                     IXGBE_QDE_ENABLE));
4010         }
4011 } /* ixgbe_enable_rx_drop */
4012
4013 /************************************************************************
4014  * ixgbe_disable_rx_drop
4015  ************************************************************************/
4016 static void
4017 ixgbe_disable_rx_drop(struct adapter *adapter)
4018 {
4019         struct ixgbe_hw *hw = &adapter->hw;
4020         struct rx_ring  *rxr;
4021         u32             srrctl;
4022
4023         for (int i = 0; i < adapter->num_queues; i++) {
4024                 rxr = &adapter->rx_rings[i];
4025                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4026                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4027                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4028         }
4029
4030         /* disable drop for each vf */
4031         for (int i = 0; i < adapter->num_vfs; i++) {
4032                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4033                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4034         }
4035 } /* ixgbe_disable_rx_drop */
4036
4037 /************************************************************************
4038  * ixgbe_sysctl_advertise
4039  *
4040  *   SYSCTL wrapper around setting advertised speed
4041  ************************************************************************/
4042 static int
4043 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4044 {
4045         struct adapter *adapter;
4046         int            error, advertise;
4047
4048         adapter = (struct adapter *)arg1;
4049         advertise = adapter->advertise;
4050
4051         error = sysctl_handle_int(oidp, &advertise, 0, req);
4052         if ((error) || (req->newptr == NULL))
4053                 return (error);
4054
4055         return ixgbe_set_advertise(adapter, advertise);
4056 } /* ixgbe_sysctl_advertise */
4057
4058 /************************************************************************
4059  * ixgbe_set_advertise - Control advertised link speed
4060  *
4061  *   Flags:
4062  *     0x1 - advertise 100 Mb
4063  *     0x2 - advertise 1G
4064  *     0x4 - advertise 10G
4065  *     0x8 - advertise 10 Mb (yes, Mb)
4066  ************************************************************************/
4067 static int
4068 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4069 {
4070         device_t         dev;
4071         struct ixgbe_hw  *hw;
4072         ixgbe_link_speed speed = 0;
4073         ixgbe_link_speed link_caps = 0;
4074         s32              err = IXGBE_NOT_IMPLEMENTED;
4075         bool             negotiate = FALSE;
4076
4077         /* Checks to validate new value */
4078         if (adapter->advertise == advertise) /* no change */
4079                 return (0);
4080
4081         dev = adapter->dev;
4082         hw = &adapter->hw;
4083
4084         /* No speed changes for backplane media */
4085         if (hw->phy.media_type == ixgbe_media_type_backplane)
4086                 return (ENODEV);
4087
4088         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4089               (hw->phy.multispeed_fiber))) {
4090                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4091                 return (EINVAL);
4092         }
4093
4094         if (advertise < 0x1 || advertise > 0xF) {
4095                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4096                 return (EINVAL);
4097         }
4098
4099         if (hw->mac.ops.get_link_capabilities) {
4100                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4101                     &negotiate);
4102                 if (err != IXGBE_SUCCESS) {
4103                         device_printf(dev, "Unable to determine supported advertise speeds\n");
4104                         return (ENODEV);
4105                 }
4106         }
4107
4108         /* Set new value and report new advertised mode */
4109         if (advertise & 0x1) {
4110                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4111                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4112                         return (EINVAL);
4113                 }
4114                 speed |= IXGBE_LINK_SPEED_100_FULL;
4115         }
4116         if (advertise & 0x2) {
4117                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4118                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4119                         return (EINVAL);
4120                 }
4121                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4122         }
4123         if (advertise & 0x4) {
4124                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4125                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4126                         return (EINVAL);
4127                 }
4128                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4129         }
4130         if (advertise & 0x8) {
4131                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4132                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4133                         return (EINVAL);
4134                 }
4135                 speed |= IXGBE_LINK_SPEED_10_FULL;
4136         }
4137
4138         hw->mac.autotry_restart = TRUE;
4139         hw->mac.ops.setup_link(hw, speed, TRUE);
4140         adapter->advertise = advertise;
4141
4142         return (0);
4143 } /* ixgbe_set_advertise */
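
/*
 * Example (hedged): the advertise value is a bitmask of the flags
 * above, so assuming the node is exposed as dev.ix.<unit>.advertise_speed,
 * advertising 1G and 10G together would be:
 *
 *     sysctl dev.ix.0.advertise_speed=6     # 0x2 (1G) | 0x4 (10G)
 *
 * EINVAL is returned if any requested speed is missing from the
 * hardware's link capabilities, and ENODEV for backplane media.
 */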
4144
4145 /************************************************************************
4146  * ixgbe_get_advertise - Get current advertised speed settings
4147  *
4148  *   Formatted for sysctl usage.
4149  *   Flags:
4150  *     0x1 - advertise 100 Mb
4151  *     0x2 - advertise 1G
4152  *     0x4 - advertise 10G
4153  *     0x8 - advertise 10 Mb (yes, Mb)
4154  ************************************************************************/
4155 static int
4156 ixgbe_get_advertise(struct adapter *adapter)
4157 {
4158         struct ixgbe_hw  *hw = &adapter->hw;
4159         int              speed;
4160         ixgbe_link_speed link_caps = 0;
4161         s32              err;
4162         bool             negotiate = FALSE;
4163
4164         /*
4165          * Advertised speed means nothing unless it's copper or
4166          * multi-speed fiber
4167          */
4168         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4169             !(hw->phy.multispeed_fiber))
4170                 return (0);
4171
4172         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4173         if (err != IXGBE_SUCCESS)
4174                 return (0);
4175
4176         speed =
4177             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4178             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4179             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4180             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4181
4182         return speed;
4183 } /* ixgbe_get_advertise */
4184
4185 /************************************************************************
4186  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4187  *
4188  *   Control values:
4189  *     0/1 - off / on (use default value of 1000)
4190  *
4191  *     Legal timer values are:
4192  *     50,100,250,500,1000,2000,5000,10000
4193  *
4194  *     Turning off interrupt moderation will also turn this off.
4195  ************************************************************************/
4196 static int
4197 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4198 {
4199         struct adapter *adapter = (struct adapter *)arg1;
4200         struct ifnet   *ifp = adapter->ifp;
4201         int            error;
4202         u32            newval;
4203
4204         newval = adapter->dmac;
4205         error = sysctl_handle_int(oidp, &newval, 0, req);
4206         if ((error) || (req->newptr == NULL))
4207                 return (error);
4208
4209         switch (newval) {
4210         case 0:
4211                 /* Disabled */
4212                 adapter->dmac = 0;
4213                 break;
4214         case 1:
4215                 /* Enable and use default */
4216                 adapter->dmac = 1000;
4217                 break;
4218         case 50:
4219         case 100:
4220         case 250:
4221         case 500:
4222         case 1000:
4223         case 2000:
4224         case 5000:
4225         case 10000:
4226                 /* Legal values - allow */
4227                 adapter->dmac = newval;
4228                 break;
4229         default:
4230                 /* Reject illegal values */
4231                 return (EINVAL);
4232         }
4233
4234         /* Re-initialize hardware if it's already running */
4235         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4236                 ixgbe_init(adapter);
4237
4238         return (0);
4239 } /* ixgbe_sysctl_dmac */
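
/*
 * Example (hedged): assuming the node is dev.ix.<unit>.dmac, enabling
 * DMA coalescing with a timer value of 250 would be:
 *
 *     sysctl dev.ix.0.dmac=250
 *
 * A value of 1 selects the default timer of 1000, 0 disables the
 * feature, and any value outside the legal list returns EINVAL. If the
 * interface is up, ixgbe_init() is re-run so the change takes effect.
 */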
4240
4241 #ifdef IXGBE_DEBUG
4242 /************************************************************************
4243  * ixgbe_sysctl_power_state
4244  *
4245  *   Sysctl to test power states
4246  *   Values:
4247  *     0      - set device to D0
4248  *     3      - set device to D3
4249  *     (none) - get current device power state
4250  ************************************************************************/
4251 static int
4252 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4253 {
4254         struct adapter *adapter = (struct adapter *)arg1;
4255         device_t       dev = adapter->dev;
4256         int            curr_ps, new_ps, error = 0;
4257
4258         curr_ps = new_ps = pci_get_powerstate(dev);
4259
4260         error = sysctl_handle_int(oidp, &new_ps, 0, req);
4261         if ((error) || (req->newptr == NULL))
4262                 return (error);
4263
4264         if (new_ps == curr_ps)
4265                 return (0);
4266
4267         if (new_ps == 3 && curr_ps == 0)
4268                 error = DEVICE_SUSPEND(dev);
4269         else if (new_ps == 0 && curr_ps == 3)
4270                 error = DEVICE_RESUME(dev);
4271         else
4272                 return (EINVAL);
4273
4274         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4275
4276         return (error);
4277 } /* ixgbe_sysctl_power_state */
4278 #endif
4279
4280 /************************************************************************
4281  * ixgbe_sysctl_wol_enable
4282  *
4283  *   Sysctl to enable/disable the WoL capability,
4284  *   if supported by the adapter.
4285  *
4286  *   Values:
4287  *     0 - disabled
4288  *     1 - enabled
4289  ************************************************************************/
4290 static int
4291 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4292 {
4293         struct adapter  *adapter = (struct adapter *)arg1;
4294         struct ixgbe_hw *hw = &adapter->hw;
4295         int             new_wol_enabled;
4296         int             error = 0;
4297
4298         new_wol_enabled = hw->wol_enabled;
4299         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4300         if ((error) || (req->newptr == NULL))
4301                 return (error);
4302         new_wol_enabled = !!(new_wol_enabled);
4303         if (new_wol_enabled == hw->wol_enabled)
4304                 return (0);
4305
4306         if (new_wol_enabled > 0 && !adapter->wol_support)
4307                 return (ENODEV);
4308         else
4309                 hw->wol_enabled = new_wol_enabled;
4310
4311         return (0);
4312 } /* ixgbe_sysctl_wol_enable */
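
/*
 * Example (hedged): assuming the node is dev.ix.<unit>.wol_enable,
 *
 *     sysctl dev.ix.0.wol_enable=1
 *
 * succeeds only when the adapter reports WoL support (wol_support);
 * otherwise ENODEV is returned. Any nonzero value is normalized to 1.
 */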
4313
4314 /************************************************************************
4315  * ixgbe_sysctl_wufc - Wake Up Filter Control
4316  *
4317  *   Sysctl to select the types of packets that will wake the
4318  *   adapter up upon receipt.
4319  *   Flags:
4320  *     0x1  - Link Status Change
4321  *     0x2  - Magic Packet
4322  *     0x4  - Direct Exact
4323  *     0x8  - Directed Multicast
4324  *     0x10 - Broadcast
4325  *     0x20 - ARP/IPv4 Request Packet
4326  *     0x40 - Direct IPv4 Packet
4327  *     0x80 - Direct IPv6 Packet
4328  *
4329  *   Settings not listed above will cause the sysctl to return an error.
4330  ************************************************************************/
4331 static int
4332 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4333 {
4334         struct adapter *adapter = (struct adapter *)arg1;
4335         int            error = 0;
4336         u32            new_wufc;
4337
4338         new_wufc = adapter->wufc;
4339
4340         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4341         if ((error) || (req->newptr == NULL))
4342                 return (error);
4343         if (new_wufc == adapter->wufc)
4344                 return (0);
4345
4346         if (new_wufc & 0xffffff00)
4347                 return (EINVAL);
4348
4349         new_wufc &= 0xff;
4350         new_wufc |= (0xffffff & adapter->wufc);
4351         adapter->wufc = new_wufc;
4352
4353         return (0);
4354 } /* ixgbe_sysctl_wufc */
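
/*
 * Example (hedged): the wake-up filter is a bitmask of the flags listed
 * above, so assuming the node is dev.ix.<unit>.wufc, waking on link
 * status change and magic packets would be:
 *
 *     sysctl dev.ix.0.wufc=0x3      # 0x1 (LSC) | 0x2 (magic packet)
 *
 * Values with any bit above 0xff set are rejected with EINVAL.
 */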
4355
4356 #ifdef IXGBE_DEBUG
4357 /************************************************************************
4358  * ixgbe_sysctl_print_rss_config
4359  ************************************************************************/
4360 static int
4361 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4362 {
4363         struct adapter  *adapter = (struct adapter *)arg1;
4364         struct ixgbe_hw *hw = &adapter->hw;
4365         device_t        dev = adapter->dev;
4366         struct sbuf     *buf;
4367         int             error = 0, reta_size;
4368         u32             reg;
4369
4370         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4371         if (!buf) {
4372                 device_printf(dev, "Could not allocate sbuf for output.\n");
4373                 return (ENOMEM);
4374         }
4375
4376         // TODO: use sbufs to make a string to print out
4377         /* Set multiplier for RETA setup and table size based on MAC */
4378         switch (adapter->hw.mac.type) {
4379         case ixgbe_mac_X550:
4380         case ixgbe_mac_X550EM_x:
4381         case ixgbe_mac_X550EM_a:
4382                 reta_size = 128;
4383                 break;
4384         default:
4385                 reta_size = 32;
4386                 break;
4387         }
4388
4389         /* Print out the redirection table */
4390         sbuf_cat(buf, "\n");
4391         for (int i = 0; i < reta_size; i++) {
4392                 if (i < 32) {
4393                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4394                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4395                 } else {
4396                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4397                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4398                 }
4399         }
4400
4401         // TODO: print more config
4402
4403         error = sbuf_finish(buf);
4404         if (error)
4405                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4406
4407         sbuf_delete(buf);
4408
4409         return (0);
4410 } /* ixgbe_sysctl_print_rss_config */
4411 #endif /* IXGBE_DEBUG */
4412
4413 /************************************************************************
4414  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4415  *
4416  *   For X552/X557-AT devices using an external PHY
4417  ************************************************************************/
4418 static int
4419 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4420 {
4421         struct adapter  *adapter = (struct adapter *)arg1;
4422         struct ixgbe_hw *hw = &adapter->hw;
4423         u16             reg;
4424
4425         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4426                 device_printf(adapter->dev,
4427                     "Device has no supported external thermal sensor.\n");
4428                 return (ENODEV);
4429         }
4430
4431         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4432             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4433                 device_printf(adapter->dev,
4434                     "Error reading from PHY's current temperature register\n");
4435                 return (EAGAIN);
4436         }
4437
4438         /* Shift temp for output */
4439         reg = reg >> 8;
4440
4441         return (sysctl_handle_int(oidp, NULL, reg, req));
4442 } /* ixgbe_sysctl_phy_temp */
4443
4444 /************************************************************************
4445  * ixgbe_sysctl_phy_overtemp_occurred
4446  *
4447  *   Reports (directly from the PHY) whether the current PHY
4448  *   temperature is over the overtemp threshold.
4449  ************************************************************************/
4450 static int
4451 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4452 {
4453         struct adapter  *adapter = (struct adapter *)arg1;
4454         struct ixgbe_hw *hw = &adapter->hw;
4455         u16             reg;
4456
4457         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4458                 device_printf(adapter->dev,
4459                     "Device has no supported external thermal sensor.\n");
4460                 return (ENODEV);
4461         }
4462
4463         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4464             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4465                 device_printf(adapter->dev,
4466                     "Error reading from PHY's temperature status register\n");
4467                 return (EAGAIN);
4468         }
4469
4470         /* Get occurrence bit */
4471         reg = !!(reg & 0x4000);
4472
4473         return (sysctl_handle_int(oidp, NULL, reg, req));
4474 } /* ixgbe_sysctl_phy_overtemp_occurred */
4475
4476 /************************************************************************
4477  * ixgbe_sysctl_eee_state
4478  *
4479  *   Sysctl to set EEE power saving feature
4480  *   Values:
4481  *     0      - disable EEE
4482  *     1      - enable EEE
4483  *     (none) - get current device EEE state
4484  ************************************************************************/
4485 static int
4486 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4487 {
4488         struct adapter *adapter = (struct adapter *)arg1;
4489         device_t       dev = adapter->dev;
4490         int            curr_eee, new_eee, error = 0;
4491         s32            retval;
4492
4493         curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4494
4495         error = sysctl_handle_int(oidp, &new_eee, 0, req);
4496         if ((error) || (req->newptr == NULL))
4497                 return (error);
4498
4499         /* Nothing to do */
4500         if (new_eee == curr_eee)
4501                 return (0);
4502
4503         /* Not supported */
4504         if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4505                 return (EINVAL);
4506
4507         /* Bounds checking */
4508         if ((new_eee < 0) || (new_eee > 1))
4509                 return (EINVAL);
4510
4511         retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4512         if (retval) {
4513                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4514                 return (EINVAL);
4515         }
4516
4517         /* Restart auto-neg */
4518         ixgbe_init(adapter);
4519
4520         device_printf(dev, "New EEE state: %d\n", new_eee);
4521
4522         /* Cache new value */
4523         if (new_eee)
4524                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4525         else
4526                 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4527
4528         return (error);
4529 } /* ixgbe_sysctl_eee_state */
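
/*
 * Example (hedged): assuming the node is dev.ix.<unit>.eee_state,
 *
 *     sysctl dev.ix.0.eee_state=1
 *
 * enables EEE when IXGBE_FEATURE_EEE is present in feat_cap, programs
 * the MAC via setup_eee(), and re-runs ixgbe_init() so autonegotiation
 * restarts with the new setting.
 */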
4530
4531 /************************************************************************
4532  * ixgbe_init_device_features
4533  ************************************************************************/
4534 static void
4535 ixgbe_init_device_features(struct adapter *adapter)
4536 {
4537         adapter->feat_cap = IXGBE_FEATURE_NETMAP
4538                           | IXGBE_FEATURE_RSS
4539                           | IXGBE_FEATURE_MSI
4540                           | IXGBE_FEATURE_MSIX
4541                           | IXGBE_FEATURE_LEGACY_IRQ
4542                           | IXGBE_FEATURE_LEGACY_TX;
4543
4544         /* Set capabilities first... */
4545         switch (adapter->hw.mac.type) {
4546         case ixgbe_mac_82598EB:
4547                 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4548                         adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4549                 break;
4550         case ixgbe_mac_X540:
4551                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4552                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4553                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4554                     (adapter->hw.bus.func == 0))
4555                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4556                 break;
4557         case ixgbe_mac_X550:
4558                 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4559                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4560                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4561                 break;
4562         case ixgbe_mac_X550EM_x:
4563                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4564                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4565                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4566                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4567                 break;
4568         case ixgbe_mac_X550EM_a:
4569                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4570                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4571                 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4572                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4573                     (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4574                         adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4575                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4576                 }
4577                 break;
4578         case ixgbe_mac_82599EB:
4579                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4580                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4581                 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4582                     (adapter->hw.bus.func == 0))
4583                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4584                 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4585                         adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4586                 break;
4587         default:
4588                 break;
4589         }
4590
4591         /* Enabled by default... */
4592         /* Fan failure detection */
4593         if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4594                 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4595         /* Netmap */
4596         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4597                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4598         /* EEE */
4599         if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4600                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4601         /* Thermal Sensor */
4602         if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4603                 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4604
4605         /* Enabled via global sysctl... */
4606         /* Flow Director */
4607         if (ixgbe_enable_fdir) {
4608                 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4609                         adapter->feat_en |= IXGBE_FEATURE_FDIR;
4610                 else
4611                         device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4612         }
4613         /* Legacy (single queue) transmit */
4614         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4615             ixgbe_enable_legacy_tx)
4616                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4617         /*
4618          * Message Signaled Interrupts - Extended (MSI-X)
4619          * Normal MSI is only enabled if MSI-X calls fail.
4620          */
4621         if (!ixgbe_enable_msix)
4622                 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4623         /* Receive-Side Scaling (RSS) */
4624         if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4625                 adapter->feat_en |= IXGBE_FEATURE_RSS;
4626
4627         /* Disable features with unmet dependencies... */
4628         /* No MSI-X */
4629         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4630                 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4631                 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4632                 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4633                 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4634         }
4635 } /* ixgbe_init_device_features */
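
/*
 * The feat_cap/feat_en split above is what the rest of the driver keys
 * off: feat_cap records what the silicon supports, feat_en what is
 * actually enabled. A minimal consumer sketch (illustrative only):
 *
 *     if (adapter->feat_en & IXGBE_FEATURE_FDIR)
 *             taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
 */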
4636
4637 /************************************************************************
4638  * ixgbe_probe - Device identification routine
4639  *
4640  *   Determines if the driver should be loaded on
4641  *   the adapter based on its PCI vendor/device ID.
4642  *
4643  *   return BUS_PROBE_DEFAULT on success, positive on failure
4644  ************************************************************************/
4645 static int
4646 ixgbe_probe(device_t dev)
4647 {
4648         ixgbe_vendor_info_t *ent;
4649
4650         u16  pci_vendor_id = 0;
4651         u16  pci_device_id = 0;
4652         u16  pci_subvendor_id = 0;
4653         u16  pci_subdevice_id = 0;
4654         char adapter_name[256];
4655
4656         INIT_DEBUGOUT("ixgbe_probe: begin");
4657
4658         pci_vendor_id = pci_get_vendor(dev);
4659         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4660                 return (ENXIO);
4661
4662         pci_device_id = pci_get_device(dev);
4663         pci_subvendor_id = pci_get_subvendor(dev);
4664         pci_subdevice_id = pci_get_subdevice(dev);
4665
4666         ent = ixgbe_vendor_info_array;
4667         while (ent->vendor_id != 0) {
4668                 if ((pci_vendor_id == ent->vendor_id) &&
4669                     (pci_device_id == ent->device_id) &&
4670                     ((pci_subvendor_id == ent->subvendor_id) ||
4671                      (ent->subvendor_id == 0)) &&
4672                     ((pci_subdevice_id == ent->subdevice_id) ||
4673                      (ent->subdevice_id == 0))) {
4674                         sprintf(adapter_name, "%s, Version - %s",
4675                                 ixgbe_strings[ent->index],
4676                                 ixgbe_driver_version);
4677                         device_set_desc_copy(dev, adapter_name);
4678                         ++ixgbe_total_ports;
4679                         return (BUS_PROBE_DEFAULT);
4680                 }
4681                 ent++;
4682         }
4683
4684         return (ENXIO);
4685 } /* ixgbe_probe */
4686
4687
4688 /************************************************************************
4689  * ixgbe_ioctl - Ioctl entry point
4690  *
4691  *   Called when the user wants to configure the interface.
4692  *
4693  *   return 0 on success, positive on failure
4694  ************************************************************************/
4695 static int
4696 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4697 {
4698         struct adapter *adapter = ifp->if_softc;
4699         struct ifreq   *ifr = (struct ifreq *) data;
4700 #if defined(INET) || defined(INET6)
4701         struct ifaddr  *ifa = (struct ifaddr *)data;
4702 #endif
4703         int            error = 0;
4704         bool           avoid_reset = FALSE;
4705
4706         switch (command) {
4707         case SIOCSIFADDR:
4708 #ifdef INET
4709                 if (ifa->ifa_addr->sa_family == AF_INET)
4710                         avoid_reset = TRUE;
4711 #endif
4712 #ifdef INET6
4713                 if (ifa->ifa_addr->sa_family == AF_INET6)
4714                         avoid_reset = TRUE;
4715 #endif
4716                 /*
4717                  * Calling init results in link renegotiation,
4718                  * so we avoid doing it when possible.
4719                  */
4720                 if (avoid_reset) {
4721                         ifp->if_flags |= IFF_UP;
4722                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4723                                 ixgbe_init(adapter);
4724 #ifdef INET
4725                         if (!(ifp->if_flags & IFF_NOARP))
4726                                 arp_ifinit(ifp, ifa);
4727 #endif
4728                 } else
4729                         error = ether_ioctl(ifp, command, data);
4730                 break;
4731         case SIOCSIFMTU:
4732                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4733                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4734                         error = EINVAL;
4735                 } else {
4736                         IXGBE_CORE_LOCK(adapter);
4737                         ifp->if_mtu = ifr->ifr_mtu;
4738                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4739                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4740                                 ixgbe_init_locked(adapter);
4741                         ixgbe_recalculate_max_frame(adapter);
4742                         IXGBE_CORE_UNLOCK(adapter);
4743                 }
4744                 break;
4745         case SIOCSIFFLAGS:
4746                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4747                 IXGBE_CORE_LOCK(adapter);
4748                 if (ifp->if_flags & IFF_UP) {
4749                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4750                                 if ((ifp->if_flags ^ adapter->if_flags) &
4751                                     (IFF_PROMISC | IFF_ALLMULTI)) {
4752                                         ixgbe_set_promisc(adapter);
4753                                 }
4754                         } else
4755                                 ixgbe_init_locked(adapter);
4756                 } else
4757                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4758                                 ixgbe_stop(adapter);
4759                 adapter->if_flags = ifp->if_flags;
4760                 IXGBE_CORE_UNLOCK(adapter);
4761                 break;
4762         case SIOCADDMULTI:
4763         case SIOCDELMULTI:
4764                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4765                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4766                         IXGBE_CORE_LOCK(adapter);
4767                         ixgbe_disable_intr(adapter);
4768                         ixgbe_set_multi(adapter);
4769                         ixgbe_enable_intr(adapter);
4770                         IXGBE_CORE_UNLOCK(adapter);
4771                 }
4772                 break;
4773         case SIOCSIFMEDIA:
4774         case SIOCGIFMEDIA:
4775                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4776                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4777                 break;
4778         case SIOCSIFCAP:
4779         {
4780                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4781
4782                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4783
4784                 if (!mask)
4785                         break;
4786
4787                 /* HW cannot turn these on/off separately */
4788                 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4789                         ifp->if_capenable ^= IFCAP_RXCSUM;
4790                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4791                 }
4792                 if (mask & IFCAP_TXCSUM)
4793                         ifp->if_capenable ^= IFCAP_TXCSUM;
4794                 if (mask & IFCAP_TXCSUM_IPV6)
4795                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4796                 if (mask & IFCAP_TSO4)
4797                         ifp->if_capenable ^= IFCAP_TSO4;
4798                 if (mask & IFCAP_TSO6)
4799                         ifp->if_capenable ^= IFCAP_TSO6;
4800                 if (mask & IFCAP_LRO)
4801                         ifp->if_capenable ^= IFCAP_LRO;
4802                 if (mask & IFCAP_VLAN_HWTAGGING)
4803                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4804                 if (mask & IFCAP_VLAN_HWFILTER)
4805                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4806                 if (mask & IFCAP_VLAN_HWTSO)
4807                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4808
4809                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4810                         IXGBE_CORE_LOCK(adapter);
4811                         ixgbe_init_locked(adapter);
4812                         IXGBE_CORE_UNLOCK(adapter);
4813                 }
4814                 VLAN_CAPABILITIES(ifp);
4815                 break;
4816         }
4817 #if __FreeBSD_version >= 1100036
4818         case SIOCGI2C:
4819         {
4820                 struct ixgbe_hw *hw = &adapter->hw;
4821                 struct ifi2creq i2c;
4822                 int i;
4823
4824                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4825                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
4826                 if (error != 0)
4827                         break;
4828                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4829                         error = EINVAL;
4830                         break;
4831                 }
4832                 if (i2c.len > sizeof(i2c.data)) {
4833                         error = EINVAL;
4834                         break;
4835                 }
4836
4837                 for (i = 0; i < i2c.len; i++)
4838                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4839                             i2c.dev_addr, &i2c.data[i]);
4840                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
4841                 break;
4842         }
4843 #endif
4844         default:
4845                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4846                 error = ether_ioctl(ifp, command, data);
4847                 break;
4848         }
4849
4850         return (error);
4851 } /* ixgbe_ioctl */
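
/*
 * Example (hedged): the SIOCSIFMTU case above is what an MTU change
 * from userland lands on, e.g.
 *
 *     ifconfig ix0 mtu 9000
 *
 * The request fails with EINVAL when the MTU exceeds IXGBE_MAX_MTU;
 * otherwise max_frame_size is recomputed and, if the interface is
 * running, the hardware is re-initialized under the core lock.
 */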
4852
4853 /************************************************************************
4854  * ixgbe_check_fan_failure
4855  ************************************************************************/
4856 static void
4857 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4858 {
4859         u32 mask;
4860
4861         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4862             IXGBE_ESDP_SDP1;
4863
4864         if (reg & mask)
4865                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4866 } /* ixgbe_check_fan_failure */
4867
4868 /************************************************************************
4869  * ixgbe_handle_que
4870  ************************************************************************/
4871 static void
4872 ixgbe_handle_que(void *context, int pending)
4873 {
4874         struct ix_queue *que = context;
4875         struct adapter  *adapter = que->adapter;
4876         struct tx_ring  *txr = que->txr;
4877         struct ifnet    *ifp = adapter->ifp;
4878
4879         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4880                 ixgbe_rxeof(que);
4881                 IXGBE_TX_LOCK(txr);
4882                 ixgbe_txeof(txr);
4883                 if (!ixgbe_ring_empty(ifp, txr->br))
4884                         ixgbe_start_locked(ifp, txr);
4885                 IXGBE_TX_UNLOCK(txr);
4886         }
4887
4888         /* Re-enable this interrupt */
4889         if (que->res != NULL)
4890                 ixgbe_enable_queue(adapter, que->msix);
4891         else
4892                 ixgbe_enable_intr(adapter);
4893
4894         return;
4895 } /* ixgbe_handle_que */
4896
4897
4898
4899 /************************************************************************
4900  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4901  ************************************************************************/
4902 static int
4903 ixgbe_allocate_legacy(struct adapter *adapter)
4904 {
4905         device_t        dev = adapter->dev;
4906         struct ix_queue *que = adapter->queues;
4907         struct tx_ring  *txr = adapter->tx_rings;
4908         int             error;
4909
4910         /* We allocate a single interrupt resource */
4911         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4912             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4913         if (adapter->res == NULL) {
4914                 device_printf(dev,
4915                     "Unable to allocate bus resource: interrupt\n");
4916                 return (ENXIO);
4917         }
4918
4919         /*
4920          * Try allocating a fast interrupt and the associated deferred
4921          * processing contexts.
4922          */
4923         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4924                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4925         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4926         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4927             taskqueue_thread_enqueue, &que->tq);
4928         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4929             device_get_nameunit(adapter->dev));
4930
4931         /* Tasklets for Link, SFP and Multispeed Fiber */
4932         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4933         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4934         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4935         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4936         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4937                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4938         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4939             taskqueue_thread_enqueue, &adapter->tq);
4940         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4941             device_get_nameunit(adapter->dev));
4942
4943         if ((error = bus_setup_intr(dev, adapter->res,
4944             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4945             &adapter->tag)) != 0) {
4946                 device_printf(dev,
4947                     "Failed to register fast interrupt handler: %d\n", error);
4948                 taskqueue_free(que->tq);
4949                 taskqueue_free(adapter->tq);
4950                 que->tq = NULL;
4951                 adapter->tq = NULL;
4952
4953                 return (error);
4954         }
4955         /* For simplicity in the handlers */
4956         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
4957
4958         return (0);
4959 } /* ixgbe_allocate_legacy */
4960
4961
4962 /************************************************************************
4963  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4964  ************************************************************************/
4965 static int
4966 ixgbe_allocate_msix(struct adapter *adapter)
4967 {
4968         device_t        dev = adapter->dev;
4969         struct ix_queue *que = adapter->queues;
4970         struct tx_ring  *txr = adapter->tx_rings;
4971         int             error, rid, vector = 0;
4972         int             cpu_id = 0;
4973         unsigned int    rss_buckets = 0;
4974         cpuset_t        cpu_mask;
4975
4976         /*
4977          * If we're doing RSS, the number of queues needs to
4978          * match the number of RSS buckets that are configured.
4979          *
4980          * + If there's more queues than RSS buckets, we'll end
4981          *   up with queues that get no traffic.
4982          *
4983          * + If there's more RSS buckets than queues, we'll end
4984          *   up having multiple RSS buckets map to the same queue,
4985          *   so there'll be some contention.
4986          */
4987         rss_buckets = rss_getnumbuckets();
4988         if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4989             (adapter->num_queues != rss_buckets)) {
4990                 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4991                     __func__, adapter->num_queues, rss_buckets);
4992         }
4993
4994         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4995                 rid = vector + 1;
4996                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
4997                     RF_SHAREABLE | RF_ACTIVE);
4998                 if (que->res == NULL) {
4999                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5000                             vector);
5001                         return (ENXIO);
5002                 }
5003                 /* Set the handler function */
5004                 error = bus_setup_intr(dev, que->res,
5005                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5006                     &que->tag);
5007                 if (error) {
5008                         que->res = NULL;
5009                         device_printf(dev, "Failed to register QUE handler\n");
5010                         return (error);
5011                 }
5012 #if __FreeBSD_version >= 800504
5013                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5014 #endif
5015                 que->msix = vector;
5016                 adapter->active_queues |= (u64)1 << que->msix;
5017
5018                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5019                         /*
5020                          * The queue ID is used as the RSS layer bucket ID.
5021                          * We look up the queue ID -> RSS CPU ID and select
5022                          * that.
5023                          */
5024                         cpu_id = rss_getcpu(i % rss_buckets);
5025                         CPU_SETOF(cpu_id, &cpu_mask);
5026                 } else {
5027                         /*
5028                          * Bind the MSI-X vector, and thus the
5029                          * rings to the corresponding CPU.
5030                          *
5031                          * This just happens to match the default RSS
5032                          * round-robin bucket -> queue -> CPU allocation.
5033                          */
5034                         if (adapter->num_queues > 1)
5035                                 cpu_id = i;
5036                 }
5037                 if (adapter->num_queues > 1)
5038                         bus_bind_intr(dev, que->res, cpu_id);
5039 #ifdef IXGBE_DEBUG
5040                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5041                         device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5042                             cpu_id);
5043                 else
5044                         device_printf(dev, "Bound queue %d to cpu %d\n", i,
5045                             cpu_id);
5046 #endif /* IXGBE_DEBUG */
5047
5048
5049                 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5050                         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5051                             txr);
5052                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5053                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5054                     taskqueue_thread_enqueue, &que->tq);
5055 #if __FreeBSD_version < 1100000
5056                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5057                     device_get_nameunit(adapter->dev), i);
5058 #else
5059                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5060                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5061                             &cpu_mask, "%s (bucket %d)",
5062                             device_get_nameunit(adapter->dev), cpu_id);
5063                 else
5064                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5065                             NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5066                             i);
5067 #endif
5068         }
5069
5070         /* and Link */
5071         adapter->link_rid = vector + 1;
5072         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5073             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5074         if (!adapter->res) {
5075                 device_printf(dev,
5076                     "Unable to allocate bus resource: Link interrupt [%d]\n",
5077                     adapter->link_rid);
5078                 return (ENXIO);
5079         }
5080         /* Set the link handler function */
5081         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5082             NULL, ixgbe_msix_link, adapter, &adapter->tag);
5083         if (error) {
5084                 adapter->res = NULL;
5085                 device_printf(dev, "Failed to register LINK handler\n");
5086                 return (error);
5087         }
5088 #if __FreeBSD_version >= 800504
5089         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5090 #endif
5091         adapter->vector = vector;
5092         /* Tasklets for Link, SFP and Multispeed Fiber */
5093         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5094         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5095         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5096         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5097                 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5098         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5099         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5100                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5101         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5102             taskqueue_thread_enqueue, &adapter->tq);
5103         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5104             device_get_nameunit(adapter->dev));
5105
5106         return (0);
5107 } /* ixgbe_allocate_msix */
5108
5109 /************************************************************************
5110  * ixgbe_configure_interrupts
5111  *
5112  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5113  *   This will also depend on user settings.
5114  ************************************************************************/
5115 static int
5116 ixgbe_configure_interrupts(struct adapter *adapter)
5117 {
5118         device_t dev = adapter->dev;
5119         int      rid, want, queues, msgs;
5120
5121         /* Default to 1 queue if MSI-X setup fails */
5122         adapter->num_queues = 1;
5123
5124         /* Override by tunable */
5125         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5126                 goto msi;
5127
5128         /* First try MSI-X */
5129         msgs = pci_msix_count(dev);
5130         if (msgs == 0)
5131                 goto msi;
5132         rid = PCIR_BAR(MSIX_82598_BAR);
5133         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5134             RF_ACTIVE);
5135         if (adapter->msix_mem == NULL) {
5136                 rid += 4;  /* 82599 maps in higher BAR */
5137                 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5138                     &rid, RF_ACTIVE);
5139         }
5140         if (adapter->msix_mem == NULL) {
5141                 /* May not be enabled */
5142                 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5143                 goto msi;
5144         }
5145
5146         /* Figure out a reasonable auto config value */
5147         queues = min(mp_ncpus, msgs - 1);
5148         /* If we're doing RSS, clamp at the number of RSS buckets */
5149         if (adapter->feat_en & IXGBE_FEATURE_RSS)
5150                 queues = min(queues, rss_getnumbuckets());
5151         if (ixgbe_num_queues > queues) {
5152                 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5153                 ixgbe_num_queues = queues;
5154         }
5155
5156         if (ixgbe_num_queues != 0)
5157                 queues = ixgbe_num_queues;
5158         /* Set max queues to 8 when autoconfiguring */
5159         else
5160                 queues = min(queues, 8);
5161
5162         /* reflect correct sysctl value */
5163         ixgbe_num_queues = queues;
5164
5165         /*
5166          * Want one vector (RX/TX pair) per queue
5167          * plus an additional for Link.
5168          */
5169         want = queues + 1;
5170         if (msgs >= want)
5171                 msgs = want;
5172         else {
5173                 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
5174                     msgs, want);
5175                 goto msi;
5176         }
5177         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5178                 device_printf(adapter->dev,
5179                     "Using MSI-X interrupts with %d vectors\n", msgs);
5180                 adapter->num_queues = queues;
5181                 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5182                 return (0);
5183         }
5184         /*
5185          * MSI-X allocation failed or provided us with
5186          * fewer vectors than needed. Free MSI-X resources
5187          * and we'll try enabling MSI.
5188          */
5189         pci_release_msi(dev);
5190
5191 msi:
5192         /* Without MSI-X, some features are no longer supported */
5193         adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5194         adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
5195         adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5196         adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
5197
5198         if (adapter->msix_mem != NULL) {
5199                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5200                     adapter->msix_mem);
5201                 adapter->msix_mem = NULL;
5202         }
5203         msgs = 1;
5204         if (pci_alloc_msi(dev, &msgs) == 0) {
5205                 adapter->feat_en |= IXGBE_FEATURE_MSI;
5206                 adapter->link_rid = 1;
5207                 device_printf(adapter->dev, "Using an MSI interrupt\n");
5208                 return (0);
5209         }
5210
5211         if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5212                 device_printf(adapter->dev,
5213                     "Device does not support legacy interrupts.\n");
5214                 return 1;
5215         }
5216
5217         adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5218         adapter->link_rid = 0;
5219         device_printf(adapter->dev, "Using a Legacy interrupt\n");
5220
5221         return (0);
5222 } /* ixgbe_configure_interrupts */
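
/*
 * Worked example (hedged, assuming no tunables are set and RSS is not
 * enabled): with 16 CPUs and 25 MSI-X messages available, the
 * auto-config above first picks min(16, 24) = 16 queues, clamps that
 * to 8, and asks pci_alloc_msix() for want = 9 vectors: one RX/TX pair
 * per queue plus one for link.
 */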
5223
5224
5225 /************************************************************************
5226  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5227  *
5228  *   Done outside of interrupt context since the driver might sleep
5229  ************************************************************************/
5230 static void
5231 ixgbe_handle_link(void *context, int pending)
5232 {
5233         struct adapter  *adapter = context;
5234         struct ixgbe_hw *hw = &adapter->hw;
5235
5236         ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5237         ixgbe_update_link_status(adapter);
5238
5239         /* Re-enable link interrupts */
5240         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5241 } /* ixgbe_handle_link */
5242
5243 /************************************************************************
5244  * ixgbe_rearm_queues
5245  ************************************************************************/
5246 static void
5247 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5248 {
5249         u32 mask;
5250
5251         switch (adapter->hw.mac.type) {
5252         case ixgbe_mac_82598EB:
5253                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5254                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5255                 break;
5256         case ixgbe_mac_82599EB:
5257         case ixgbe_mac_X540:
5258         case ixgbe_mac_X550:
5259         case ixgbe_mac_X550EM_x:
5260         case ixgbe_mac_X550EM_a:
5261                 mask = (queues & 0xFFFFFFFF);
5262                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5263                 mask = (queues >> 32);
5264                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5265                 break;
5266         default:
5267                 break;
5268         }
5269 } /* ixgbe_rearm_queues */
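
/*
 * Example (hedged): on the multi-register MACs the 64-bit queue mask
 * is split across EICS_EX(0) and EICS_EX(1), so
 *
 *     ixgbe_rearm_queues(adapter, (u64)1 << 0);    // bit 0 of EICS_EX(0)
 *     ixgbe_rearm_queues(adapter, (u64)1 << 35);   // bit 3 of EICS_EX(1)
 */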
5270