1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41
42 #include "ixgbe.h"
43
44 /************************************************************************
45  * Driver version
46  ************************************************************************/
47 char ixgbe_driver_version[] = "3.2.12-k";
48
49
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixgbe_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105         /* required last entry */
106         {0, 0, 0, 0, 0}
107 };
108
109 /************************************************************************
110  * Table of branding strings
111  ************************************************************************/
112 static char    *ixgbe_strings[] = {
113         "Intel(R) PRO/10GbE PCI-Express Network Driver"
114 };
115
116 /************************************************************************
117  * Function prototypes
118  ************************************************************************/
119 static int      ixgbe_probe(device_t);
120 static int      ixgbe_attach(device_t);
121 static int      ixgbe_detach(device_t);
122 static int      ixgbe_shutdown(device_t);
123 static int      ixgbe_suspend(device_t);
124 static int      ixgbe_resume(device_t);
125 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void     ixgbe_init(void *);
127 static void     ixgbe_init_locked(struct adapter *);
128 static void     ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131 #endif
132 static void     ixgbe_init_device_features(struct adapter *);
133 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void     ixgbe_add_media_types(struct adapter *);
135 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int      ixgbe_media_change(struct ifnet *);
137 static int      ixgbe_allocate_pci_resources(struct adapter *);
138 static void     ixgbe_get_slot_info(struct adapter *);
139 static int      ixgbe_allocate_msix(struct adapter *);
140 static int      ixgbe_allocate_legacy(struct adapter *);
141 static int      ixgbe_configure_interrupts(struct adapter *);
142 static void     ixgbe_free_pci_resources(struct adapter *);
143 static void     ixgbe_local_timer(void *);
144 static int      ixgbe_setup_interface(device_t, struct adapter *);
145 static void     ixgbe_config_gpie(struct adapter *);
146 static void     ixgbe_config_dmac(struct adapter *);
147 static void     ixgbe_config_delay_values(struct adapter *);
148 static void     ixgbe_config_link(struct adapter *);
149 static void     ixgbe_check_wol_support(struct adapter *);
150 static int      ixgbe_setup_low_power_mode(struct adapter *);
151 static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153 static void     ixgbe_initialize_transmit_units(struct adapter *);
154 static void     ixgbe_initialize_receive_units(struct adapter *);
155 static void     ixgbe_enable_rx_drop(struct adapter *);
156 static void     ixgbe_disable_rx_drop(struct adapter *);
157 static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159 static void     ixgbe_enable_intr(struct adapter *);
160 static void     ixgbe_disable_intr(struct adapter *);
161 static void     ixgbe_update_stats_counters(struct adapter *);
162 static void     ixgbe_set_promisc(struct adapter *);
163 static void     ixgbe_set_multi(struct adapter *);
164 static void     ixgbe_update_link_status(struct adapter *);
165 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void     ixgbe_configure_ivars(struct adapter *);
167 static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173 static void     ixgbe_add_device_sysctls(struct adapter *);
174 static void     ixgbe_add_hw_stats(struct adapter *);
175 static int      ixgbe_set_flowcntl(struct adapter *, int);
176 static int      ixgbe_set_advertise(struct adapter *, int);
177 static int      ixgbe_get_advertise(struct adapter *);
178
179 /* Sysctl handlers */
180 static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                        const char *, int *, int);
182 static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188 #ifdef IXGBE_DEBUG
189 static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191 #endif
192 static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200 /* Support for pluggable optic modules */
201 static bool     ixgbe_sfp_probe(struct adapter *);
202
203 /* Legacy (single vector) interrupt handler */
204 static void     ixgbe_legacy_irq(void *);
205
206 /* The MSI/MSI-X Interrupt handlers */
207 static void     ixgbe_msix_que(void *);
208 static void     ixgbe_msix_link(void *);
209
210 /* Deferred interrupt tasklets */
211 static void     ixgbe_handle_que(void *, int);
212 static void     ixgbe_handle_link(void *, int);
213 static void     ixgbe_handle_msf(void *, int);
214 static void     ixgbe_handle_mod(void *, int);
215 static void     ixgbe_handle_phy(void *, int);
216
217
218 /************************************************************************
219  *  FreeBSD Device Interface Entry Points
220  ************************************************************************/
221 static device_method_t ix_methods[] = {
222         /* Device interface */
223         DEVMETHOD(device_probe, ixgbe_probe),
224         DEVMETHOD(device_attach, ixgbe_attach),
225         DEVMETHOD(device_detach, ixgbe_detach),
226         DEVMETHOD(device_shutdown, ixgbe_shutdown),
227         DEVMETHOD(device_suspend, ixgbe_suspend),
228         DEVMETHOD(device_resume, ixgbe_resume),
229 #ifdef PCI_IOV
230         DEVMETHOD(pci_iov_init, ixgbe_init_iov),
231         DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
232         DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
233 #endif /* PCI_IOV */
234         DEVMETHOD_END
235 };
236
237 static driver_t ix_driver = {
238         "ix", ix_methods, sizeof(struct adapter),
239 };
240
241 devclass_t ix_devclass;
242 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
243
244 MODULE_DEPEND(ix, pci, 1, 1, 1);
245 MODULE_DEPEND(ix, ether, 1, 1, 1);
246 MODULE_DEPEND(ix, netmap, 1, 1, 1);
247
248 /*
249  * TUNEABLE PARAMETERS:
250  */
251
252 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
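/*
 * Usage note (illustrative, not from the original source): the RDTUN
 * tunables declared below live under the hw.ix sysctl node and can be
 * preset from /boot/loader.conf before the module loads, e.g. with
 * hypothetical values:
 *
 *   hw.ix.enable_aim=0
 *   hw.ix.num_queues=4
 *   hw.ix.enable_msix=1
 */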
253
254 /*
255  * AIM: Adaptive Interrupt Moderation,
256  * meaning that the interrupt rate is
257  * varied over time based on the traffic
258  * for that interrupt vector.
259  */
260 static int ixgbe_enable_aim = TRUE;
261 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
262     "Enable adaptive interrupt moderation");
263
264 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
265 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
266     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
267
268 /* How many packets rxeof tries to clean at a time */
269 static int ixgbe_rx_process_limit = 256;
270 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
271     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
272
273 /* How many packets txeof tries to clean at a time */
274 static int ixgbe_tx_process_limit = 256;
275 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
276     &ixgbe_tx_process_limit, 0,
277     "Maximum number of sent packets to process at a time, -1 means unlimited");
278
279 /* Flow control setting, default to full */
280 static int ixgbe_flow_control = ixgbe_fc_full;
281 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
282     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
283
284 /* Advertise Speed, default to 0 (auto) */
285 static int ixgbe_advertise_speed = 0;
286 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
288
289 /*
290  * Smart speed setting, default to on.
291  * This only works as a compile-time option
292  * right now, since it is set during attach; set
293  * this to 'ixgbe_smart_speed_off' to
294  * disable.
295  */
296 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
297
298 /*
299  * MSI-X should be the default for best performance,
300  * but this allows it to be forced off for testing.
301  */
302 static int ixgbe_enable_msix = 1;
303 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
304     "Enable MSI-X interrupts");
305
306 /*
307  * Number of Queues, can be set to 0,
308  * in which case it autoconfigures based on the
309  * number of CPUs, with a max of 8. This
310  * can be overridden manually here.
311  */
312 static int ixgbe_num_queues = 0;
313 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
314     "Number of queues to configure, 0 indicates autoconfigure");
315
316 /*
317  * Number of TX descriptors per ring,
318  * set higher than RX as this seems to be
319  * the better performing choice.
320  */
321 static int ixgbe_txd = PERFORM_TXD;
322 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
323     "Number of transmit descriptors per queue");
324
325 /* Number of RX descriptors per ring */
326 static int ixgbe_rxd = PERFORM_RXD;
327 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
328     "Number of receive descriptors per queue");
329
330 /*
331  * Enabling this allows the use
332  * of unsupported SFP+ modules; note that
333  * in doing so you are on your own :)
334  */
335 static int allow_unsupported_sfp = FALSE;
336 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
337     &allow_unsupported_sfp, 0,
338     "Allow unsupported SFP modules...use at your own risk");
339
340 /*
341  * Not sure if Flow Director is fully baked,
342  * so we'll default to turning it off.
343  */
344 static int ixgbe_enable_fdir = 0;
345 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
346     "Enable Flow Director");
347
348 /* Legacy Transmit (single queue) */
349 static int ixgbe_enable_legacy_tx = 0;
350 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
351     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
352
353 /* Receive-Side Scaling */
354 static int ixgbe_enable_rss = 1;
355 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
356     "Enable Receive-Side Scaling (RSS)");
357
358 /* Keep a running count of ports for sanity checking */
359 static int ixgbe_total_ports;
360
361 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
362 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
363
364 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
365
366 /************************************************************************
367  * ixgbe_initialize_rss_mapping
368  ************************************************************************/
369 static void
370 ixgbe_initialize_rss_mapping(struct adapter *adapter)
371 {
372         struct ixgbe_hw *hw = &adapter->hw;
373         u32             reta = 0, mrqc, rss_key[10];
374         int             queue_id, table_size, index_mult;
375         int             i, j;
376         u32             rss_hash_config;
377
378         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
379                 /* Fetch the configured RSS key */
380                 rss_getkey((uint8_t *)&rss_key);
381         } else {
382                 /* set up random bits */
383                 arc4rand(&rss_key, sizeof(rss_key), 0);
384         }
385
386         /* Set multiplier for RETA setup and table size based on MAC */
387         index_mult = 0x1;
388         table_size = 128;
389         switch (adapter->hw.mac.type) {
390         case ixgbe_mac_82598EB:
391                 index_mult = 0x11;
392                 break;
393         case ixgbe_mac_X550:
394         case ixgbe_mac_X550EM_x:
395         case ixgbe_mac_X550EM_a:
396                 table_size = 512;
397                 break;
398         default:
399                 break;
400         }
401
402         /* Set up the redirection table */
403         for (i = 0, j = 0; i < table_size; i++, j++) {
404                 if (j == adapter->num_queues)
405                         j = 0;
406
407                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
408                         /*
409                          * Fetch the RSS bucket id for the given indirection
410                          * entry. Cap it at the number of configured buckets
411                          * (which is num_queues.)
412                          */
413                         queue_id = rss_get_indirection_to_bucket(i);
414                         queue_id = queue_id % adapter->num_queues;
415                 } else
416                         queue_id = (j * index_mult);
417
418                 /*
419                  * The low 8 bits are for hash value (n+0);
420                  * The next 8 bits are for hash value (n+1), etc.
421                  */
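                /*
                 * Worked example (illustrative, assuming software RSS is
                 * off, 4 queues, and index_mult 1): entries 0..3 pack
                 * into RETA(0) as 0x03020100, with entry 0's queue in
                 * bits 7:0 and entry 3's queue in bits 31:24.
                 */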
422                 reta = reta >> 8;
423                 reta = reta | (((uint32_t)queue_id) << 24);
424                 if ((i & 3) == 3) {
425                         if (i < 128)
426                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
427                         else
428                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
429                                     reta);
430                         reta = 0;
431                 }
432         }
433
434         /* Now fill our hash function seeds */
435         for (i = 0; i < 10; i++)
436                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
437
438         /* Perform hash on these packet types */
439         if (adapter->feat_en & IXGBE_FEATURE_RSS)
440                 rss_hash_config = rss_gethashconfig();
441         else {
442                 /*
443                  * Disable UDP - IP fragments aren't currently being handled
444                  * and so we end up with a mix of 2-tuple and 4-tuple
445                  * traffic.
446                  */
447                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
448                                 | RSS_HASHTYPE_RSS_TCP_IPV4
449                                 | RSS_HASHTYPE_RSS_IPV6
450                                 | RSS_HASHTYPE_RSS_TCP_IPV6
451                                 | RSS_HASHTYPE_RSS_IPV6_EX
452                                 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
453         }
454
455         mrqc = IXGBE_MRQC_RSSEN;
456         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
457                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
458         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
459                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
460         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
461                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
462         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
463                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
464         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
465                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
466         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
467                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
468         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
469                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
470         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
471                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
472                     __func__);
473         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
474                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
475         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
476                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
477         mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
478         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
479 } /* ixgbe_initialize_rss_mapping */
480
481 /************************************************************************
482  * ixgbe_initialize_receive_units - Setup receive registers and features.
483  ************************************************************************/
484 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
485
486 static void
487 ixgbe_initialize_receive_units(struct adapter *adapter)
488 {
489         struct rx_ring  *rxr = adapter->rx_rings;
490         struct ixgbe_hw *hw = &adapter->hw;
491         struct ifnet    *ifp = adapter->ifp;
492         int             i, j;
493         u32             bufsz, fctrl, srrctl, rxcsum;
494         u32             hlreg;
495
496         /*
497          * Make sure receives are disabled while
498          * setting up the descriptor ring
499          */
500         ixgbe_disable_rx(hw);
501
502         /* Enable broadcasts */
503         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
504         fctrl |= IXGBE_FCTRL_BAM;
505         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
506                 fctrl |= IXGBE_FCTRL_DPF;
507                 fctrl |= IXGBE_FCTRL_PMCF;
508         }
509         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
510
511         /* Set for Jumbo Frames? */
512         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
513         if (ifp->if_mtu > ETHERMTU)
514                 hlreg |= IXGBE_HLREG0_JUMBOEN;
515         else
516                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
517
518 #ifdef DEV_NETMAP
519         /* CRC stripping is conditional in Netmap */
520         if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
521             (ifp->if_capenable & IFCAP_NETMAP) &&
522             !ix_crcstrip)
523                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
524         else
525 #endif /* DEV_NETMAP */
526                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
527
528         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
529
530         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
531             IXGBE_SRRCTL_BSIZEPKT_SHIFT;
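        /*
         * Illustrative example, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is
         * 10 (1 KB units): a 2048-byte rx_mbuf_sz rounds up to a bufsz
         * of 2, i.e. 2 KB packet buffers programmed into SRRCTL.
         */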
532
533         for (i = 0; i < adapter->num_queues; i++, rxr++) {
534                 u64 rdba = rxr->rxdma.dma_paddr;
535                 j = rxr->me;
536
537                 /* Setup the Base and Length of the Rx Descriptor Ring */
538                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
539                     (rdba & 0x00000000ffffffffULL));
540                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
541                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
542                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
543
544                 /* Set up the SRRCTL register */
545                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
546                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
547                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
548                 srrctl |= bufsz;
549                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
550
551                 /*
552                  * Set DROP_EN iff we have no flow control and >1 queue.
553                  * Note that srrctl was cleared shortly before during reset,
554                  * so we do not need to clear the bit, but do it just in case
555                  * this code is moved elsewhere.
556                  */
557                 if (adapter->num_queues > 1 &&
558                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
559                         srrctl |= IXGBE_SRRCTL_DROP_EN;
560                 } else {
561                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
562                 }
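                /*
                 * Note: the requested flow control mode defaults to the
                 * hw.ix.flow_control tunable above; choosing the value
                 * corresponding to ixgbe_fc_none is one way to end up
                 * with DROP_EN set here on multi-queue configurations.
                 */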
563
564                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
565
566                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
567                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
568                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
569
570                 /* Set the driver rx tail address */
571                 rxr->tail =  IXGBE_RDT(rxr->me);
572         }
573
574         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
575                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
576                             | IXGBE_PSRTYPE_UDPHDR
577                             | IXGBE_PSRTYPE_IPV4HDR
578                             | IXGBE_PSRTYPE_IPV6HDR;
579                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
580         }
581
582         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
583
584         ixgbe_initialize_rss_mapping(adapter);
585
586         if (adapter->num_queues > 1) {
587                 /* RSS and RX IPP Checksum are mutually exclusive */
588                 rxcsum |= IXGBE_RXCSUM_PCSD;
589         }
590
591         if (ifp->if_capenable & IFCAP_RXCSUM)
592                 rxcsum |= IXGBE_RXCSUM_PCSD;
593
594         /* This is useful for calculating UDP/IP fragment checksums */
595         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
596                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
597
598         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
599
600         return;
601 } /* ixgbe_initialize_receive_units */
602
603 /************************************************************************
604  * ixgbe_initialize_transmit_units - Enable transmit units.
605  ************************************************************************/
606 static void
607 ixgbe_initialize_transmit_units(struct adapter *adapter)
608 {
609         struct tx_ring  *txr = adapter->tx_rings;
610         struct ixgbe_hw *hw = &adapter->hw;
611
612         /* Setup the Base and Length of the Tx Descriptor Ring */
613         for (int i = 0; i < adapter->num_queues; i++, txr++) {
614                 u64 tdba = txr->txdma.dma_paddr;
615                 u32 txctrl = 0;
616                 int j = txr->me;
617
618                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
619                     (tdba & 0x00000000ffffffffULL));
620                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
621                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
622                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
623
624                 /* Setup the HW Tx Head and Tail descriptor pointers */
625                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
626                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
627
628                 /* Cache the tail address */
629                 txr->tail = IXGBE_TDT(j);
630
631                 /* Disable Head Writeback */
632                 /*
633                  * Note: for X550 series devices, these registers are actually
634  * prefixed with TPH_ instead of DCA_, but the addresses and
635                  * fields remain the same.
636                  */
637                 switch (hw->mac.type) {
638                 case ixgbe_mac_82598EB:
639                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
640                         break;
641                 default:
642                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
643                         break;
644                 }
645                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
646                 switch (hw->mac.type) {
647                 case ixgbe_mac_82598EB:
648                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
649                         break;
650                 default:
651                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
652                         break;
653                 }
654
655         }
656
657         if (hw->mac.type != ixgbe_mac_82598EB) {
658                 u32 dmatxctl, rttdcs;
659
660                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
661                 dmatxctl |= IXGBE_DMATXCTL_TE;
662                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
663                 /* Disable arbiter to set MTQC */
664                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
665                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
666                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
667                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
668                     ixgbe_get_mtqc(adapter->iov_mode));
669                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
670                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
671         }
672
673         return;
674 } /* ixgbe_initialize_transmit_units */
675
676 /************************************************************************
677  * ixgbe_attach - Device initialization routine
678  *
679  *   Called when the driver is being loaded.
680  *   Identifies the type of hardware, allocates all resources
681  *   and initializes the hardware.
682  *
683  *   return 0 on success, positive on failure
684  ************************************************************************/
685 static int
686 ixgbe_attach(device_t dev)
687 {
688         struct adapter  *adapter;
689         struct ixgbe_hw *hw;
690         int             error = 0;
691         u32             ctrl_ext;
692
693         INIT_DEBUGOUT("ixgbe_attach: begin");
694
695         /* Allocate, clear, and link in our adapter structure */
696         adapter = device_get_softc(dev);
697         adapter->hw.back = adapter;
698         adapter->dev = dev;
699         hw = &adapter->hw;
700
701         /* Core Lock Init */
702         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
703
704         /* Set up the timer callout */
705         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
706
707         /* Determine hardware revision */
708         hw->vendor_id = pci_get_vendor(dev);
709         hw->device_id = pci_get_device(dev);
710         hw->revision_id = pci_get_revid(dev);
711         hw->subsystem_vendor_id = pci_get_subvendor(dev);
712         hw->subsystem_device_id = pci_get_subdevice(dev);
713
714         /*
715          * Make sure BUSMASTER is set
716          */
717         pci_enable_busmaster(dev);
718
719         /* Do base PCI setup - map BAR0 */
720         if (ixgbe_allocate_pci_resources(adapter)) {
721                 device_printf(dev, "Allocation of PCI resources failed\n");
722                 error = ENXIO;
723                 goto err_out;
724         }
725
726         /* let hardware know driver is loaded */
727         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
728         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
729         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
730
731         /*
732          * Initialize the shared code
733          */
734         if (ixgbe_init_shared_code(hw)) {
735                 device_printf(dev, "Unable to initialize the shared code\n");
736                 error = ENXIO;
737                 goto err_out;
738         }
739
740         if (hw->mbx.ops.init_params)
741                 hw->mbx.ops.init_params(hw);
742
743         hw->allow_unsupported_sfp = allow_unsupported_sfp;
744
745         /* Pick up the 82599 settings */
746         if (hw->mac.type != ixgbe_mac_82598EB) {
747                 hw->phy.smart_speed = ixgbe_smart_speed;
748                 adapter->num_segs = IXGBE_82599_SCATTER;
749         } else
750                 adapter->num_segs = IXGBE_82598_SCATTER;
751
752         ixgbe_init_device_features(adapter);
753
754         if (ixgbe_configure_interrupts(adapter)) {
755                 error = ENXIO;
756                 goto err_out;
757         }
758
759         /* Allocate multicast array memory. */
760         adapter->mta = malloc(sizeof(*adapter->mta) *
761             MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
762         if (adapter->mta == NULL) {
763                 device_printf(dev, "Can not allocate multicast setup array\n");
764                 error = ENOMEM;
765                 goto err_out;
766         }
767
768         /* Enable WoL (if supported) */
769         ixgbe_check_wol_support(adapter);
770
771         /* Register for VLAN events */
772         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
773             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
774         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
775             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
776
777         /* Verify adapter fan is still functional (if applicable) */
778         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
779                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
780                 ixgbe_check_fan_failure(adapter, esdp, FALSE);
781         }
782
783         /* Ensure SW/FW semaphore is free */
784         ixgbe_init_swfw_semaphore(hw);
785
786         /* Enable EEE power saving */
787         if (adapter->feat_en & IXGBE_FEATURE_EEE)
788                 hw->mac.ops.setup_eee(hw, TRUE);
789
790         /* Set an initial default flow control value */
791         hw->fc.requested_mode = ixgbe_flow_control;
792
793         /* Sysctls for limiting the amount of work done in the taskqueues */
794         ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
795             "max number of rx packets to process",
796             &adapter->rx_process_limit, ixgbe_rx_process_limit);
797
798         ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
799             "max number of tx packets to process",
800             &adapter->tx_process_limit, ixgbe_tx_process_limit);
801
802         /* Do descriptor calc and sanity checks */
803         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
804             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
805                 device_printf(dev, "TXD config issue, using default!\n");
806                 adapter->num_tx_desc = DEFAULT_TXD;
807         } else
808                 adapter->num_tx_desc = ixgbe_txd;
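        /*
         * Example of the alignment check above (illustrative, assuming
         * DBA_ALIGN is 128): each ixgbe_adv_tx_desc is 16 bytes, so
         * hw.ix.txd must be a multiple of 8 descriptors to pass.
         */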
809
810         /*
811          * With many RX rings it is easy to exceed the
812          * system mbuf allocation. Tuning nmbclusters
813          * can alleviate this.
814          */
815         if (nmbclusters > 0) {
816                 int s;
817                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
818                 if (s > nmbclusters) {
819                         device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
820                         ixgbe_rxd = DEFAULT_RXD;
821                 }
822         }
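        /*
         * If the check above trips, kern.ipc.nmbclusters can be raised
         * as a loader tunable, e.g. in /boot/loader.conf (illustrative
         * value only): kern.ipc.nmbclusters=262144
         */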
823
824         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
825             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
826                 device_printf(dev, "RXD config issue, using default!\n");
827                 adapter->num_rx_desc = DEFAULT_RXD;
828         } else
829                 adapter->num_rx_desc = ixgbe_rxd;
830
831         /* Allocate our TX/RX Queues */
832         if (ixgbe_allocate_queues(adapter)) {
833                 error = ENOMEM;
834                 goto err_out;
835         }
836
837         hw->phy.reset_if_overtemp = TRUE;
838         error = ixgbe_reset_hw(hw);
839         hw->phy.reset_if_overtemp = FALSE;
840         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
841                 /*
842                  * No optics in this port, set up
843                  * so the timer routine will probe
844                  * for later insertion.
845                  */
846                 adapter->sfp_probe = TRUE;
847                 error = IXGBE_SUCCESS;
848         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849                 device_printf(dev, "Unsupported SFP+ module detected!\n");
850                 error = EIO;
851                 goto err_late;
852         } else if (error) {
853                 device_printf(dev, "Hardware initialization failed\n");
854                 error = EIO;
855                 goto err_late;
856         }
857
858         /* Make sure we have a good EEPROM before we read from it */
859         if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
860                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
861                 error = EIO;
862                 goto err_late;
863         }
864
865         /* Setup OS specific network interface */
866         if (ixgbe_setup_interface(dev, adapter) != 0)
867                 goto err_late;
868
869         if (adapter->feat_en & IXGBE_FEATURE_MSIX)
870                 error = ixgbe_allocate_msix(adapter);
871         else
872                 error = ixgbe_allocate_legacy(adapter);
873         if (error)
874                 goto err_late;
875
876         error = ixgbe_start_hw(hw);
877         switch (error) {
878         case IXGBE_ERR_EEPROM_VERSION:
879                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
880                 break;
881         case IXGBE_ERR_SFP_NOT_SUPPORTED:
882                 device_printf(dev, "Unsupported SFP+ Module\n");
883                 error = EIO;
884                 goto err_late;
885         case IXGBE_ERR_SFP_NOT_PRESENT:
886                 device_printf(dev, "No SFP+ Module found\n");
887                 /* falls thru */
888         default:
889                 break;
890         }
891
892         /* Enable the optics for 82599 SFP+ fiber */
893         ixgbe_enable_tx_laser(hw);
894
895         /* Enable power to the phy. */
896         ixgbe_set_phy_power(hw, TRUE);
897
898         /* Initialize statistics */
899         ixgbe_update_stats_counters(adapter);
900
901         /* Check PCIE slot type/speed/width */
902         ixgbe_get_slot_info(adapter);
903
904         /*
905          * Do time init and sysctl init here, but
906          * only on the first port of a bypass adapter.
907          */
908         ixgbe_bypass_init(adapter);
909
910         /* Set an initial dmac value */
911         adapter->dmac = 0;
912         /* Set initial advertised speeds (if applicable) */
913         adapter->advertise = ixgbe_get_advertise(adapter);
914
915         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
916                 ixgbe_define_iov_schemas(dev, &error);
917
918         /* Add sysctls */
919         ixgbe_add_device_sysctls(adapter);
920         ixgbe_add_hw_stats(adapter);
921
922         /* For Netmap */
923         adapter->init_locked = ixgbe_init_locked;
924         adapter->stop_locked = ixgbe_stop;
925
926         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
927                 ixgbe_netmap_attach(adapter);
928
929         INIT_DEBUGOUT("ixgbe_attach: end");
930
931         return (0);
932
933 err_late:
934         ixgbe_free_transmit_structures(adapter);
935         ixgbe_free_receive_structures(adapter);
936         free(adapter->queues, M_DEVBUF);
937 err_out:
938         if (adapter->ifp != NULL)
939                 if_free(adapter->ifp);
940         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
941         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
942         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
943         ixgbe_free_pci_resources(adapter);
944         free(adapter->mta, M_IXGBE);
945         IXGBE_CORE_LOCK_DESTROY(adapter);
946
947         return (error);
948 } /* ixgbe_attach */
949
950 /************************************************************************
951  * ixgbe_check_wol_support
952  *
953  *   Checks whether the adapter's ports are capable of
954  *   Wake On LAN by reading the adapter's NVM.
955  *
956  *   Sets each port's hw->wol_enabled value depending
957  *   on the value read here.
958  ************************************************************************/
959 static void
960 ixgbe_check_wol_support(struct adapter *adapter)
961 {
962         struct ixgbe_hw *hw = &adapter->hw;
963         u16             dev_caps = 0;
964
965         /* Find out WoL support for port */
966         adapter->wol_support = hw->wol_enabled = 0;
967         ixgbe_get_device_caps(hw, &dev_caps);
968         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
969             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
970              hw->bus.func == 0))
971                 adapter->wol_support = hw->wol_enabled = 1;
972
973         /* Save initial wake up filter configuration */
974         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
975
976         return;
977 } /* ixgbe_check_wol_support */
978
979 /************************************************************************
980  * ixgbe_setup_interface
981  *
982  *   Setup networking device structure and register an interface.
983  ************************************************************************/
984 static int
985 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
986 {
987         struct ifnet *ifp;
988
989         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
990
991         ifp = adapter->ifp = if_alloc(IFT_ETHER);
992         if (ifp == NULL) {
993                 device_printf(dev, "can not allocate ifnet structure\n");
994                 return (-1);
995         }
996         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
997         ifp->if_baudrate = IF_Gbps(10);
998         ifp->if_init = ixgbe_init;
999         ifp->if_softc = adapter;
1000         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1001         ifp->if_ioctl = ixgbe_ioctl;
1002 #if __FreeBSD_version >= 1100036
1003         if_setgetcounterfn(ifp, ixgbe_get_counter);
1004 #endif
1005 #if __FreeBSD_version >= 1100045
1006         /* TSO parameters */
1007         ifp->if_hw_tsomax = 65518;
1008         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1009         ifp->if_hw_tsomaxsegsize = 2048;
1010 #endif
1011         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1012                 ifp->if_start = ixgbe_legacy_start;
1013                 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1014                 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1015                 IFQ_SET_READY(&ifp->if_snd);
1016                 ixgbe_start_locked = ixgbe_legacy_start_locked;
1017                 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1018         } else {
1019                 ifp->if_transmit = ixgbe_mq_start;
1020                 ifp->if_qflush = ixgbe_qflush;
1021                 ixgbe_start_locked = ixgbe_mq_start_locked;
1022                 ixgbe_ring_empty = drbr_empty;
1023         }
1024
1025         ether_ifattach(ifp, adapter->hw.mac.addr);
1026
1027         adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1028
1029         /*
1030          * Tell the upper layer(s) we support long frames.
1031          */
1032         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1033
1034         /* Set capability flags */
1035         ifp->if_capabilities |= IFCAP_HWCSUM
1036                              |  IFCAP_HWCSUM_IPV6
1037                              |  IFCAP_TSO
1038                              |  IFCAP_LRO
1039                              |  IFCAP_VLAN_HWTAGGING
1040                              |  IFCAP_VLAN_HWTSO
1041                              |  IFCAP_VLAN_HWCSUM
1042                              |  IFCAP_JUMBO_MTU
1043                              |  IFCAP_VLAN_MTU
1044                              |  IFCAP_HWSTATS;
1045
1046         /* Enable the above capabilities by default */
1047         ifp->if_capenable = ifp->if_capabilities;
1048
1049         /*
1050          * Don't turn this on by default: if VLANs are
1051          * created on another pseudo device (e.g. lagg),
1052          * then VLAN events are not passed through, breaking
1053          * operation, but with HW FILTER off it works. If
1054          * using VLANs directly on the ixgbe driver you can
1055          * enable this and get full hardware tag filtering.
1056          */
1057         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
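        /*
         * For example, hardware VLAN filtering can then be enabled per
         * interface at runtime with something like:
         *   ifconfig ix0 vlanhwfilter
         */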
1058
1059         /*
1060          * Specify the media types supported by this adapter and register
1061          * callbacks to update media and link information
1062          */
1063         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1064             ixgbe_media_status);
1065
1066         adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1067         ixgbe_add_media_types(adapter);
1068
1069         /* Set autoselect media by default */
1070         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1071
1072         return (0);
1073 } /* ixgbe_setup_interface */
1074
1075 #if __FreeBSD_version >= 1100036
1076 /************************************************************************
1077  * ixgbe_get_counter
1078  ************************************************************************/
1079 static uint64_t
1080 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1081 {
1082         struct adapter *adapter;
1083         struct tx_ring *txr;
1084         uint64_t       rv;
1085
1086         adapter = if_getsoftc(ifp);
1087
1088         switch (cnt) {
1089         case IFCOUNTER_IPACKETS:
1090                 return (adapter->ipackets);
1091         case IFCOUNTER_OPACKETS:
1092                 return (adapter->opackets);
1093         case IFCOUNTER_IBYTES:
1094                 return (adapter->ibytes);
1095         case IFCOUNTER_OBYTES:
1096                 return (adapter->obytes);
1097         case IFCOUNTER_IMCASTS:
1098                 return (adapter->imcasts);
1099         case IFCOUNTER_OMCASTS:
1100                 return (adapter->omcasts);
1101         case IFCOUNTER_COLLISIONS:
1102                 return (0);
1103         case IFCOUNTER_IQDROPS:
1104                 return (adapter->iqdrops);
1105         case IFCOUNTER_OQDROPS:
1106                 rv = 0;
1107                 txr = adapter->tx_rings;
1108                 for (int i = 0; i < adapter->num_queues; i++, txr++)
1109                         rv += txr->br->br_drops;
1110                 return (rv);
1111         case IFCOUNTER_IERRORS:
1112                 return (adapter->ierrors);
1113         default:
1114                 return (if_get_counter_default(ifp, cnt));
1115         }
1116 } /* ixgbe_get_counter */
1117 #endif
1118
1119 /************************************************************************
1120  * ixgbe_add_media_types
1121  ************************************************************************/
1122 static void
1123 ixgbe_add_media_types(struct adapter *adapter)
1124 {
1125         struct ixgbe_hw *hw = &adapter->hw;
1126         device_t        dev = adapter->dev;
1127         u64             layer;
1128
1129         layer = adapter->phy_layer;
1130
1131         /* Media types with matching FreeBSD media defines */
1132         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1133                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1134         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1135                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1136         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1137                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1138         if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1139                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1140
1141         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1142             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1143                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1144                     NULL);
1145
1146         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1147                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1148                 if (hw->phy.multispeed_fiber)
1149                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1150                             NULL);
1151         }
1152         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1153                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1154                 if (hw->phy.multispeed_fiber)
1155                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1156                             NULL);
1157         } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1158                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1159         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1160                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1161
1162 #ifdef IFM_ETH_XTYPE
1163         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1164                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1165         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1166                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1167         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1168                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1169         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1170                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1171 #else
1172         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1173                 device_printf(dev, "Media supported: 10GbaseKR\n");
1174                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1175                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1176         }
1177         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1178                 device_printf(dev, "Media supported: 10GbaseKX4\n");
1179                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1180                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1181         }
1182         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1183                 device_printf(dev, "Media supported: 1000baseKX\n");
1184                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1185                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1186         }
1187         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1188                 device_printf(dev, "Media supported: 2500baseKX\n");
1189                 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1190                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1191         }
1192 #endif
1193         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1194                 device_printf(dev, "Media supported: 1000baseBX\n");
1195
1196         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1197                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1198                     0, NULL);
1199                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1200         }
1201
1202         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1203 } /* ixgbe_add_media_types */
1204
1205 /************************************************************************
1206  * ixgbe_is_sfp
1207  ************************************************************************/
1208 static inline bool
1209 ixgbe_is_sfp(struct ixgbe_hw *hw)
1210 {
1211         switch (hw->mac.type) {
1212         case ixgbe_mac_82598EB:
1213                 if (hw->phy.type == ixgbe_phy_nl)
1214                         return TRUE;
1215                 return FALSE;
1216         case ixgbe_mac_82599EB:
1217                 switch (hw->mac.ops.get_media_type(hw)) {
1218                 case ixgbe_media_type_fiber:
1219                 case ixgbe_media_type_fiber_qsfp:
1220                         return TRUE;
1221                 default:
1222                         return FALSE;
1223                 }
1224         case ixgbe_mac_X550EM_x:
1225         case ixgbe_mac_X550EM_a:
1226                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1227                         return TRUE;
1228                 return FALSE;
1229         default:
1230                 return FALSE;
1231         }
1232 } /* ixgbe_is_sfp */
1233
1234 /************************************************************************
1235  * ixgbe_config_link
1236  ************************************************************************/
1237 static void
1238 ixgbe_config_link(struct adapter *adapter)
1239 {
1240         struct ixgbe_hw *hw = &adapter->hw;
1241         u32             autoneg, err = 0;
1242         bool            sfp, negotiate;
1243
1244         sfp = ixgbe_is_sfp(hw);
1245
1246         if (sfp) {
1247                 if (hw->phy.multispeed_fiber) {
1248                         hw->mac.ops.setup_sfp(hw);
1249                         ixgbe_enable_tx_laser(hw);
1250                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1251                 } else
1252                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1253         } else {
1254                 if (hw->mac.ops.check_link)
1255                         err = ixgbe_check_link(hw, &adapter->link_speed,
1256                             &adapter->link_up, FALSE);
1257                 if (err)
1258                         goto out;
1259                 autoneg = hw->phy.autoneg_advertised;
1260                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1261                         err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1262                             &negotiate);
1263                 if (err)
1264                         goto out;
1265                 if (hw->mac.ops.setup_link)
1266                         err = hw->mac.ops.setup_link(hw, autoneg,
1267                             adapter->link_up);
1268         }
1269 out:
1270
1271         return;
1272 } /* ixgbe_config_link */
1273
1274 /************************************************************************
1275  * ixgbe_update_stats_counters - Update board statistics counters.
1276  ************************************************************************/
1277 static void
1278 ixgbe_update_stats_counters(struct adapter *adapter)
1279 {
1280         struct ixgbe_hw       *hw = &adapter->hw;
1281         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1282         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1283         u64                   total_missed_rx = 0;
1284
1285         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1286         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1287         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1288         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1289         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1290
1291         for (int i = 0; i < 16; i++) {
1292                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1293                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1294                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1295         }
1296         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1297         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1298         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1299
1300         /* Hardware workaround, gprc counts missed packets */
1301         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1302         stats->gprc -= missed_rx;
1303
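        /*
         * The byte counters below are split across low/high registers on
         * 82599 and later parts; combine them into 64-bit software totals.
         * The 82598 branch further down only has the high-register halves.
         */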
1304         if (hw->mac.type != ixgbe_mac_82598EB) {
1305                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1306                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1307                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1308                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1309                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1310                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1311                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1312                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1313         } else {
1314                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1315                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1316                 /* 82598 only has a counter in the high register */
1317                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1318                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1319                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1320         }
1321
1322         /*
1323          * Workaround: mprc hardware is incorrectly counting
1324          * broadcasts, so for now we subtract those.
1325          */
1326         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1327         stats->bprc += bprc;
1328         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1329         if (hw->mac.type == ixgbe_mac_82598EB)
1330                 stats->mprc -= bprc;
1331
1332         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1333         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1334         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1335         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1336         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1337         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1338
1339         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1340         stats->lxontxc += lxon;
1341         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1342         stats->lxofftxc += lxoff;
1343         total = lxon + lxoff;
1344
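        /*
         * The MAC counts transmitted PAUSE frames as good/multicast/64-byte
         * packets; back them (and their minimum-length byte counts) out of
         * the good-TX statistics below.
         */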
1345         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1346         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1347         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1348         stats->gptc -= total;
1349         stats->mptc -= total;
1350         stats->ptc64 -= total;
1351         stats->gotc -= total * ETHER_MIN_LEN;
1352
1353         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1354         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1355         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1356         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1357         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1358         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1359         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1360         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1361         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1362         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1363         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1364         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1365         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1366         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1367         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1368         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1369         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1370         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1371         /* Only read FCOE on 82599 */
1372         if (hw->mac.type != ixgbe_mac_82598EB) {
1373                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1374                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1375                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1376                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1377                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1378         }
1379
1380         /* Fill out the OS statistics structure */
1381         IXGBE_SET_IPACKETS(adapter, stats->gprc);
1382         IXGBE_SET_OPACKETS(adapter, stats->gptc);
1383         IXGBE_SET_IBYTES(adapter, stats->gorc);
1384         IXGBE_SET_OBYTES(adapter, stats->gotc);
1385         IXGBE_SET_IMCASTS(adapter, stats->mprc);
1386         IXGBE_SET_OMCASTS(adapter, stats->mptc);
1387         IXGBE_SET_COLLISIONS(adapter, 0);
1388         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1389         IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1390 } /* ixgbe_update_stats_counters */
1391
1392 /************************************************************************
1393  * ixgbe_add_hw_stats
1394  *
1395  *   Add sysctl variables, one per statistic, to the system.
1396  ************************************************************************/
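/*
 * The OIDs land under the device's sysctl tree; for a hypothetical unit 0
 * they can be read with, e.g.:
 *   sysctl dev.ix.0.mac_stats.crc_errs
 *   sysctl dev.ix.0.queue0.tx_packets
 */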
1397 static void
1398 ixgbe_add_hw_stats(struct adapter *adapter)
1399 {
1400         device_t               dev = adapter->dev;
1401         struct tx_ring         *txr = adapter->tx_rings;
1402         struct rx_ring         *rxr = adapter->rx_rings;
1403         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1404         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1405         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1406         struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1407         struct sysctl_oid      *stat_node, *queue_node;
1408         struct sysctl_oid_list *stat_list, *queue_list;
1409
1410 #define QUEUE_NAME_LEN 32
1411         char                   namebuf[QUEUE_NAME_LEN];
1412
1413         /* Driver Statistics */
1414         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1415             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1416         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1417             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1418         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1419             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1420         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1421             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1422
1423         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1424                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1425                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1426                     CTLFLAG_RD, NULL, "Queue Name");
1427                 queue_list = SYSCTL_CHILDREN(queue_node);
1428
1429                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1430                     CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1431                     sizeof(&adapter->queues[i]),
1432                     ixgbe_sysctl_interrupt_rate_handler, "IU",
1433                     "Interrupt Rate");
1434                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1435                     CTLFLAG_RD, &(adapter->queues[i].irqs),
1436                     "irqs on this queue");
1437                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1438                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1439                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1440                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1441                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1442                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1443                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1444                     CTLFLAG_RD, &txr->tso_tx, "TSO");
1445                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1446                     CTLFLAG_RD, &txr->no_tx_dma_setup,
1447                     "Driver tx dma failure in xmit");
1448                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1449                     CTLFLAG_RD, &txr->no_desc_avail,
1450                     "Queue No Descriptor Available");
1451                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1452                     CTLFLAG_RD, &txr->total_packets,
1453                     "Queue Packets Transmitted");
1454                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1455                     CTLFLAG_RD, &txr->br->br_drops,
1456                     "Packets dropped in buf_ring");
1457         }
1458
1459         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1460                 struct lro_ctrl *lro = &rxr->lro;
1461
1462                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1463                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1464                     CTLFLAG_RD, NULL, "Queue Name");
1465                 queue_list = SYSCTL_CHILDREN(queue_node);
1466
1467                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1468                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1469                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1470                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1471                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1472                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1473                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1474                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1475                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1476                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1477                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1478                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1479                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1480                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1481                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1482                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1483                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1484                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1485         }
1486
1487         /* MAC stats get their own sub node */
1488
1489         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1490             CTLFLAG_RD, NULL, "MAC Statistics");
1491         stat_list = SYSCTL_CHILDREN(stat_node);
1492
1493         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1494             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1495         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1496             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1497         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1498             CTLFLAG_RD, &stats->errbc, "Byte Errors");
1499         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1500             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1501         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1502             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1503         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1504             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1505         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1506             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1507         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1508             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1509
1510         /* Flow Control stats */
1511         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1512             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1513         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1514             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1515         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1516             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1517         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1518             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1519
1520         /* Packet Reception Stats */
1521         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1522             CTLFLAG_RD, &stats->tor, "Total Octets Received");
1523         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1524             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1525         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1526             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1527         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1528             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1529         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1530             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1531         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1532             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1533         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1534             CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1535         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1536             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1537         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1538             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1539         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1540             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1541         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1542             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1543         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1544             CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1545         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1546             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1547         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1548             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1549         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1550             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1551         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1552             CTLFLAG_RD, &stats->rjc, "Received Jabber");
1553         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1554             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1555         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1556             CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1557         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1558             CTLFLAG_RD, &stats->xec, "Checksum Errors");
1559
1560         /* Packet Transmission Stats */
1561         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1562             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1563         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1564             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1565         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1566             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1567         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1568             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1569         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1570             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1571         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1572             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1573         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1574             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1575         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1576             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1577         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1578             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1579         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1580             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1581         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1582             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1583         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1584             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1585 } /* ixgbe_add_hw_stats */
1586
1587 /************************************************************************
1588  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1589  *
1590  *   Retrieves the TDH value from the hardware
1591  ************************************************************************/
1592 static int
1593 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1594 {
1595         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1596         int            error;
1597         unsigned int   val;
1598
1599         if (!txr)
1600                 return (0);
1601
1602         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1603         error = sysctl_handle_int(oidp, &val, 0, req);
1604         if (error || !req->newptr)
1605                 return (error);
1606
1607         return (0);
1608 } /* ixgbe_sysctl_tdh_handler */
1609
1610 /************************************************************************
1611  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1612  *
1613  *   Retrieves the TDT value from the hardware
1614  ************************************************************************/
1615 static int
1616 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1617 {
1618         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1619         int            error;
1620         unsigned int   val;
1621
1622         if (!txr)
1623                 return (0);
1624
1625         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1626         error = sysctl_handle_int(oidp, &val, 0, req);
1627         if (error || !req->newptr)
1628                 return (error);
1629
1630         return (0);
1631 } /* ixgbe_sysctl_tdt_handler */
1632
1633 /************************************************************************
1634  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1635  *
1636  *   Retrieves the RDH value from the hardware
1637  ************************************************************************/
1638 static int
1639 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1640 {
1641         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1642         int            error;
1643         unsigned int   val;
1644
1645         if (!rxr)
1646                 return (0);
1647
1648         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1649         error = sysctl_handle_int(oidp, &val, 0, req);
1650         if (error || !req->newptr)
1651                 return (error);
1652
1653         return (0);
1654 } /* ixgbe_sysctl_rdh_handler */
1655
1656 /************************************************************************
1657  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1658  *
1659  *   Retrieves the RDT value from the hardware
1660  ************************************************************************/
1661 static int
1662 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1663 {
1664         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1665         int            error;
1666         unsigned int   val;
1667
1668         if (!rxr)
1669                 return (0);
1670
1671         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1672         error = sysctl_handle_int(oidp, &val, 0, req);
1673         if (error || !req->newptr)
1674                 return (error);
1675
1676         return (0);
1677 } /* ixgbe_sysctl_rdt_handler */
1678
1679 /************************************************************************
1680  * ixgbe_register_vlan
1681  *
1682  *   Run via vlan config EVENT; it lets us use the HW filter
1683  *   table since we can get the VLAN id.  This just creates the
1684  *   entry in the soft version of the VFTA; init will repopulate
1685  *   the real table.
1686  ************************************************************************/
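/*
 * The shadow VFTA mirrors the hardware table layout: an array of
 * IXGBE_VFTA_SIZE 32-bit words indexed by (vtag >> 5), with bit
 * (vtag & 0x1F) set per VLAN.  For example, VLAN id 100 lands in
 * shadow_vfta[3], bit 4.
 */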
1687 static void
1688 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1689 {
1690         struct adapter *adapter = ifp->if_softc;
1691         u16            index, bit;
1692
1693         if (ifp->if_softc != arg)   /* Not our event */
1694                 return;
1695
1696         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1697                 return;
1698
1699         IXGBE_CORE_LOCK(adapter);
1700         index = (vtag >> 5) & 0x7F;
1701         bit = vtag & 0x1F;
1702         adapter->shadow_vfta[index] |= (1 << bit);
1703         ++adapter->num_vlans;
1704         ixgbe_setup_vlan_hw_support(adapter);
1705         IXGBE_CORE_UNLOCK(adapter);
1706 } /* ixgbe_register_vlan */
1707
1708 /************************************************************************
1709  * ixgbe_unregister_vlan
1710  *
1711  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1712  ************************************************************************/
1713 static void
1714 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1715 {
1716         struct adapter *adapter = ifp->if_softc;
1717         u16            index, bit;
1718
1719         if (ifp->if_softc != arg)
1720                 return;
1721
1722         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1723                 return;
1724
1725         IXGBE_CORE_LOCK(adapter);
1726         index = (vtag >> 5) & 0x7F;
1727         bit = vtag & 0x1F;
1728         adapter->shadow_vfta[index] &= ~(1 << bit);
1729         --adapter->num_vlans;
1730         /* Re-init to load the changes */
1731         ixgbe_setup_vlan_hw_support(adapter);
1732         IXGBE_CORE_UNLOCK(adapter);
1733 } /* ixgbe_unregister_vlan */
1734
1735 /************************************************************************
1736  * ixgbe_setup_vlan_hw_support
1737  ************************************************************************/
1738 static void
1739 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1740 {
1741         struct ifnet    *ifp = adapter->ifp;
1742         struct ixgbe_hw *hw = &adapter->hw;
1743         struct rx_ring  *rxr;
1744         int             i;
1745         u32             ctrl;
1746
1747
1748         /*
1749          * We get here via init_locked, meaning a soft
1750          * reset; that has already cleared the VFTA and
1751          * other state, so if no VLANs have been
1752          * registered, do nothing.
1753          */
1754         if (adapter->num_vlans == 0)
1755                 return;
1756
1757         /* Setup the queues for vlans */
1758         for (i = 0; i < adapter->num_queues; i++) {
1759                 rxr = &adapter->rx_rings[i];
1760                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1761                 if (hw->mac.type != ixgbe_mac_82598EB) {
1762                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1763                         ctrl |= IXGBE_RXDCTL_VME;
1764                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1765                 }
1766                 rxr->vtag_strip = TRUE;
1767         }
1768
1769         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1770                 return;
1771         /*
1772          * A soft reset zeroes out the VFTA, so
1773          * we need to repopulate it now.
1774          */
1775         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1776                 if (adapter->shadow_vfta[i] != 0)
1777                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1778                             adapter->shadow_vfta[i]);
1779
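        /*
         * VLNCTRL bits: VFE enables the VLAN filter table, VME enables
         * global tag stripping (only meaningful for 82598 here; newer MACs
         * strip per queue via RXDCTL above), and CFIEN would additionally
         * filter on the CFI bit, which we keep cleared.
         */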
1780         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1781         /* Enable the filter table if HW VLAN filtering is enabled */
1782         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1783                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1784                 ctrl |= IXGBE_VLNCTRL_VFE;
1785         }
1786         if (hw->mac.type == ixgbe_mac_82598EB)
1787                 ctrl |= IXGBE_VLNCTRL_VME;
1788         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1789 } /* ixgbe_setup_vlan_hw_support */
1790
1791 /************************************************************************
1792  * ixgbe_get_slot_info
1793  *
1794  *   Get the width and transaction speed of
1795  *   the slot this adapter is plugged into.
1796  ************************************************************************/
1797 static void
1798 ixgbe_get_slot_info(struct adapter *adapter)
1799 {
1800         device_t              dev = adapter->dev;
1801         struct ixgbe_hw       *hw = &adapter->hw;
1802         u32                   offset;
1803         u16                   link;
1804         int                   bus_info_valid = TRUE;
1805
1806         /* Some devices are behind an internal bridge */
1807         switch (hw->device_id) {
1808         case IXGBE_DEV_ID_82599_SFP_SF_QP:
1809         case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1810                 goto get_parent_info;
1811         default:
1812                 break;
1813         }
1814
1815         ixgbe_get_bus_info(hw);
1816
1817         /*
1818          * Some devices don't use PCI-E, so skip the display
1819          * rather than report "Unknown" for bus speed and width.
1820          */
1821         switch (hw->mac.type) {
1822         case ixgbe_mac_X550EM_x:
1823         case ixgbe_mac_X550EM_a:
1824                 return;
1825         default:
1826                 goto display;
1827         }
1828
1829 get_parent_info:
1830         /*
1831          * For the quad-port adapter we need to walk back up
1832          * the PCI tree to find the speed of the expansion slot
1833          * into which this adapter is plugged; a bit more work.
1834          */
1835         dev = device_get_parent(device_get_parent(dev));
1836 #ifdef IXGBE_DEBUG
1837         device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1838             pci_get_slot(dev), pci_get_function(dev));
1839 #endif
1840         dev = device_get_parent(device_get_parent(dev));
1841 #ifdef IXGBE_DEBUG
1842         device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1843             pci_get_slot(dev), pci_get_function(dev));
1844 #endif
1845         /* Now get the PCI Express Capabilities offset */
1846         if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1847                 /*
1848                  * Hmm...can't get PCI-Express capabilities.
1849                  * Falling back to default method.
1850                  */
1851                 bus_info_valid = FALSE;
1852                 ixgbe_get_bus_info(hw);
1853                 goto display;
1854         }
1855         /* ...and read the Link Status Register */
1856         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1857         ixgbe_set_pci_config_data_generic(hw, link);
1858
1859 display:
1860         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1861             ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1862              (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1863              (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1864              "Unknown"),
1865             ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1866              (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1867              (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1868              "Unknown"));
1869
1870         if (bus_info_valid) {
1871                 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1872                     ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1873                     (hw->bus.speed == ixgbe_bus_speed_2500))) {
1874                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1875                         device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1876                 }
1877                 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1878                     ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1879                     (hw->bus.speed < ixgbe_bus_speed_8000))) {
1880                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1881                         device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1882                 }
1883         } else
1884                 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1885
1886         return;
1887 } /* ixgbe_get_slot_info */
1888
1889 /************************************************************************
1890  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1891  ************************************************************************/
1892 static inline void
1893 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1894 {
1895         struct ixgbe_hw *hw = &adapter->hw;
1896         u64             queue = (u64)1 << vector; /* 64-bit shift; vector may be >= 32 */
1897         u32             mask;
1898
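        /*
         * 82598 exposes all queue interrupt bits in a single EIMS register;
         * later MACs split the 64 possible queue vectors across
         * EIMS_EX(0)/(1), 32 bits each.  ixgbe_disable_queue() below is the
         * mirror image using EIMC/EIMC_EX.
         */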
1899         if (hw->mac.type == ixgbe_mac_82598EB) {
1900                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1901                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1902         } else {
1903                 mask = (queue & 0xFFFFFFFF);
1904                 if (mask)
1905                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1906                 mask = (queue >> 32);
1907                 if (mask)
1908                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1909         }
1910 } /* ixgbe_enable_queue */
1911
1912 /************************************************************************
1913  * ixgbe_disable_queue
1914  ************************************************************************/
1915 static inline void
1916 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1917 {
1918         struct ixgbe_hw *hw = &adapter->hw;
1919         u64             queue = (u64)1 << vector; /* 64-bit shift; vector may be >= 32 */
1920         u32             mask;
1921
1922         if (hw->mac.type == ixgbe_mac_82598EB) {
1923                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1924                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1925         } else {
1926                 mask = (queue & 0xFFFFFFFF);
1927                 if (mask)
1928                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1929                 mask = (queue >> 32);
1930                 if (mask)
1931                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1932         }
1933 } /* ixgbe_disable_queue */
1934
1935 /************************************************************************
1936  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1937  ************************************************************************/
1938 void
1939 ixgbe_msix_que(void *arg)
1940 {
1941         struct ix_queue *que = arg;
1942         struct adapter  *adapter = que->adapter;
1943         struct ifnet    *ifp = adapter->ifp;
1944         struct tx_ring  *txr = que->txr;
1945         struct rx_ring  *rxr = que->rxr;
1946         bool            more;
1947         u32             newitr = 0;
1948
1949
1950         /* Protect against spurious interrupts */
1951         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1952                 return;
1953
1954         ixgbe_disable_queue(adapter, que->msix);
1955         ++que->irqs;
1956
1957         more = ixgbe_rxeof(que);
1958
1959         IXGBE_TX_LOCK(txr);
1960         ixgbe_txeof(txr);
1961         if (!ixgbe_ring_empty(ifp, txr->br))
1962                 ixgbe_start_locked(ifp, txr);
1963         IXGBE_TX_UNLOCK(txr);
1964
1965         /* Do AIM now? */
1966
1967         if (adapter->enable_aim == FALSE)
1968                 goto no_calc;
1969         /*
1970          * Do Adaptive Interrupt Moderation:
1971          *  - Write out last calculated setting
1972          *  - Calculate based on average size over
1973          *    the last interval.
1974          */
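        /*
         * Worked example of the calculation below: an average frame size of
         * 1500 bytes gives 1500 + 24 = 1524, which is under the 3000 cap and
         * above the 300-1200 mid range, so it is halved to 762 before the
         * MAC-specific bits are OR'd in and the value is saved as the EITR
         * setting for the next interrupt.
         */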
1975         if (que->eitr_setting)
1976                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1977                     que->eitr_setting);
1978
1979         que->eitr_setting = 0;
1980
1981         /* Idle, do nothing */
1982         if ((txr->bytes == 0) && (rxr->bytes == 0))
1983                 goto no_calc;
1984
1985         if ((txr->bytes) && (txr->packets))
1986                 newitr = txr->bytes/txr->packets;
1987         if ((rxr->bytes) && (rxr->packets))
1988                 newitr = max(newitr, (rxr->bytes / rxr->packets));
1989         newitr += 24; /* account for hardware frame, crc */
1990
1991         /* set an upper boundary */
1992         newitr = min(newitr, 3000);
1993
1994         /* Be nice to the mid range */
1995         if ((newitr > 300) && (newitr < 1200))
1996                 newitr = (newitr / 3);
1997         else
1998                 newitr = (newitr / 2);
1999
2000         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2001                 newitr |= newitr << 16;
2002         else
2003                 newitr |= IXGBE_EITR_CNT_WDIS;
2004
2005         /* save for next interrupt */
2006         que->eitr_setting = newitr;
2007
2008         /* Reset state */
2009         txr->bytes = 0;
2010         txr->packets = 0;
2011         rxr->bytes = 0;
2012         rxr->packets = 0;
2013
2014 no_calc:
2015         if (more)
2016                 taskqueue_enqueue(que->tq, &que->que_task);
2017         else
2018                 ixgbe_enable_queue(adapter, que->msix);
2019
2020         return;
2021 } /* ixgbe_msix_que */
2022
2023 /************************************************************************
2024  * ixgbe_media_status - Media Ioctl callback
2025  *
2026  *   Called whenever the user queries the status of
2027  *   the interface using ifconfig.
2028  ************************************************************************/
2029 static void
2030 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2031 {
2032         struct adapter  *adapter = ifp->if_softc;
2033         struct ixgbe_hw *hw = &adapter->hw;
2034         int             layer;
2035
2036         INIT_DEBUGOUT("ixgbe_media_status: begin");
2037         IXGBE_CORE_LOCK(adapter);
2038         ixgbe_update_link_status(adapter);
2039
2040         ifmr->ifm_status = IFM_AVALID;
2041         ifmr->ifm_active = IFM_ETHER;
2042
2043         if (!adapter->link_active) {
2044                 IXGBE_CORE_UNLOCK(adapter);
2045                 return;
2046         }
2047
2048         ifmr->ifm_status |= IFM_ACTIVE;
2049         layer = adapter->phy_layer;
2050
2051         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2052             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2053             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2054             layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2055                 switch (adapter->link_speed) {
2056                 case IXGBE_LINK_SPEED_10GB_FULL:
2057                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2058                         break;
2059                 case IXGBE_LINK_SPEED_1GB_FULL:
2060                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2061                         break;
2062                 case IXGBE_LINK_SPEED_100_FULL:
2063                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2064                         break;
2065                 case IXGBE_LINK_SPEED_10_FULL:
2066                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2067                         break;
2068                 }
2069         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2070             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2071                 switch (adapter->link_speed) {
2072                 case IXGBE_LINK_SPEED_10GB_FULL:
2073                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2074                         break;
2075                 }
2076         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2077                 switch (adapter->link_speed) {
2078                 case IXGBE_LINK_SPEED_10GB_FULL:
2079                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2080                         break;
2081                 case IXGBE_LINK_SPEED_1GB_FULL:
2082                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2083                         break;
2084                 }
2085         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2086                 switch (adapter->link_speed) {
2087                 case IXGBE_LINK_SPEED_10GB_FULL:
2088                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2089                         break;
2090                 case IXGBE_LINK_SPEED_1GB_FULL:
2091                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2092                         break;
2093                 }
2094         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2095             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2096                 switch (adapter->link_speed) {
2097                 case IXGBE_LINK_SPEED_10GB_FULL:
2098                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2099                         break;
2100                 case IXGBE_LINK_SPEED_1GB_FULL:
2101                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2102                         break;
2103                 }
2104         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2105                 switch (adapter->link_speed) {
2106                 case IXGBE_LINK_SPEED_10GB_FULL:
2107                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2108                         break;
2109                 }
2110         /*
2111          * XXX: These need to use the proper media types once
2112          * they're added.
2113          */
2114 #ifndef IFM_ETH_XTYPE
2115         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2116                 switch (adapter->link_speed) {
2117                 case IXGBE_LINK_SPEED_10GB_FULL:
2118                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2119                         break;
2120                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2121                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2122                         break;
2123                 case IXGBE_LINK_SPEED_1GB_FULL:
2124                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2125                         break;
2126                 }
2127         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2128             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2129             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2130                 switch (adapter->link_speed) {
2131                 case IXGBE_LINK_SPEED_10GB_FULL:
2132                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2133                         break;
2134                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2135                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2136                         break;
2137                 case IXGBE_LINK_SPEED_1GB_FULL:
2138                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2139                         break;
2140                 }
2141 #else
2142         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2143                 switch (adapter->link_speed) {
2144                 case IXGBE_LINK_SPEED_10GB_FULL:
2145                         ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2146                         break;
2147                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2148                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2149                         break;
2150                 case IXGBE_LINK_SPEED_1GB_FULL:
2151                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2152                         break;
2153                 }
2154         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2155             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2156             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2157                 switch (adapter->link_speed) {
2158                 case IXGBE_LINK_SPEED_10GB_FULL:
2159                         ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2160                         break;
2161                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2162                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2163                         break;
2164                 case IXGBE_LINK_SPEED_1GB_FULL:
2165                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2166                         break;
2167                 }
2168 #endif
2169
2170         /* If nothing is recognized... */
2171         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2172                 ifmr->ifm_active |= IFM_UNKNOWN;
2173
2174 #if __FreeBSD_version >= 900025
2175         /* Display current flow control setting used on link */
2176         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2177             hw->fc.current_mode == ixgbe_fc_full)
2178                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2179         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2180             hw->fc.current_mode == ixgbe_fc_full)
2181                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2182 #endif
2183
2184         IXGBE_CORE_UNLOCK(adapter);
2185
2186         return;
2187 } /* ixgbe_media_status */
2188
2189 /************************************************************************
2190  * ixgbe_media_change - Media Ioctl callback
2191  *
2192  *   Called when the user changes speed/duplex using
2193  *   the media/mediaopt options with ifconfig.
2194  ************************************************************************/
2195 static int
2196 ixgbe_media_change(struct ifnet *ifp)
2197 {
2198         struct adapter   *adapter = ifp->if_softc;
2199         struct ifmedia   *ifm = &adapter->media;
2200         struct ixgbe_hw  *hw = &adapter->hw;
2201         ixgbe_link_speed speed = 0;
2202
2203         INIT_DEBUGOUT("ixgbe_media_change: begin");
2204
2205         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2206                 return (EINVAL);
2207
2208         if (hw->phy.media_type == ixgbe_media_type_backplane)
2209                 return (ENODEV);
2210
2211         /*
2212          * We don't actually need to check against the supported
2213          * media types of the adapter; ifmedia will take care of
2214          * that for us.
2215          */
2216         switch (IFM_SUBTYPE(ifm->ifm_media)) {
2217                 case IFM_AUTO:
2218                 case IFM_10G_T:
2219                         speed |= IXGBE_LINK_SPEED_100_FULL;
2220                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2221                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2222                         break;
2223                 case IFM_10G_LRM:
2224                 case IFM_10G_LR:
2225 #ifndef IFM_ETH_XTYPE
2226                 case IFM_10G_SR: /* KR, too */
2227                 case IFM_10G_CX4: /* KX4 */
2228 #else
2229                 case IFM_10G_KR:
2230                 case IFM_10G_KX4:
2231 #endif
2232                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2233                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2234                         break;
2235 #ifndef IFM_ETH_XTYPE
2236                 case IFM_1000_CX: /* KX */
2237 #else
2238                 case IFM_1000_KX:
2239 #endif
2240                 case IFM_1000_LX:
2241                 case IFM_1000_SX:
2242                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2243                         break;
2244                 case IFM_1000_T:
2245                         speed |= IXGBE_LINK_SPEED_100_FULL;
2246                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2247                         break;
2248                 case IFM_10G_TWINAX:
2249                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2250                         break;
2251                 case IFM_100_TX:
2252                         speed |= IXGBE_LINK_SPEED_100_FULL;
2253                         break;
2254                 case IFM_10_T:
2255                         speed |= IXGBE_LINK_SPEED_10_FULL;
2256                         break;
2257                 default:
2258                         goto invalid;
2259         }
2260
2261         hw->mac.autotry_restart = TRUE;
2262         hw->mac.ops.setup_link(hw, speed, TRUE);
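        /*
         * Cache the result in the same bitmask encoding used by the
         * advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
         * 0x8 = 10M.
         */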
2263         adapter->advertise =
2264             ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2265             ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2266             ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2267             ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2268
2269         return (0);
2270
2271 invalid:
2272         device_printf(adapter->dev, "Invalid media type!\n");
2273
2274         return (EINVAL);
2275 } /* ixgbe_media_change */
2276
2277 /************************************************************************
2278  * ixgbe_set_promisc
2279  ************************************************************************/
2280 static void
2281 ixgbe_set_promisc(struct adapter *adapter)
2282 {
2283         struct ifnet *ifp = adapter->ifp;
2284         int          mcnt = 0;
2285         u32          rctl;
2286
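        /* FCTRL.UPE is unicast promiscuous mode, FCTRL.MPE multicast. */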
2287         rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2288         rctl &= (~IXGBE_FCTRL_UPE);
2289         if (ifp->if_flags & IFF_ALLMULTI)
2290                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2291         else {
2292                 struct ifmultiaddr *ifma;
2293 #if __FreeBSD_version < 800000
2294                 IF_ADDR_LOCK(ifp);
2295 #else
2296                 if_maddr_rlock(ifp);
2297 #endif
2298                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2299                         if (ifma->ifma_addr->sa_family != AF_LINK)
2300                                 continue;
2301                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2302                                 break;
2303                         mcnt++;
2304                 }
2305 #if __FreeBSD_version < 800000
2306                 IF_ADDR_UNLOCK(ifp);
2307 #else
2308                 if_maddr_runlock(ifp);
2309 #endif
2310         }
2311         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2312                 rctl &= (~IXGBE_FCTRL_MPE);
2313         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2314
2315         if (ifp->if_flags & IFF_PROMISC) {
2316                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2317                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2318         } else if (ifp->if_flags & IFF_ALLMULTI) {
2319                 rctl |= IXGBE_FCTRL_MPE;
2320                 rctl &= ~IXGBE_FCTRL_UPE;
2321                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2322         }
2323 } /* ixgbe_set_promisc */
2324
2325 /************************************************************************
2326  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2327  ************************************************************************/
2328 static void
2329 ixgbe_msix_link(void *arg)
2330 {
2331         struct adapter  *adapter = arg;
2332         struct ixgbe_hw *hw = &adapter->hw;
2333         u32             eicr, eicr_mask;
2334         s32             retval;
2335
2336         ++adapter->link_irq;
2337
2338         /* Pause other interrupts */
2339         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2340
2341         /* First get the cause */
2342         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2343         /* Be sure the queue bits are not cleared */
2344         eicr &= ~IXGBE_EICR_RTX_QUEUE;
2345         /* Clear interrupt with write */
2346         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2347
2348         /* Link status change */
2349         if (eicr & IXGBE_EICR_LSC) {
2350                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2351                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
2352         }
2353
2354         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2355                 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2356                     (eicr & IXGBE_EICR_FLOW_DIR)) {
2357                         /* This is probably overkill :) */
2358                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2359                                 return;
2360                         /* Disable the interrupt */
2361                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2362                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
2363                 }
2364
2365                 if (eicr & IXGBE_EICR_ECC) {
2366                         device_printf(adapter->dev,
2367                             "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
2368                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2369                 }
2370
2371                 /* Check for over temp condition */
2372                 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2373                         switch (adapter->hw.mac.type) {
2374                         case ixgbe_mac_X550EM_a:
2375                                 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2376                                         break;
2377                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2378                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2379                                 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2380                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2381                                 retval = hw->phy.ops.check_overtemp(hw);
2382                                 if (retval != IXGBE_ERR_OVERTEMP)
2383                                         break;
2384                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2385                                 device_printf(adapter->dev, "System shutdown required!\n");
2386                                 break;
2387                         default:
2388                                 if (!(eicr & IXGBE_EICR_TS))
2389                                         break;
2390                                 retval = hw->phy.ops.check_overtemp(hw);
2391                                 if (retval != IXGBE_ERR_OVERTEMP)
2392                                         break;
2393                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2394                                 device_printf(adapter->dev, "System shutdown required!\n");
2395                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2396                                 break;
2397                         }
2398                 }
2399
2400                 /* Check for VF message */
2401                 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2402                     (eicr & IXGBE_EICR_MAILBOX))
2403                         taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
2404         }
2405
2406         if (ixgbe_is_sfp(hw)) {
2407                 /* Pluggable optics-related interrupt */
2408                 if (hw->mac.type >= ixgbe_mac_X540)
2409                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2410                 else
2411                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2412
2413                 if (eicr & eicr_mask) {
2414                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2415                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2416                 }
2417
2418                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2419                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2420                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2421                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2422                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2423                 }
2424         }
2425
2426         /* Check for fan failure */
2427         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2428                 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2429                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2430         }
2431
2432         /* External PHY interrupt */
2433         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2434             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2435                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2436                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
2437         }
2438
2439         /* Re-enable other interrupts */
2440         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2441 } /* ixgbe_msix_link */
2442
2443 /************************************************************************
2444  * ixgbe_sysctl_interrupt_rate_handler
2445  ************************************************************************/
2446 static int
2447 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2448 {
2449         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2450         int             error;
2451         unsigned int    reg, usec, rate;
2452
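        /*
         * The conversion assumes the EITR interval field (bits 3-11) counts
         * 2 usec units, hence 500000/interval.  Worked example: a stored
         * field of 62 (register bits 0x1F0) reads back as 500000 / 62 ~= 8064
         * interrupts/sec; writing a requested rate of 8000 stores
         * 4000000 / 8000 = 500, masked to 0x1F0 again.
         */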
2453         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2454         usec = ((reg & 0x0FF8) >> 3);
2455         if (usec > 0)
2456                 rate = 500000 / usec;
2457         else
2458                 rate = 0;
2459         error = sysctl_handle_int(oidp, &rate, 0, req);
2460         if (error || !req->newptr)
2461                 return (error);
2462         reg &= ~0xfff; /* default, no limitation */
2463         ixgbe_max_interrupt_rate = 0;
2464         if (rate > 0 && rate < 500000) {
2465                 if (rate < 1000)
2466                         rate = 1000;
2467                 ixgbe_max_interrupt_rate = rate;
2468                 reg |= ((4000000/rate) & 0xff8);
2469         }
2470         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2471
2472         return (0);
2473 } /* ixgbe_sysctl_interrupt_rate_handler */
2474
2475 /************************************************************************
2476  * ixgbe_add_device_sysctls
2477  ************************************************************************/
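/*
 * The resulting OIDs live under the device tree, e.g. for a hypothetical
 * unit 0:
 *   sysctl dev.ix.0.fc=3               (full flow control)
 *   sysctl dev.ix.0.advertise_speed=4  (advertise 10G only)
 */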
2478 static void
2479 ixgbe_add_device_sysctls(struct adapter *adapter)
2480 {
2481         device_t               dev = adapter->dev;
2482         struct ixgbe_hw        *hw = &adapter->hw;
2483         struct sysctl_oid_list *child;
2484         struct sysctl_ctx_list *ctx;
2485
2486         ctx = device_get_sysctl_ctx(dev);
2487         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2488
2489         /* Sysctls for all devices */
2490         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2491             adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2492
2493         adapter->enable_aim = ixgbe_enable_aim;
2494         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2495             &adapter->enable_aim, 1, "Interrupt Moderation");
2496
2497         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2498             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2499             IXGBE_SYSCTL_DESC_ADV_SPEED);
2500
2501 #ifdef IXGBE_DEBUG
2502         /* testing sysctls (for all devices) */
2503         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2504             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2505             "I", "PCI Power State");
2506
2507         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2508             CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2509             ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2510 #endif
2511         /* for X550 series devices */
2512         if (hw->mac.type >= ixgbe_mac_X550)
2513                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2514                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2515                     "I", "DMA Coalesce");
2516
2517         /* for WoL-capable devices */
2518         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2519                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2520                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2521                     ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2522
2523                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2524                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2525                     "I", "Enable/Disable Wake Up Filters");
2526         }
2527
2528         /* for X552/X557-AT devices */
2529         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2530                 struct sysctl_oid *phy_node;
2531                 struct sysctl_oid_list *phy_list;
2532
2533                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2534                     CTLFLAG_RD, NULL, "External PHY sysctls");
2535                 phy_list = SYSCTL_CHILDREN(phy_node);
2536
2537                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2538                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2539                     "I", "Current External PHY Temperature (Celsius)");
2540
2541                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2542                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2543                     ixgbe_sysctl_phy_overtemp_occurred, "I",
2544                     "External PHY High Temperature Event Occurred");
2545         }
2546
2547         if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2548                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2549                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2550                     ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2551         }
2552 } /* ixgbe_add_device_sysctls */
2553
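/*
 * The sysctl nodes registered above hang off the device's tree and can
 * be driven with sysctl(8).  A hedged usage sketch (unit number and
 * values are examples; "fc" follows the encoding documented at
 * ixgbe_sysctl_flowcntl/ixgbe_set_flowcntl):
 *
 *   sysctl dev.ix.0.fc=3            # request full RX/TX flow control
 *   sysctl dev.ix.0.enable_aim=0    # turn off interrupt moderation (AIM)
 *   sysctl dev.ix.0.wol_enable=1    # X550EM baseT devices only
 */
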
2554 /************************************************************************
2555  * ixgbe_allocate_pci_resources
2556  ************************************************************************/
2557 static int
2558 ixgbe_allocate_pci_resources(struct adapter *adapter)
2559 {
2560         device_t dev = adapter->dev;
2561         int      rid;
2562
2563         rid = PCIR_BAR(0);
2564         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2565             RF_ACTIVE);
2566
2567         if (!(adapter->pci_mem)) {
2568                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2569                 return (ENXIO);
2570         }
2571
2572         /* Save bus_space values for READ/WRITE_REG macros */
2573         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2574         adapter->osdep.mem_bus_space_handle =
2575             rman_get_bushandle(adapter->pci_mem);
2576         /* Set hw values for shared code */
2577         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2578
2579         return (0);
2580 } /* ixgbe_allocate_pci_resources */
2581
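/*
 * The bus_space tag/handle captured above are what the register access
 * macros used throughout this file are built on.  A minimal sketch of
 * the idea (the real definitions live in ixgbe_osdep.h and may differ
 * in detail, e.g. how the adapter is recovered from the hw pointer):
 *
 *   #define IXGBE_WRITE_REG(hw, reg, val)                                 \
 *       bus_space_write_4(((struct adapter *)(hw)->back)->osdep.mem_bus_space_tag, \
 *           ((struct adapter *)(hw)->back)->osdep.mem_bus_space_handle,   \
 *           (reg), (val))
 *
 * bus_space_write_4(9) takes (tag, handle, offset, value), which is why
 * both values are saved here.
 */
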
2582 /************************************************************************
2583  * ixgbe_detach - Device removal routine
2584  *
2585  *   Called when the driver is being removed.
2586  *   Stops the adapter and deallocates all the resources
2587  *   that were allocated for driver operation.
2588  *
2589  *   return 0 on success, positive on failure
2590  ************************************************************************/
2591 static int
2592 ixgbe_detach(device_t dev)
2593 {
2594         struct adapter  *adapter = device_get_softc(dev);
2595         struct ix_queue *que = adapter->queues;
2596         struct tx_ring  *txr = adapter->tx_rings;
2597         u32             ctrl_ext;
2598
2599         INIT_DEBUGOUT("ixgbe_detach: begin");
2600
2601         /* Make sure VLANs are not using the driver */
2602         if (adapter->ifp->if_vlantrunk != NULL) {
2603                 device_printf(dev, "VLAN in use; detach first.\n");
2604                 return (EBUSY);
2605         }
2606
2607         if (ixgbe_pci_iov_detach(dev) != 0) {
2608                 device_printf(dev, "SR-IOV in use; detach first.\n");
2609                 return (EBUSY);
2610         }
2611
2612         ether_ifdetach(adapter->ifp);
2613         /* Stop the adapter */
2614         IXGBE_CORE_LOCK(adapter);
2615         ixgbe_setup_low_power_mode(adapter);
2616         IXGBE_CORE_UNLOCK(adapter);
2617
2618         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2619                 if (que->tq) {
2620                         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2621                                 taskqueue_drain(que->tq, &txr->txq_task);
2622                         taskqueue_drain(que->tq, &que->que_task);
2623                         taskqueue_free(que->tq);
2624                 }
2625         }
2626
2627         /* Drain the Link queue */
2628         if (adapter->tq) {
2629                 taskqueue_drain(adapter->tq, &adapter->link_task);
2630                 taskqueue_drain(adapter->tq, &adapter->mod_task);
2631                 taskqueue_drain(adapter->tq, &adapter->msf_task);
2632                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2633                         taskqueue_drain(adapter->tq, &adapter->mbx_task);
2634                 taskqueue_drain(adapter->tq, &adapter->phy_task);
2635                 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2636                         taskqueue_drain(adapter->tq, &adapter->fdir_task);
2637                 taskqueue_free(adapter->tq);
2638         }
2639
2640         /* let hardware know driver is unloading */
2641         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2642         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2643         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2644
2645         /* Unregister VLAN events */
2646         if (adapter->vlan_attach != NULL)
2647                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2648         if (adapter->vlan_detach != NULL)
2649                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2650
2651         callout_drain(&adapter->timer);
2652
2653         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2654                 netmap_detach(adapter->ifp);
2655
2656         ixgbe_free_pci_resources(adapter);
2657         bus_generic_detach(dev);
2658         if_free(adapter->ifp);
2659
2660         ixgbe_free_transmit_structures(adapter);
2661         ixgbe_free_receive_structures(adapter);
2662         free(adapter->queues, M_DEVBUF);
2663         free(adapter->mta, M_IXGBE);
2664
2665         IXGBE_CORE_LOCK_DESTROY(adapter);
2666
2667         return (0);
2668 } /* ixgbe_detach */
2669
2670 /************************************************************************
2671  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2672  *
2673  *   Prepare the adapter/port for LPLU and/or WoL
2674  ************************************************************************/
2675 static int
2676 ixgbe_setup_low_power_mode(struct adapter *adapter)
2677 {
2678         struct ixgbe_hw *hw = &adapter->hw;
2679         device_t        dev = adapter->dev;
2680         s32             error = 0;
2681
2682         mtx_assert(&adapter->core_mtx, MA_OWNED);
2683
2684         /* Limit power management flow to X550EM baseT */
2685         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2686             hw->phy.ops.enter_lplu) {
2687                 /* Turn off support for APM wakeup. (Using ACPI instead) */
2688                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2689                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2690
2691                 /*
2692                  * Clear Wake Up Status register to prevent any previous wakeup
2693                  * events from waking us up immediately after we suspend.
2694                  */
2695                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2696
2697                 /*
2698                  * Program the Wakeup Filter Control register with user filter
2699                  * settings
2700                  */
2701                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2702
2703                 /* Enable wakeups and power management in Wakeup Control */
2704                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2705                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2706
2707                 /* X550EM baseT adapters need a special LPLU flow */
2708                 hw->phy.reset_disable = true;
2709                 ixgbe_stop(adapter);
2710                 error = hw->phy.ops.enter_lplu(hw);
2711                 if (error)
2712                         device_printf(dev, "Error entering LPLU: %d\n", error);
2713                 hw->phy.reset_disable = false;
2714         } else {
2715                 /* Just stop for other adapters */
2716                 ixgbe_stop(adapter);
2717         }
2718
2719         return (error);
2720 } /* ixgbe_setup_low_power_mode */
2721
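/*
 * The adapter->wufc value programmed into IXGBE_WUFC above is a bitmask
 * of wake-up filters defined in the shared code headers; for example,
 * setting it to IXGBE_WUFC_MAG arms magic-packet wake only.  In practice
 * the value is driven through the "wufc" sysctl registered in
 * ixgbe_add_device_sysctls() rather than set directly.
 */
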
2722 /************************************************************************
2723  * ixgbe_shutdown - Shutdown entry point
2724  ************************************************************************/
2725 static int
2726 ixgbe_shutdown(device_t dev)
2727 {
2728         struct adapter *adapter = device_get_softc(dev);
2729         int            error = 0;
2730
2731         INIT_DEBUGOUT("ixgbe_shutdown: begin");
2732
2733         IXGBE_CORE_LOCK(adapter);
2734         error = ixgbe_setup_low_power_mode(adapter);
2735         IXGBE_CORE_UNLOCK(adapter);
2736
2737         return (error);
2738 } /* ixgbe_shutdown */
2739
2740 /************************************************************************
2741  * ixgbe_suspend
2742  *
2743  *   From D0 to D3
2744  ************************************************************************/
2745 static int
2746 ixgbe_suspend(device_t dev)
2747 {
2748         struct adapter *adapter = device_get_softc(dev);
2749         int            error = 0;
2750
2751         INIT_DEBUGOUT("ixgbe_suspend: begin");
2752
2753         IXGBE_CORE_LOCK(adapter);
2754
2755         error = ixgbe_setup_low_power_mode(adapter);
2756
2757         IXGBE_CORE_UNLOCK(adapter);
2758
2759         return (error);
2760 } /* ixgbe_suspend */
2761
2762 /************************************************************************
2763  * ixgbe_resume
2764  *
2765  *   From D3 to D0
2766  ************************************************************************/
2767 static int
2768 ixgbe_resume(device_t dev)
2769 {
2770         struct adapter  *adapter = device_get_softc(dev);
2771         struct ifnet    *ifp = adapter->ifp;
2772         struct ixgbe_hw *hw = &adapter->hw;
2773         u32             wus;
2774
2775         INIT_DEBUGOUT("ixgbe_resume: begin");
2776
2777         IXGBE_CORE_LOCK(adapter);
2778
2779         /* Read & clear WUS register */
2780         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2781         if (wus)
2782                 device_printf(dev, "Woken up by (WUS): %#010x\n",
2783                     IXGBE_READ_REG(hw, IXGBE_WUS));
2784         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2785         /* And clear WUFC until next low-power transition */
2786         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2787
2788         /*
2789          * Required after D3->D0 transition;
2790          * will re-advertise all previous advertised speeds
2791          */
2792         if (ifp->if_flags & IFF_UP)
2793                 ixgbe_init_locked(adapter);
2794
2795         IXGBE_CORE_UNLOCK(adapter);
2796
2797         return (0);
2798 } /* ixgbe_resume */
2799
2800 /************************************************************************
2801  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2802  *
2803  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2804  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2805  *   field what mbuf offload flags the driver will understand.
2806  ************************************************************************/
2807 static void
2808 ixgbe_set_if_hwassist(struct adapter *adapter)
2809 {
2810         struct ifnet *ifp = adapter->ifp;
2811
2812         ifp->if_hwassist = 0;
2813 #if __FreeBSD_version >= 1000000
2814         if (ifp->if_capenable & IFCAP_TSO4)
2815                 ifp->if_hwassist |= CSUM_IP_TSO;
2816         if (ifp->if_capenable & IFCAP_TSO6)
2817                 ifp->if_hwassist |= CSUM_IP6_TSO;
2818         if (ifp->if_capenable & IFCAP_TXCSUM) {
2819                 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2820                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2821                         ifp->if_hwassist |= CSUM_IP_SCTP;
2822         }
2823         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2824                 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2825                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2826                         ifp->if_hwassist |= CSUM_IP6_SCTP;
2827         }
2828 #else
2829         if (ifp->if_capenable & IFCAP_TSO)
2830                 ifp->if_hwassist |= CSUM_TSO;
2831         if (ifp->if_capenable & IFCAP_TXCSUM) {
2832                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2833                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2834                         ifp->if_hwassist |= CSUM_SCTP;
2835         }
2836 #endif
2837 } /* ixgbe_set_if_hwassist */
2838
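/*
 * The if_capenable bits consumed above are toggled from userland with
 * ifconfig(8); a typical sequence (interface name and exact option
 * keywords per ifconfig(8), shown only as an illustration):
 *
 *   ifconfig ix0 -txcsum -tso4    # drop IPv4 TX checksum offload and TSO
 *   ifconfig ix0 txcsum tso4      # re-enable them
 *
 * ixgbe_set_if_hwassist() then re-derives if_hwassist so the stack knows
 * which CSUM_ and TSO mbuf flags this interface will honor.
 */
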
2839 /************************************************************************
2840  * ixgbe_init_locked - Init entry point
2841  *
2842  *   Used in two ways: It is used by the stack as an init
2843  *   entry point in network interface structure. It is also
2844  *   used by the driver as a hw/sw initialization routine to
2845  *   get to a consistent state.
2846  *
2847  *   return 0 on success, positive on failure
2848  ************************************************************************/
2849 void
2850 ixgbe_init_locked(struct adapter *adapter)
2851 {
2852         struct ifnet    *ifp = adapter->ifp;
2853         device_t        dev = adapter->dev;
2854         struct ixgbe_hw *hw = &adapter->hw;
2855         struct tx_ring  *txr;
2856         struct rx_ring  *rxr;
2857         u32             txdctl, mhadd;
2858         u32             rxdctl, rxctrl;
2859         u32             ctrl_ext;
2860         int             err = 0;
2861
2862         mtx_assert(&adapter->core_mtx, MA_OWNED);
2863         INIT_DEBUGOUT("ixgbe_init_locked: begin");
2864
2865         hw->adapter_stopped = FALSE;
2866         ixgbe_stop_adapter(hw);
2867         callout_stop(&adapter->timer);
2868
2869         /* Queue indices may change with IOV mode */
2870         ixgbe_align_all_queue_indices(adapter);
2871
2872         /* reprogram the RAR[0] in case user changed it. */
2873         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2874
2875         /* Get the latest MAC address; the user may have assigned a LAA */
2876         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2877         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2878         hw->addr_ctrl.rar_used_count = 1;
2879
2880         /* Set hardware offload abilities from ifnet flags */
2881         ixgbe_set_if_hwassist(adapter);
2882
2883         /* Prepare transmit descriptors and buffers */
2884         if (ixgbe_setup_transmit_structures(adapter)) {
2885                 device_printf(dev, "Could not setup transmit structures\n");
2886                 ixgbe_stop(adapter);
2887                 return;
2888         }
2889
2890         ixgbe_init_hw(hw);
2891         ixgbe_initialize_iov(adapter);
2892         ixgbe_initialize_transmit_units(adapter);
2893
2894         /* Setup Multicast table */
2895         ixgbe_set_multi(adapter);
2896
2897         /* Determine the correct mbuf pool, based on frame size */
2898         if (adapter->max_frame_size <= MCLBYTES)
2899                 adapter->rx_mbuf_sz = MCLBYTES;
2900         else
2901                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2902
2903         /* Prepare receive descriptors and buffers */
2904         if (ixgbe_setup_receive_structures(adapter)) {
2905                 device_printf(dev, "Could not setup receive structures\n");
2906                 ixgbe_stop(adapter);
2907                 return;
2908         }
2909
2910         /* Configure RX settings */
2911         ixgbe_initialize_receive_units(adapter);
2912
2913         /* Enable SDP & MSI-X interrupts based on adapter */
2914         ixgbe_config_gpie(adapter);
2915
2916         /* Set MTU size */
2917         if (ifp->if_mtu > ETHERMTU) {
2918                 /* aka IXGBE_MAXFRS on 82599 and newer */
2919                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2923         }
2924
2925         /* Now enable all the queues */
2926         for (int i = 0; i < adapter->num_queues; i++) {
2927                 txr = &adapter->tx_rings[i];
2928                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2929                 txdctl |= IXGBE_TXDCTL_ENABLE;
2930                 /* Set WTHRESH to 8, burst writeback */
2931                 txdctl |= (8 << 16);
2932                 /*
2933                  * When the internal queue falls below PTHRESH (32),
2934                  * start prefetching as long as there are at least
2935                  * HTHRESH (1) buffers ready. The values are taken
2936                  * from the Intel Linux driver 3.8.21.
2937                  * Prefetching enables TX line rate even with 1 queue.
2938                  */
2939                 txdctl |= (32 << 0) | (1 << 8);
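                /*
                 * Net effect of the ORs above: PTHRESH = 32 (<< 0),
                 * HTHRESH = 1 (<< 8) and WTHRESH = 8 (<< 16), i.e.
                 * txdctl |= 0x00080120 in addition to the ENABLE bit.
                 */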
2940                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2941         }
2942
2943         for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2944                 rxr = &adapter->rx_rings[i];
2945                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2946                 if (hw->mac.type == ixgbe_mac_82598EB) {
2947                         /*
2948                          * PTHRESH = 32 (the 0x20 written below)
2949                          * HTHRESH = 4
2950                          * WTHRESH = 8
2951                          */
2952                         rxdctl &= ~0x3FFFFF;
2953                         rxdctl |= 0x080420;
2954                 }
2955                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2956                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2957                 for (; j < 10; j++) {
2958                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2959                             IXGBE_RXDCTL_ENABLE)
2960                                 break;
2961                         else
2962                                 msec_delay(1);
2963                 }
2964                 wmb();
2965
2966                 /*
2967                  * In netmap mode, we must preserve the buffers made
2968                  * available to userspace before the if_init()
2969                  * (this is true by default on the TX side, because
2970                  * init makes all buffers available to userspace).
2971                  *
2972                  * netmap_reset() and the device specific routines
2973                  * (e.g. ixgbe_setup_receive_rings()) map these
2974                  * buffers at the end of the NIC ring, so here we
2975                  * must set the RDT (tail) register to make sure
2976                  * they are not overwritten.
2977                  *
2978                  * In this driver the NIC ring starts at RDH = 0,
2979                  * RDT points to the last slot available for reception (?),
2980                  * so RDT = num_rx_desc - 1 means the whole ring is available.
2981                  */
2982 #ifdef DEV_NETMAP
2983                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
2984                     (ifp->if_capenable & IFCAP_NETMAP)) {
2985                         struct netmap_adapter *na = NA(adapter->ifp);
2986                         struct netmap_kring *kring = &na->rx_rings[i];
2987                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2988
2989                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
2990                 } else
2991 #endif /* DEV_NETMAP */
2992                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
2993                             adapter->num_rx_desc - 1);
2994         }
2995
2996         /* Enable Receive engine */
2997         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2998         if (hw->mac.type == ixgbe_mac_82598EB)
2999                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3000         rxctrl |= IXGBE_RXCTRL_RXEN;
3001         ixgbe_enable_rx_dma(hw, rxctrl);
3002
3003         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3004
3005         /* Set up MSI-X routing */
3006         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3007                 ixgbe_configure_ivars(adapter);
3008                 /* Set up auto-mask */
3009                 if (hw->mac.type == ixgbe_mac_82598EB)
3010                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3011                 else {
3012                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3013                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3014                 }
3015         } else {  /* Simple settings for Legacy/MSI */
3016                 ixgbe_set_ivar(adapter, 0, 0, 0);
3017                 ixgbe_set_ivar(adapter, 0, 0, 1);
3018                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3019         }
3020
3021         ixgbe_init_fdir(adapter);
3022
3023         /*
3024          * Check on any SFP devices that
3025          * need to be kick-started
3026          */
3027         if (hw->phy.type == ixgbe_phy_none) {
3028                 err = hw->phy.ops.identify(hw);
3029                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3030                         device_printf(dev,
3031                             "Unsupported SFP+ module type was detected.\n");
3032                         return;
3033                 }
3034         }
3035
3036         /* Set moderation on the Link interrupt */
3037         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3038
3039         /* Config/Enable Link */
3040         ixgbe_config_link(adapter);
3041
3042         /* Hardware Packet Buffer & Flow Control setup */
3043         ixgbe_config_delay_values(adapter);
3044
3045         /* Initialize the FC settings */
3046         ixgbe_start_hw(hw);
3047
3048         /* Set up VLAN support and filter */
3049         ixgbe_setup_vlan_hw_support(adapter);
3050
3051         /* Setup DMA Coalescing */
3052         ixgbe_config_dmac(adapter);
3053
3054         /* And now turn on interrupts */
3055         ixgbe_enable_intr(adapter);
3056
3057         /* Enable use of the mailbox (MBX) by the VFs */
3058         if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3059                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3060                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3061                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3062         }
3063
3064         /* Now inform the stack we're ready */
3065         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3066
3067         return;
3068 } /* ixgbe_init_locked */
3069
3070 /************************************************************************
3071  * ixgbe_init
3072  ************************************************************************/
3073 static void
3074 ixgbe_init(void *arg)
3075 {
3076         struct adapter *adapter = arg;
3077
3078         IXGBE_CORE_LOCK(adapter);
3079         ixgbe_init_locked(adapter);
3080         IXGBE_CORE_UNLOCK(adapter);
3081
3082         return;
3083 } /* ixgbe_init */
3084
3085 /************************************************************************
3086  * ixgbe_set_ivar
3087  *
3088  *   Setup the correct IVAR register for a particular MSI-X interrupt
3089  *     (yes this is all very magic and confusing :)
3090  *    - entry is the register array entry
3091  *    - vector is the MSI-X vector for this queue
3092  *    - type is RX/TX/MISC
3093  ************************************************************************/
3094 static void
3095 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3096 {
3097         struct ixgbe_hw *hw = &adapter->hw;
3098         u32 ivar, index;
3099
3100         vector |= IXGBE_IVAR_ALLOC_VAL;
3101
3102         switch (hw->mac.type) {
3103
3104         case ixgbe_mac_82598EB:
3105                 if (type == -1)
3106                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3107                 else
3108                         entry += (type * 64);
3109                 index = (entry >> 2) & 0x1F;
3110                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3111                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3112                 ivar |= (vector << (8 * (entry & 0x3)));
3113                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3114                 break;
3115
3116         case ixgbe_mac_82599EB:
3117         case ixgbe_mac_X540:
3118         case ixgbe_mac_X550:
3119         case ixgbe_mac_X550EM_x:
3120         case ixgbe_mac_X550EM_a:
3121                 if (type == -1) { /* MISC IVAR */
3122                         index = (entry & 1) * 8;
3123                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3124                         ivar &= ~(0xFF << index);
3125                         ivar |= (vector << index);
3126                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3127                 } else {          /* RX/TX IVARS */
3128                         index = (16 * (entry & 1)) + (8 * type);
3129                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3130                         ivar &= ~(0xFF << index);
3131                         ivar |= (vector << index);
3132                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3133                 }
3134                 break;

3135         default:
3136                 break;
3137         }
3138 } /* ixgbe_set_ivar */
3139
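/*
 * Worked example for ixgbe_set_ivar() on the 82599-class MACs above
 * (derived from the shifts in the code; the queue and vector numbers
 * are hypothetical): mapping RX queue 3 (entry = 3, type = 0) to MSI-X
 * vector 2 computes index = 16 * (3 & 1) + 8 * 0 = 16, so the vector
 * (with IXGBE_IVAR_ALLOC_VAL set) lands in byte 2 of IVAR(3 >> 1) =
 * IVAR(1).  The link/misc cause uses type = -1 and is written to
 * IXGBE_IVAR_MISC instead.
 */
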
3140 /************************************************************************
3141  * ixgbe_configure_ivars
3142  ************************************************************************/
3143 static void
3144 ixgbe_configure_ivars(struct adapter *adapter)
3145 {
3146         struct ix_queue *que = adapter->queues;
3147         u32             newitr;
3148
3149         if (ixgbe_max_interrupt_rate > 0)
3150                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3151         else {
3152                 /*
3153                  * Disable DMA coalescing if interrupt moderation is
3154                  * disabled.
3155                  */
3156                 adapter->dmac = 0;
3157                 newitr = 0;
3158         }
3159
3160         for (int i = 0; i < adapter->num_queues; i++, que++) {
3161                 struct rx_ring *rxr = &adapter->rx_rings[i];
3162                 struct tx_ring *txr = &adapter->tx_rings[i];
3163                 /* First the RX queue entry */
3164                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3165                 /* ... and the TX */
3166                 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3167                 /* Set an Initial EITR value */
3168                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3169         }
3170
3171         /* For the Link interrupt */
3172         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3173 } /* ixgbe_configure_ivars */
3174
3175 /************************************************************************
3176  * ixgbe_config_gpie
3177  ************************************************************************/
3178 static void
3179 ixgbe_config_gpie(struct adapter *adapter)
3180 {
3181         struct ixgbe_hw *hw = &adapter->hw;
3182         u32             gpie;
3183
3184         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3185
3186         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3187                 /* Enable Enhanced MSI-X mode */
3188                 gpie |= IXGBE_GPIE_MSIX_MODE
3189                      |  IXGBE_GPIE_EIAME
3190                      |  IXGBE_GPIE_PBA_SUPPORT
3191                      |  IXGBE_GPIE_OCD;
3192         }
3193
3194         /* Fan Failure Interrupt */
3195         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3196                 gpie |= IXGBE_SDP1_GPIEN;
3197
3198         /* Thermal Sensor Interrupt */
3199         if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3200                 gpie |= IXGBE_SDP0_GPIEN_X540;
3201
3202         /* Link detection */
3203         switch (hw->mac.type) {
3204         case ixgbe_mac_82599EB:
3205                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3206                 break;
3207         case ixgbe_mac_X550EM_x:
3208         case ixgbe_mac_X550EM_a:
3209                 gpie |= IXGBE_SDP0_GPIEN_X540;
3210                 break;
3211         default:
3212                 break;
3213         }
3214
3215         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3216
3217         return;
3218 } /* ixgbe_config_gpie */
3219
3220 /************************************************************************
3221  * ixgbe_config_delay_values
3222  *
3223  *   Requires adapter->max_frame_size to be set.
3224  ************************************************************************/
3225 static void
3226 ixgbe_config_delay_values(struct adapter *adapter)
3227 {
3228         struct ixgbe_hw *hw = &adapter->hw;
3229         u32             rxpb, frame, size, tmp;
3230
3231         frame = adapter->max_frame_size;
3232
3233         /* Calculate High Water */
3234         switch (hw->mac.type) {
3235         case ixgbe_mac_X540:
3236         case ixgbe_mac_X550:
3237         case ixgbe_mac_X550EM_x:
3238         case ixgbe_mac_X550EM_a:
3239                 tmp = IXGBE_DV_X540(frame, frame);
3240                 break;
3241         default:
3242                 tmp = IXGBE_DV(frame, frame);
3243                 break;
3244         }
3245         size = IXGBE_BT2KB(tmp);
3246         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3247         hw->fc.high_water[0] = rxpb - size;
3248
3249         /* Now calculate Low Water */
3250         switch (hw->mac.type) {
3251         case ixgbe_mac_X540:
3252         case ixgbe_mac_X550:
3253         case ixgbe_mac_X550EM_x:
3254         case ixgbe_mac_X550EM_a:
3255                 tmp = IXGBE_LOW_DV_X540(frame);
3256                 break;
3257         default:
3258                 tmp = IXGBE_LOW_DV(frame);
3259                 break;
3260         }
3261         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3262
3263         hw->fc.pause_time = IXGBE_FC_PAUSE;
3264         hw->fc.send_xon = TRUE;
3265 } /* ixgbe_config_delay_values */
3266
3267 /************************************************************************
3268  * ixgbe_set_multi - Multicast Update
3269  *
3270  *   Called whenever multicast address list is updated.
3271  ************************************************************************/
3272 static void
3273 ixgbe_set_multi(struct adapter *adapter)
3274 {
3275         struct ifmultiaddr   *ifma;
3276         struct ixgbe_mc_addr *mta;
3277         struct ifnet         *ifp = adapter->ifp;
3278         u8                   *update_ptr;
3279         int                  mcnt = 0;
3280         u32                  fctrl;
3281
3282         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3283
3284         mta = adapter->mta;
3285         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3286
3287 #if __FreeBSD_version < 800000
3288         IF_ADDR_LOCK(ifp);
3289 #else
3290         if_maddr_rlock(ifp);
3291 #endif
3292         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3293                 if (ifma->ifma_addr->sa_family != AF_LINK)
3294                         continue;
3295                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3296                         break;
3297                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3298                     mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3299                 mta[mcnt].vmdq = adapter->pool;
3300                 mcnt++;
3301         }
3302 #if __FreeBSD_version < 800000
3303         IF_ADDR_UNLOCK(ifp);
3304 #else
3305         if_maddr_runlock(ifp);
3306 #endif
3307
3308         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3309         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3310         if (ifp->if_flags & IFF_PROMISC)
3311                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3312         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3313             ifp->if_flags & IFF_ALLMULTI) {
3314                 fctrl |= IXGBE_FCTRL_MPE;
3315                 fctrl &= ~IXGBE_FCTRL_UPE;
3316         } else
3317                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3318
3319         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3320
3321         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3322                 update_ptr = (u8 *)mta;
3323                 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3324                     ixgbe_mc_array_itr, TRUE);
3325         }
3326
3327         return;
3328 } /* ixgbe_set_multi */
3329
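/*
 * Summary of the FCTRL decisions above (a restatement of the code):
 *
 *   IFF_PROMISC set                     -> UPE and MPE set
 *   IFF_ALLMULTI set or list overflow   -> MPE set, UPE cleared
 *   otherwise                           -> UPE and MPE cleared
 *
 * Whenever the list did not overflow, the collected addresses are also
 * pushed to the hardware via ixgbe_update_mc_addr_list().
 */
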
3330 /************************************************************************
3331  * ixgbe_mc_array_itr
3332  *
3333  *   An iterator function needed by the multicast shared code.
3334  *   It feeds the shared code routine the addresses in the
3335  *   array of ixgbe_set_multi() one by one.
3336  ************************************************************************/
3337 static u8 *
3338 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3339 {
3340         struct ixgbe_mc_addr *mta;
3341
3342         mta = (struct ixgbe_mc_addr *)*update_ptr;
3343         *vmdq = mta->vmdq;
3344
3345         *update_ptr = (u8*)(mta + 1);
3346
3347         return (mta->addr);
3348 } /* ixgbe_mc_array_itr */
3349
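/*
 * Sketch of how the shared code is expected to drive this iterator (a
 * simplified illustration, not the actual ixgbe_update_mc_addr_list()
 * implementation):
 *
 *   u8 *ptr = (u8 *)mta;
 *   u32 vmdq;
 *   for (i = 0; i < mcnt; i++) {
 *       u8 *addr = next(hw, &ptr, &vmdq);   // next == ixgbe_mc_array_itr
 *       // ... hash addr into the multicast table array ...
 *   }
 */
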
3350 /************************************************************************
3351  * ixgbe_local_timer - Timer routine
3352  *
3353  *   Checks for link status, updates statistics,
3354  *   and runs the watchdog check.
3355  ************************************************************************/
3356 static void
3357 ixgbe_local_timer(void *arg)
3358 {
3359         struct adapter  *adapter = arg;
3360         device_t        dev = adapter->dev;
3361         struct ix_queue *que = adapter->queues;
3362         u64             queues = 0;
3363         int             hung = 0;
3364
3365         mtx_assert(&adapter->core_mtx, MA_OWNED);
3366
3367         /* Check for pluggable optics */
3368         if (adapter->sfp_probe)
3369                 if (!ixgbe_sfp_probe(adapter))
3370                         goto out; /* Nothing to do */
3371
3372         ixgbe_update_link_status(adapter);
3373         ixgbe_update_stats_counters(adapter);
3374
3375         /*
3376          * Check the TX queues status
3377          *      - mark hung queues so we don't schedule on them
3378          *      - watchdog only if all queues show hung
3379          */
3380         for (int i = 0; i < adapter->num_queues; i++, que++) {
3381                 /* Keep track of queues with work for soft irq */
3382                 if (que->txr->busy)
3383                         queues |= ((u64)1 << que->me);
3384                 /*
3385                  * Each time txeof runs without cleaning, but there
3386                  * are uncleaned descriptors it increments busy. If
3387                  * we get to the MAX we declare it hung.
3388                  */
3389                 if (que->busy == IXGBE_QUEUE_HUNG) {
3390                         ++hung;
3391                         /* Mark the queue as inactive */
3392                         adapter->active_queues &= ~((u64)1 << que->me);
3393                         continue;
3394                 } else {
3395                         /* Check if we've come back from hung */
3396                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3397                                 adapter->active_queues |= ((u64)1 << que->me);
3398                 }
3399                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3400                         device_printf(dev,
3401                             "Warning queue %d appears to be hung!\n", i);
3402                         que->txr->busy = IXGBE_QUEUE_HUNG;
3403                         ++hung;
3404                 }
3405         }
3406
3407         /* Only truly watchdog if all queues show hung */
3408         if (hung == adapter->num_queues)
3409                 goto watchdog;
3410         else if (queues != 0) { /* Force an IRQ on queues with work */
3411                 ixgbe_rearm_queues(adapter, queues);
3412         }
3413
3414 out:
3415         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3416         return;
3417
3418 watchdog:
3419         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3420         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3421         adapter->watchdog_events++;
3422         ixgbe_init_locked(adapter);
3423 } /* ixgbe_local_timer */
3424
3425 /************************************************************************
3426  * ixgbe_sfp_probe
3427  *
3428  *   Determine if a port had optics inserted.
3429  ************************************************************************/
3430 static bool
3431 ixgbe_sfp_probe(struct adapter *adapter)
3432 {
3433         struct ixgbe_hw *hw = &adapter->hw;
3434         device_t        dev = adapter->dev;
3435         bool            result = FALSE;
3436
3437         if ((hw->phy.type == ixgbe_phy_nl) &&
3438             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3439                 s32 ret = hw->phy.ops.identify_sfp(hw);
3440                 if (ret)
3441                         goto out;
3442                 ret = hw->phy.ops.reset(hw);
3443                 adapter->sfp_probe = FALSE;
3444                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3445                         device_printf(dev, "Unsupported SFP+ module detected!");
3446                         device_printf(dev,
3447                             "Reload driver with supported module.\n");
3448                         goto out;
3449                 } else
3450                         device_printf(dev, "SFP+ module detected!\n");
3451                 /* We now have supported optics */
3452                 result = TRUE;
3453         }
3454 out:
3455
3456         return (result);
3457 } /* ixgbe_sfp_probe */
3458
3459 /************************************************************************
3460  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3461  ************************************************************************/
3462 static void
3463 ixgbe_handle_mod(void *context, int pending)
3464 {
3465         struct adapter  *adapter = context;
3466         struct ixgbe_hw *hw = &adapter->hw;
3467         device_t        dev = adapter->dev;
3468         u32             err, cage_full = 0;
3469
3470         if (adapter->hw.need_crosstalk_fix) {
3471                 switch (hw->mac.type) {
3472                 case ixgbe_mac_82599EB:
3473                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3474                             IXGBE_ESDP_SDP2;
3475                         break;
3476                 case ixgbe_mac_X550EM_x:
3477                 case ixgbe_mac_X550EM_a:
3478                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3479                             IXGBE_ESDP_SDP0;
3480                         break;
3481                 default:
3482                         break;
3483                 }
3484
3485                 if (!cage_full)
3486                         return;
3487         }
3488
3489         err = hw->phy.ops.identify_sfp(hw);
3490         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3491                 device_printf(dev,
3492                     "Unsupported SFP+ module type was detected.\n");
3493                 return;
3494         }
3495
3496         err = hw->mac.ops.setup_sfp(hw);
3497         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3498                 device_printf(dev,
3499                     "Setup failure - unsupported SFP+ module type.\n");
3500                 return;
3501         }
3502         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3503 } /* ixgbe_handle_mod */
3504
3505
3506 /************************************************************************
3507  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3508  ************************************************************************/
3509 static void
3510 ixgbe_handle_msf(void *context, int pending)
3511 {
3512         struct adapter  *adapter = context;
3513         struct ixgbe_hw *hw = &adapter->hw;
3514         u32             autoneg;
3515         bool            negotiate;
3516
3517         /* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
3518         adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3519
3520         autoneg = hw->phy.autoneg_advertised;
3521         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3522                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3523         if (hw->mac.ops.setup_link)
3524                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3525
3526         /* Adjust media types shown in ifconfig */
3527         ifmedia_removeall(&adapter->media);
3528         ixgbe_add_media_types(adapter);
3529         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3530 } /* ixgbe_handle_msf */
3531
3532 /************************************************************************
3533  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3534  ************************************************************************/
3535 static void
3536 ixgbe_handle_phy(void *context, int pending)
3537 {
3538         struct adapter  *adapter = context;
3539         struct ixgbe_hw *hw = &adapter->hw;
3540         int             error;
3541
3542         error = hw->phy.ops.handle_lasi(hw);
3543         if (error == IXGBE_ERR_OVERTEMP)
3544                 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3545         else if (error)
3546                 device_printf(adapter->dev,
3547                     "Error handling LASI interrupt: %d\n", error);
3548 } /* ixgbe_handle_phy */
3549
3550 /************************************************************************
3551  * ixgbe_stop - Stop the hardware
3552  *
3553  *   Disables all traffic on the adapter by issuing a
3554  *   global reset on the MAC and deallocates TX/RX buffers.
3555  ************************************************************************/
3556 static void
3557 ixgbe_stop(void *arg)
3558 {
3559         struct ifnet    *ifp;
3560         struct adapter  *adapter = arg;
3561         struct ixgbe_hw *hw = &adapter->hw;
3562
3563         ifp = adapter->ifp;
3564
3565         mtx_assert(&adapter->core_mtx, MA_OWNED);
3566
3567         INIT_DEBUGOUT("ixgbe_stop: begin\n");
3568         ixgbe_disable_intr(adapter);
3569         callout_stop(&adapter->timer);
3570
3571         /* Let the stack know...*/
3572         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3573
3574         ixgbe_reset_hw(hw);
3575         hw->adapter_stopped = FALSE;
3576         ixgbe_stop_adapter(hw);
3577         if (hw->mac.type == ixgbe_mac_82599EB)
3578                 ixgbe_stop_mac_link_on_d3_82599(hw);
3579         /* Turn off the laser - noop with no optics */
3580         ixgbe_disable_tx_laser(hw);
3581
3582         /* Update the stack */
3583         adapter->link_up = FALSE;
3584         ixgbe_update_link_status(adapter);
3585
3586         /* reprogram the RAR[0] in case user changed it. */
3587         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3588
3589         return;
3590 } /* ixgbe_stop */
3591
3592 /************************************************************************
3593  * ixgbe_update_link_status - Update OS on link state
3594  *
3595  * Note: Only updates the OS on the cached link state.
3596  *       The real check of the hardware only happens with
3597  *       a link interrupt.
3598  ************************************************************************/
3599 static void
3600 ixgbe_update_link_status(struct adapter *adapter)
3601 {
3602         struct ifnet *ifp = adapter->ifp;
3603         device_t     dev = adapter->dev;
3604
3605         if (adapter->link_up) {
3606                 if (adapter->link_active == FALSE) {
3607                         if (bootverbose)
3608                                 device_printf(dev, "Link is up %d Gbps %s \n",
3609                                     ((adapter->link_speed == 128) ? 10 : 1),
3610                                     "Full Duplex");
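                        /*
                         * The 128 compared against above corresponds to the
                         * shared code's IXGBE_LINK_SPEED_10GB_FULL (0x80);
                         * any other speed is reported as 1 Gbps here.
                         */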
3611                         adapter->link_active = TRUE;
3612                         /* Update any Flow Control changes */
3613                         ixgbe_fc_enable(&adapter->hw);
3614                         /* Update DMA coalescing config */
3615                         ixgbe_config_dmac(adapter);
3616                         if_link_state_change(ifp, LINK_STATE_UP);
3617                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3618                                 ixgbe_ping_all_vfs(adapter);
3619                 }
3620         } else { /* Link down */
3621                 if (adapter->link_active == TRUE) {
3622                         if (bootverbose)
3623                                 device_printf(dev, "Link is Down\n");
3624                         if_link_state_change(ifp, LINK_STATE_DOWN);
3625                         adapter->link_active = FALSE;
3626                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3627                                 ixgbe_ping_all_vfs(adapter);
3628                 }
3629         }
3630
3631         return;
3632 } /* ixgbe_update_link_status */
3633
3634 /************************************************************************
3635  * ixgbe_config_dmac - Configure DMA Coalescing
3636  ************************************************************************/
3637 static void
3638 ixgbe_config_dmac(struct adapter *adapter)
3639 {
3640         struct ixgbe_hw          *hw = &adapter->hw;
3641         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3642
3643         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3644                 return;
3645
3646         if (dcfg->watchdog_timer != adapter->dmac ||
3647             dcfg->link_speed != adapter->link_speed) {
3648                 dcfg->watchdog_timer = adapter->dmac;
3649                 dcfg->fcoe_en = false;
3650                 dcfg->link_speed = adapter->link_speed;
3651                 dcfg->num_tcs = 1;
3652
3653                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3654                     dcfg->watchdog_timer, dcfg->link_speed);
3655
3656                 hw->mac.ops.dmac_config(hw);
3657         }
3658 } /* ixgbe_config_dmac */
3659
3660 /************************************************************************
3661  * ixgbe_enable_intr
3662  ************************************************************************/
3663 static void
3664 ixgbe_enable_intr(struct adapter *adapter)
3665 {
3666         struct ixgbe_hw *hw = &adapter->hw;
3667         struct ix_queue *que = adapter->queues;
3668         u32             mask, fwsm;
3669
3670         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3671
3672         switch (adapter->hw.mac.type) {
3673         case ixgbe_mac_82599EB:
3674                 mask |= IXGBE_EIMS_ECC;
3675                 /* Temperature sensor on some adapters */
3676                 mask |= IXGBE_EIMS_GPI_SDP0;
3677                 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3678                 mask |= IXGBE_EIMS_GPI_SDP1;
3679                 mask |= IXGBE_EIMS_GPI_SDP2;
3680                 break;
3681         case ixgbe_mac_X540:
3682                 /* Detect if Thermal Sensor is enabled */
3683                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3684                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3685                         mask |= IXGBE_EIMS_TS;
3686                 mask |= IXGBE_EIMS_ECC;
3687                 break;
3688         case ixgbe_mac_X550:
3689                 /* MAC thermal sensor is automatically enabled */
3690                 mask |= IXGBE_EIMS_TS;
3691                 mask |= IXGBE_EIMS_ECC;
3692                 break;
3693         case ixgbe_mac_X550EM_x:
3694         case ixgbe_mac_X550EM_a:
3695                 /* Some devices use SDP0 for important information */
3696                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3697                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3698                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3699                     hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3700                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3701                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3702                         mask |= IXGBE_EICR_GPI_SDP0_X540;
3703                 mask |= IXGBE_EIMS_ECC;
3704                 break;
3705         default:
3706                 break;
3707         }
3708
3709         /* Enable Fan Failure detection */
3710         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3711                 mask |= IXGBE_EIMS_GPI_SDP1;
3712         /* Enable SR-IOV */
3713         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3714                 mask |= IXGBE_EIMS_MAILBOX;
3715         /* Enable Flow Director */
3716         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3717                 mask |= IXGBE_EIMS_FLOW_DIR;
3718
3719         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3720
3721         /* With MSI-X we use auto clear */
3722         if (adapter->msix_mem) {
3723                 mask = IXGBE_EIMS_ENABLE_MASK;
3724                 /* Don't autoclear Link */
3725                 mask &= ~IXGBE_EIMS_OTHER;
3726                 mask &= ~IXGBE_EIMS_LSC;
3727                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3728                         mask &= ~IXGBE_EIMS_MAILBOX;
3729                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3730         }
3731
3732         /*
3733          * Now enable all queues, this is done separately to
3734          * allow for handling the extended (beyond 32) MSI-X
3735          * vectors that can be used by 82599
3736          */
3737         for (int i = 0; i < adapter->num_queues; i++, que++)
3738                 ixgbe_enable_queue(adapter, que->msix);
3739
3740         IXGBE_WRITE_FLUSH(hw);
3741
3742         return;
3743 } /* ixgbe_enable_intr */
3744
3745 /************************************************************************
3746  * ixgbe_disable_intr
3747  ************************************************************************/
3748 static void
3749 ixgbe_disable_intr(struct adapter *adapter)
3750 {
3751         if (adapter->msix_mem)
3752                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3753         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3754                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3755         } else {
3756                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3757                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3758                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3759         }
3760         IXGBE_WRITE_FLUSH(&adapter->hw);
3761
3762         return;
3763 } /* ixgbe_disable_intr */
3764
3765 /************************************************************************
3766  * ixgbe_legacy_irq - Legacy Interrupt Service routine
3767  ************************************************************************/
3768 static void
3769 ixgbe_legacy_irq(void *arg)
3770 {
3771         struct ix_queue *que = arg;
3772         struct adapter  *adapter = que->adapter;
3773         struct ixgbe_hw *hw = &adapter->hw;
3774         struct ifnet    *ifp = adapter->ifp;
3775         struct tx_ring  *txr = adapter->tx_rings;
3776         bool            more = false;
3777         u32             eicr, eicr_mask;
3778
3779         /* Silicon errata #26 on 82598 */
3780         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3781
3782         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3783
3784         ++que->irqs;
3785         if (eicr == 0) {
3786                 ixgbe_enable_intr(adapter);
3787                 return;
3788         }
3789
3790         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3791                 more = ixgbe_rxeof(que);
3792
3793                 IXGBE_TX_LOCK(txr);
3794                 ixgbe_txeof(txr);
3795                 if (!ixgbe_ring_empty(ifp, txr->br))
3796                         ixgbe_start_locked(ifp, txr);
3797                 IXGBE_TX_UNLOCK(txr);
3798         }
3799
3800         /* Check for fan failure */
3801         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3802                 ixgbe_check_fan_failure(adapter, eicr, true);
3803                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3804         }
3805
3806         /* Link status change */
3807         if (eicr & IXGBE_EICR_LSC)
3808                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
3809
3810         if (ixgbe_is_sfp(hw)) {
3811                 /* Pluggable optics-related interrupt */
3812                 if (hw->mac.type >= ixgbe_mac_X540)
3813                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3814                 else
3815                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3816
3817                 if (eicr & eicr_mask) {
3818                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3819                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3820                 }
3821
3822                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3823                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3824                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
3825                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3826                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3827                 }
3828         }
3829
3830         /* External PHY interrupt */
3831         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3832             (eicr & IXGBE_EICR_GPI_SDP0_X540))
3833                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
3834
3835         if (more)
3836                 taskqueue_enqueue(que->tq, &que->que_task);
3837         else
3838                 ixgbe_enable_intr(adapter);
3839
3840         return;
3841 } /* ixgbe_legacy_irq */
3842
3843 /************************************************************************
3844  * ixgbe_free_pci_resources
3845  ************************************************************************/
3846 static void
3847 ixgbe_free_pci_resources(struct adapter *adapter)
3848 {
3849         struct ix_queue *que = adapter->queues;
3850         device_t        dev = adapter->dev;
3851         int             rid, memrid;
3852
3853         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3854                 memrid = PCIR_BAR(MSIX_82598_BAR);
3855         else
3856                 memrid = PCIR_BAR(MSIX_82599_BAR);
3857
3858         /*
3859          * There is a slight possibility of a failure mode in attach
3860          * that will result in entering this function before the
3861          * interrupt resources have been initialized; in that case
3862          * we do not want to execute the loops below.  We can detect
3863          * this reliably by checking the state of the adapter's
3864          * res pointer.
3865          */
3866         if (adapter->res == NULL)
3867                 goto mem;
3868
3869         /*
3870          * Release all msix queue resources:
3871          */
3872         for (int i = 0; i < adapter->num_queues; i++, que++) {
3873                 rid = que->msix + 1;
3874                 if (que->tag != NULL) {
3875                         bus_teardown_intr(dev, que->res, que->tag);
3876                         que->tag = NULL;
3877                 }
3878                 if (que->res != NULL)
3879                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3880         }
3881
3882
3883         if (adapter->tag != NULL) {
3884                 bus_teardown_intr(dev, adapter->res, adapter->tag);
3885                 adapter->tag = NULL;
3886         }
3887
3888         /* Clean the Legacy or Link interrupt last */
3889         if (adapter->res != NULL)
3890                 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3891                     adapter->res);
3892
3893 mem:
3894         if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3895             (adapter->feat_en & IXGBE_FEATURE_MSIX))
3896                 pci_release_msi(dev);
3897
3898         if (adapter->msix_mem != NULL)
3899                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3900                     adapter->msix_mem);
3901
3902         if (adapter->pci_mem != NULL)
3903                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3904                     adapter->pci_mem);
3905
3906         return;
3907 } /* ixgbe_free_pci_resources */
3908
3909 /************************************************************************
3910  * ixgbe_set_sysctl_value
3911  ************************************************************************/
3912 static void
3913 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3914     const char *description, int *limit, int value)
3915 {
3916         *limit = value;
3917         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3918             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3919             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3920 } /* ixgbe_set_sysctl_value */
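/*
 * Illustrative use of the helper above (a sketch only, not additional
 * driver code; the knob name, description, variable and default used here
 * are hypothetical): attach code typically registers simple integer
 * sysctls this way, e.g.
 *
 *      ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
 *          "max number of rx packets to process per interrupt",
 *          &adapter->rx_process_limit, ixgbe_rx_process_limit);
 *
 * See the attach path for the sysctls this driver actually creates.
 */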
3921
3922 /************************************************************************
3923  * ixgbe_sysctl_flowcntl
3924  *
3925  *   SYSCTL wrapper around setting Flow Control
3926  ************************************************************************/
3927 static int
3928 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3929 {
3930         struct adapter *adapter;
3931         int            error, fc;
3932
3933         adapter = (struct adapter *)arg1;
3934         fc = adapter->hw.fc.current_mode;
3935
3936         error = sysctl_handle_int(oidp, &fc, 0, req);
3937         if ((error) || (req->newptr == NULL))
3938                 return (error);
3939
3940         /* Don't bother if it's not changed */
3941         if (fc == adapter->hw.fc.current_mode)
3942                 return (0);
3943
3944         return ixgbe_set_flowcntl(adapter, fc);
3945 } /* ixgbe_sysctl_flowcntl */
3946
3947 /************************************************************************
3948  * ixgbe_set_flowcntl - Set flow control
3949  *
3950  *   Flow control values:
3951  *     0 - off
3952  *     1 - rx pause
3953  *     2 - tx pause
3954  *     3 - full
3955  ************************************************************************/
3956 static int
3957 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3958 {
3959         switch (fc) {
3960         case ixgbe_fc_rx_pause:
3961         case ixgbe_fc_tx_pause:
3962         case ixgbe_fc_full:
3963                 adapter->hw.fc.requested_mode = fc;
3964                 if (adapter->num_queues > 1)
3965                         ixgbe_disable_rx_drop(adapter);
3966                 break;
3967         case ixgbe_fc_none:
3968                 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3969                 if (adapter->num_queues > 1)
3970                         ixgbe_enable_rx_drop(adapter);
3971                 break;
3972         default:
3973                 return (EINVAL);
3974         }
3975
3976         /* Don't autoneg if forcing a value */
3977         adapter->hw.fc.disable_fc_autoneg = TRUE;
3978         ixgbe_fc_enable(&adapter->hw);
3979
3980         return (0);
3981 } /* ixgbe_set_flowcntl */
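/*
 * Example only (assumes the interface is ix0 and the sysctl is exposed as
 * dev.ix.0.fc, as set up elsewhere in this driver): requesting full flow
 * control from userland,
 *
 *      # sysctl dev.ix.0.fc=3
 *
 * lands in ixgbe_sysctl_flowcntl() above, which hands fc == 3
 * (ixgbe_fc_full) to ixgbe_set_flowcntl(); rx drop is then disabled on
 * multiqueue configurations and the new mode is pushed to hardware via
 * ixgbe_fc_enable().
 */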
3982
3983 /************************************************************************
3984  * ixgbe_enable_rx_drop
3985  *
3986  *   Enable the hardware to drop packets when the buffer is
3987  *   full. This is useful with multiqueue, so that no single
3988  *   queue being full stalls the entire RX engine. We only
3989  *   enable this when Multiqueue is enabled AND Flow Control
3990  *   is disabled.
3991  ************************************************************************/
3992 static void
3993 ixgbe_enable_rx_drop(struct adapter *adapter)
3994 {
3995         struct ixgbe_hw *hw = &adapter->hw;
3996         struct rx_ring  *rxr;
3997         u32             srrctl;
3998
3999         for (int i = 0; i < adapter->num_queues; i++) {
4000                 rxr = &adapter->rx_rings[i];
4001                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4002                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4003                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4004         }
4005
4006         /* enable drop for each vf */
4007         for (int i = 0; i < adapter->num_vfs; i++) {
4008                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4009                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4010                     IXGBE_QDE_ENABLE));
4011         }
4012 } /* ixgbe_enable_rx_drop */
4013
4014 /************************************************************************
4015  * ixgbe_disable_rx_drop
4016  ************************************************************************/
4017 static void
4018 ixgbe_disable_rx_drop(struct adapter *adapter)
4019 {
4020         struct ixgbe_hw *hw = &adapter->hw;
4021         struct rx_ring  *rxr;
4022         u32             srrctl;
4023
4024         for (int i = 0; i < adapter->num_queues; i++) {
4025                 rxr = &adapter->rx_rings[i];
4026                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4027                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4028                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4029         }
4030
4031         /* disable drop for each vf */
4032         for (int i = 0; i < adapter->num_vfs; i++) {
4033                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4034                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4035         }
4036 } /* ixgbe_disable_rx_drop */
4037
4038 /************************************************************************
4039  * ixgbe_sysctl_advertise
4040  *
4041  *   SYSCTL wrapper around setting advertised speed
4042  ************************************************************************/
4043 static int
4044 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4045 {
4046         struct adapter *adapter;
4047         int            error, advertise;
4048
4049         adapter = (struct adapter *)arg1;
4050         advertise = adapter->advertise;
4051
4052         error = sysctl_handle_int(oidp, &advertise, 0, req);
4053         if ((error) || (req->newptr == NULL))
4054                 return (error);
4055
4056         return ixgbe_set_advertise(adapter, advertise);
4057 } /* ixgbe_sysctl_advertise */
4058
4059 /************************************************************************
4060  * ixgbe_set_advertise - Control advertised link speed
4061  *
4062  *   Flags:
4063  *     0x1 - advertise 100 Mb
4064  *     0x2 - advertise 1G
4065  *     0x4 - advertise 10G
4066  *     0x8 - advertise 10 Mb (yes, Mb)
4067  ************************************************************************/
4068 static int
4069 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4070 {
4071         device_t         dev;
4072         struct ixgbe_hw  *hw;
4073         ixgbe_link_speed speed = 0;
4074         ixgbe_link_speed link_caps = 0;
4075         s32              err = IXGBE_NOT_IMPLEMENTED;
4076         bool             negotiate = FALSE;
4077
4078         /* Checks to validate new value */
4079         if (adapter->advertise == advertise) /* no change */
4080                 return (0);
4081
4082         dev = adapter->dev;
4083         hw = &adapter->hw;
4084
4085         /* No speed changes for backplane media */
4086         if (hw->phy.media_type == ixgbe_media_type_backplane)
4087                 return (ENODEV);
4088
4089         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4090               (hw->phy.multispeed_fiber))) {
4091                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4092                 return (EINVAL);
4093         }
4094
4095         if (advertise < 0x1 || advertise > 0xF) {
4096                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4097                 return (EINVAL);
4098         }
4099
4100         if (hw->mac.ops.get_link_capabilities) {
4101                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4102                     &negotiate);
4103                 if (err != IXGBE_SUCCESS) {
4104                         device_printf(dev, "Unable to determine supported advertise speeds\n");
4105                         return (ENODEV);
4106                 }
4107         }
4108
4109         /* Set new value and report new advertised mode */
4110         if (advertise & 0x1) {
4111                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4112                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4113                         return (EINVAL);
4114                 }
4115                 speed |= IXGBE_LINK_SPEED_100_FULL;
4116         }
4117         if (advertise & 0x2) {
4118                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4119                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4120                         return (EINVAL);
4121                 }
4122                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4123         }
4124         if (advertise & 0x4) {
4125                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4126                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4127                         return (EINVAL);
4128                 }
4129                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4130         }
4131         if (advertise & 0x8) {
4132                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4133                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4134                         return (EINVAL);
4135                 }
4136                 speed |= IXGBE_LINK_SPEED_10_FULL;
4137         }
4138
4139         hw->mac.autotry_restart = TRUE;
4140         hw->mac.ops.setup_link(hw, speed, TRUE);
4141         adapter->advertise = advertise;
4142
4143         return (0);
4144 } /* ixgbe_set_advertise */
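/*
 * Example only (interface name and sysctl path assumed): the flag bits
 * above OR together, so advertising both 1G and 10G on a copper port
 * would look like
 *
 *      # sysctl dev.ix.0.advertise_speed=0x6
 *
 * which reaches ixgbe_set_advertise() with advertise == (0x2 | 0x4) and,
 * after the link capability checks, pushes the new speed set to hardware
 * through hw->mac.ops.setup_link().
 */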
4145
4146 /************************************************************************
4147  * ixgbe_get_advertise - Get current advertised speed settings
4148  *
4149  *   Formatted for sysctl usage.
4150  *   Flags:
4151  *     0x1 - advertise 100 Mb
4152  *     0x2 - advertise 1G
4153  *     0x4 - advertise 10G
4154  *     0x8 - advertise 10 Mb (yes, Mb)
4155  ************************************************************************/
4156 static int
4157 ixgbe_get_advertise(struct adapter *adapter)
4158 {
4159         struct ixgbe_hw  *hw = &adapter->hw;
4160         int              speed;
4161         ixgbe_link_speed link_caps = 0;
4162         s32              err;
4163         bool             negotiate = FALSE;
4164
4165         /*
4166          * Advertised speed means nothing unless it's copper or
4167          * multi-speed fiber
4168          */
4169         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4170             !(hw->phy.multispeed_fiber))
4171                 return (0);
4172
4173         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4174         if (err != IXGBE_SUCCESS)
4175                 return (0);
4176
4177         speed =
4178             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4179             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4180             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4181             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4182
4183         return speed;
4184 } /* ixgbe_get_advertise */
4185
4186 /************************************************************************
4187  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4188  *
4189  *   Control values:
4190  *     0/1 - off / on (use default value of 1000)
4191  *
4192  *     Legal timer values are:
4193  *     50,100,250,500,1000,2000,5000,10000
4194  *
4195  *     Turning off interrupt moderation will also turn this off.
4196  ************************************************************************/
4197 static int
4198 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4199 {
4200         struct adapter *adapter = (struct adapter *)arg1;
4201         struct ifnet   *ifp = adapter->ifp;
4202         int            error;
4203         u32            newval;
4204
4205         newval = adapter->dmac;
4206         error = sysctl_handle_int(oidp, &newval, 0, req);
4207         if ((error) || (req->newptr == NULL))
4208                 return (error);
4209
4210         switch (newval) {
4211         case 0:
4212                 /* Disabled */
4213                 adapter->dmac = 0;
4214                 break;
4215         case 1:
4216                 /* Enable and use default */
4217                 adapter->dmac = 1000;
4218                 break;
4219         case 50:
4220         case 100:
4221         case 250:
4222         case 500:
4223         case 1000:
4224         case 2000:
4225         case 5000:
4226         case 10000:
4227                 /* Legal values - allow */
4228                 adapter->dmac = newval;
4229                 break;
4230         default:
4231                 /* Do nothing, illegal value */
4232                 return (EINVAL);
4233         }
4234
4235         /* Re-initialize hardware if it's already running */
4236         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4237                 ixgbe_init(adapter);
4238
4239         return (0);
4240 } /* ixgbe_sysctl_dmac */
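/*
 * Example only (sysctl path assumed): enabling DMA coalescing with an
 * explicit timer value, or with the default,
 *
 *      # sysctl dev.ix.0.dmac=500
 *      # sysctl dev.ix.0.dmac=1
 *
 * Any value outside the legal set listed above is rejected with EINVAL,
 * and the interface is re-initialized if it was already running so the
 * new setting takes effect.
 */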
4241
4242 #ifdef IXGBE_DEBUG
4243 /************************************************************************
4244  * ixgbe_sysctl_power_state
4245  *
4246  *   Sysctl to test power states
4247  *   Values:
4248  *     0      - set device to D0
4249  *     3      - set device to D3
4250  *     (none) - get current device power state
4251  ************************************************************************/
4252 static int
4253 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4254 {
4255         struct adapter *adapter = (struct adapter *)arg1;
4256         device_t       dev = adapter->dev;
4257         int            curr_ps, new_ps, error = 0;
4258
4259         curr_ps = new_ps = pci_get_powerstate(dev);
4260
4261         error = sysctl_handle_int(oidp, &new_ps, 0, req);
4262         if ((error) || (req->newptr == NULL))
4263                 return (error);
4264
4265         if (new_ps == curr_ps)
4266                 return (0);
4267
4268         if (new_ps == 3 && curr_ps == 0)
4269                 error = DEVICE_SUSPEND(dev);
4270         else if (new_ps == 0 && curr_ps == 3)
4271                 error = DEVICE_RESUME(dev);
4272         else
4273                 return (EINVAL);
4274
4275         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4276
4277         return (error);
4278 } /* ixgbe_sysctl_power_state */
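/*
 * Example only (debug builds, sysctl name assumed): reading the node
 * returns the current PCI power state, while writing 3 or 0 suspends or
 * resumes the device through the bus; other transitions return EINVAL.
 *
 *      # sysctl dev.ix.0.power_state        (query)
 *      # sysctl dev.ix.0.power_state=3      (D3 via DEVICE_SUSPEND)
 *      # sysctl dev.ix.0.power_state=0      (D0 via DEVICE_RESUME)
 */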
4279 #endif
4280
4281 /************************************************************************
4282  * ixgbe_sysctl_wol_enable
4283  *
4284  *   Sysctl to enable/disable the WoL capability,
4285  *   if supported by the adapter.
4286  *
4287  *   Values:
4288  *     0 - disabled
4289  *     1 - enabled
4290  ************************************************************************/
4291 static int
4292 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4293 {
4294         struct adapter  *adapter = (struct adapter *)arg1;
4295         struct ixgbe_hw *hw = &adapter->hw;
4296         int             new_wol_enabled;
4297         int             error = 0;
4298
4299         new_wol_enabled = hw->wol_enabled;
4300         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4301         if ((error) || (req->newptr == NULL))
4302                 return (error);
4303         new_wol_enabled = !!(new_wol_enabled);
4304         if (new_wol_enabled == hw->wol_enabled)
4305                 return (0);
4306
4307         if (new_wol_enabled > 0 && !adapter->wol_support)
4308                 return (ENODEV);
4309         else
4310                 hw->wol_enabled = new_wol_enabled;
4311
4312         return (0);
4313 } /* ixgbe_sysctl_wol_enable */
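/*
 * Example only (sysctl path assumed): enabling Wake-on-LAN on an adapter
 * that advertises WoL support,
 *
 *      # sysctl dev.ix.0.wol_enable=1
 *
 * Writes are normalized to 0/1; a request to enable WoL on hardware
 * without wol_support fails with ENODEV.
 */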
4314
4315 /************************************************************************
4316  * ixgbe_sysctl_wufc - Wake Up Filter Control
4317  *
4318  *   Sysctl to select which types of received packets will
4319  *   wake the adapter.
4320  *   Flags:
4321  *     0x1  - Link Status Change
4322  *     0x2  - Magic Packet
4323  *     0x4  - Direct Exact
4324  *     0x8  - Directed Multicast
4325  *     0x10 - Broadcast
4326  *     0x20 - ARP/IPv4 Request Packet
4327  *     0x40 - Direct IPv4 Packet
4328  *     0x80 - Direct IPv6 Packet
4329  *
4330  *   Settings not listed above will cause the sysctl to return an error.
4331  ************************************************************************/
4332 static int
4333 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4334 {
4335         struct adapter *adapter = (struct adapter *)arg1;
4336         int            error = 0;
4337         u32            new_wufc;
4338
4339         new_wufc = adapter->wufc;
4340
4341         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4342         if ((error) || (req->newptr == NULL))
4343                 return (error);
4344         if (new_wufc == adapter->wufc)
4345                 return (0);
4346
4347         if (new_wufc & 0xffffff00)
4348                 return (EINVAL);
4349
4350         new_wufc &= 0xff;
4351         new_wufc |= (0xffffff00 & adapter->wufc);
4352         adapter->wufc = new_wufc;
4353
4354         return (0);
4355 } /* ixgbe_sysctl_wufc */
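/*
 * Example only (sysctl path assumed): waking only on Magic Packet and
 * link status change means OR-ing the flag bits above,
 *
 *      # sysctl dev.ix.0.wufc=0x3
 *
 * Values with bits outside the low byte set are rejected with EINVAL.
 */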
4356
4357 #ifdef IXGBE_DEBUG
4358 /************************************************************************
4359  * ixgbe_sysctl_print_rss_config
4360  ************************************************************************/
4361 static int
4362 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4363 {
4364         struct adapter  *adapter = (struct adapter *)arg1;
4365         struct ixgbe_hw *hw = &adapter->hw;
4366         device_t        dev = adapter->dev;
4367         struct sbuf     *buf;
4368         int             error = 0, reta_size;
4369         u32             reg;
4370
4371         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4372         if (!buf) {
4373                 device_printf(dev, "Could not allocate sbuf for output.\n");
4374                 return (ENOMEM);
4375         }
4376
4377         // TODO: use sbufs to make a string to print out
4378         /* Set multiplier for RETA setup and table size based on MAC */
4379         switch (adapter->hw.mac.type) {
4380         case ixgbe_mac_X550:
4381         case ixgbe_mac_X550EM_x:
4382         case ixgbe_mac_X550EM_a:
4383                 reta_size = 128;
4384                 break;
4385         default:
4386                 reta_size = 32;
4387                 break;
4388         }
4389
4390         /* Print out the redirection table */
4391         sbuf_cat(buf, "\n");
4392         for (int i = 0; i < reta_size; i++) {
4393                 if (i < 32) {
4394                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4395                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4396                 } else {
4397                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4398                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4399                 }
4400         }
4401
4402         // TODO: print more config
4403
4404         error = sbuf_finish(buf);
4405         if (error)
4406                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4407
4408         sbuf_delete(buf);
4409
4410         return (0);
4411 } /* ixgbe_sysctl_print_rss_config */
4412 #endif /* IXGBE_DEBUG */
4413
4414 /************************************************************************
4415  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4416  *
4417  *   For X552/X557-AT devices using an external PHY
4418  ************************************************************************/
4419 static int
4420 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4421 {
4422         struct adapter  *adapter = (struct adapter *)arg1;
4423         struct ixgbe_hw *hw = &adapter->hw;
4424         u16             reg;
4425
4426         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4427                 device_printf(adapter->dev,
4428                     "Device has no supported external thermal sensor.\n");
4429                 return (ENODEV);
4430         }
4431
4432         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4433             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4434                 device_printf(adapter->dev,
4435                     "Error reading from PHY's current temperature register\n");
4436                 return (EAGAIN);
4437         }
4438
4439         /* Shift temp for output */
4440         reg = reg >> 8;
4441
4442         return (sysctl_handle_int(oidp, NULL, reg, req));
4443 } /* ixgbe_sysctl_phy_temp */
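/*
 * Example only (sysctl name assumed): on X552/X557-AT parts the external
 * PHY temperature can be read with
 *
 *      # sysctl dev.ix.0.phy_temp
 *
 * Other devices return ENODEV from the handler above.
 */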
4444
4445 /************************************************************************
4446  * ixgbe_sysctl_phy_overtemp_occurred
4447  *
4448  *   Reports (directly from the PHY) whether the current PHY
4449  *   temperature is over the overtemp threshold.
4450  ************************************************************************/
4451 static int
4452 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4453 {
4454         struct adapter  *adapter = (struct adapter *)arg1;
4455         struct ixgbe_hw *hw = &adapter->hw;
4456         u16             reg;
4457
4458         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4459                 device_printf(adapter->dev,
4460                     "Device has no supported external thermal sensor.\n");
4461                 return (ENODEV);
4462         }
4463
4464         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4465             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4466                 device_printf(adapter->dev,
4467                     "Error reading from PHY's temperature status register\n");
4468                 return (EAGAIN);
4469         }
4470
4471         /* Get occurrence bit */
4472         reg = !!(reg & 0x4000);
4473
4474         return (sysctl_handle_int(oidp, NULL, reg, req));
4475 } /* ixgbe_sysctl_phy_overtemp_occurred */
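/*
 * Example only (sysctl name assumed): the companion overtemp indicator is
 * read the same way,
 *
 *      # sysctl dev.ix.0.phy_overtemp_occurred
 *
 * and reports 1 while the PHY indicates its temperature is over the
 * overtemp threshold.
 */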
4476
4477 /************************************************************************
4478  * ixgbe_sysctl_eee_state
4479  *
4480  *   Sysctl to set EEE power saving feature
4481  *   Values:
4482  *     0      - disable EEE
4483  *     1      - enable EEE
4484  *     (none) - get current device EEE state
4485  ************************************************************************/
4486 static int
4487 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4488 {
4489         struct adapter *adapter = (struct adapter *)arg1;
4490         device_t       dev = adapter->dev;
4491         int            curr_eee, new_eee, error = 0;
4492         s32            retval;
4493
4494         curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4495
4496         error = sysctl_handle_int(oidp, &new_eee, 0, req);
4497         if ((error) || (req->newptr == NULL))
4498                 return (error);
4499
4500         /* Nothing to do */
4501         if (new_eee == curr_eee)
4502                 return (0);
4503
4504         /* Not supported */
4505         if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4506                 return (EINVAL);
4507
4508         /* Bounds checking */
4509         if ((new_eee < 0) || (new_eee > 1))
4510                 return (EINVAL);
4511
4512         retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4513         if (retval) {
4514                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4515                 return (EINVAL);
4516         }
4517
4518         /* Restart auto-neg */
4519         ixgbe_init(adapter);
4520
4521         device_printf(dev, "New EEE state: %d\n", new_eee);
4522
4523         /* Cache new value */
4524         if (new_eee)
4525                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4526         else
4527                 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4528
4529         return (error);
4530 } /* ixgbe_sysctl_eee_state */
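/*
 * Example only (sysctl path assumed): toggling Energy Efficient Ethernet
 * on hardware that supports it,
 *
 *      # sysctl dev.ix.0.eee_state=1
 *
 * calls setup_eee() in the shared code and then re-initializes the
 * interface to restart auto-negotiation.
 */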
4531
4532 /************************************************************************
4533  * ixgbe_init_device_features
4534  ************************************************************************/
4535 static void
4536 ixgbe_init_device_features(struct adapter *adapter)
4537 {
4538         adapter->feat_cap = IXGBE_FEATURE_NETMAP
4539                           | IXGBE_FEATURE_RSS
4540                           | IXGBE_FEATURE_MSI
4541                           | IXGBE_FEATURE_MSIX
4542                           | IXGBE_FEATURE_LEGACY_IRQ
4543                           | IXGBE_FEATURE_LEGACY_TX;
4544
4545         /* Set capabilities first... */
4546         switch (adapter->hw.mac.type) {
4547         case ixgbe_mac_82598EB:
4548                 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4549                         adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4550                 break;
4551         case ixgbe_mac_X540:
4552                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4553                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4554                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4555                     (adapter->hw.bus.func == 0))
4556                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4557                 break;
4558         case ixgbe_mac_X550:
4559                 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4560                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4561                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4562                 break;
4563         case ixgbe_mac_X550EM_x:
4564                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4565                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4566                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4567                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4568                 break;
4569         case ixgbe_mac_X550EM_a:
4570                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4571                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4572                 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4573                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4574                     (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4575                         adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4576                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4577                 }
4578                 break;
4579         case ixgbe_mac_82599EB:
4580                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4581                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4582                 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4583                     (adapter->hw.bus.func == 0))
4584                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4585                 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4586                         adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4587                 break;
4588         default:
4589                 break;
4590         }
4591
4592         /* Enabled by default... */
4593         /* Fan failure detection */
4594         if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4595                 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4596         /* Netmap */
4597         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4598                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4599         /* EEE */
4600         if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4601                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4602         /* Thermal Sensor */
4603         if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4604                 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4605
4606         /* Enabled via global sysctl... */
4607         /* Flow Director */
4608         if (ixgbe_enable_fdir) {
4609                 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4610                         adapter->feat_en |= IXGBE_FEATURE_FDIR;
4611                 else
4612                         device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4613         }
4614         /* Legacy (single queue) transmit */
4615         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4616             ixgbe_enable_legacy_tx)
4617                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4618         /*
4619          * Message Signaled Interrupts - Extended (MSI-X)
4620          * Normal MSI is only enabled if MSI-X calls fail.
4621          */
4622         if (!ixgbe_enable_msix)
4623                 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4624         /* Receive-Side Scaling (RSS) */
4625         if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4626                 adapter->feat_en |= IXGBE_FEATURE_RSS;
4627
4628         /* Disable features with unmet dependencies... */
4629         /* No MSI-X */
4630         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4631                 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4632                 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4633                 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4634                 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4635         }
4636 } /* ixgbe_init_device_features */
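/*
 * Illustrative note on the feature flags set above: feat_cap records what
 * the hardware/driver combination could support, feat_en what is actually
 * in use, so run-time checks elsewhere in this file look like
 *
 *      if (adapter->feat_en & IXGBE_FEATURE_FDIR)
 *              TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
 *
 * By convention a feature bit is enabled in feat_en only when the
 * corresponding capability bit is present in feat_cap.
 */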
4637
4638 /************************************************************************
4639  * ixgbe_probe - Device identification routine
4640  *
4641  *   Determines if the driver should be loaded on
4642  *   adapter based on its PCI vendor/device ID.
4643  *
4644  *   return BUS_PROBE_DEFAULT on success, positive on failure
4645  ************************************************************************/
4646 static int
4647 ixgbe_probe(device_t dev)
4648 {
4649         ixgbe_vendor_info_t *ent;
4650
4651         u16  pci_vendor_id = 0;
4652         u16  pci_device_id = 0;
4653         u16  pci_subvendor_id = 0;
4654         u16  pci_subdevice_id = 0;
4655         char adapter_name[256];
4656
4657         INIT_DEBUGOUT("ixgbe_probe: begin");
4658
4659         pci_vendor_id = pci_get_vendor(dev);
4660         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4661                 return (ENXIO);
4662
4663         pci_device_id = pci_get_device(dev);
4664         pci_subvendor_id = pci_get_subvendor(dev);
4665         pci_subdevice_id = pci_get_subdevice(dev);
4666
4667         ent = ixgbe_vendor_info_array;
4668         while (ent->vendor_id != 0) {
4669                 if ((pci_vendor_id == ent->vendor_id) &&
4670                     (pci_device_id == ent->device_id) &&
4671                     ((pci_subvendor_id == ent->subvendor_id) ||
4672                      (ent->subvendor_id == 0)) &&
4673                     ((pci_subdevice_id == ent->subdevice_id) ||
4674                      (ent->subdevice_id == 0))) {
4675                         sprintf(adapter_name, "%s, Version - %s",
4676                                 ixgbe_strings[ent->index],
4677                                 ixgbe_driver_version);
4678                         device_set_desc_copy(dev, adapter_name);
4679                         ++ixgbe_total_ports;
4680                         return (BUS_PROBE_DEFAULT);
4681                 }
4682                 ent++;
4683         }
4684
4685         return (ENXIO);
4686 } /* ixgbe_probe */
4687
4688
4689 /************************************************************************
4690  * ixgbe_ioctl - Ioctl entry point
4691  *
4692  *   Called when the user wants to configure the interface.
4693  *
4694  *   return 0 on success, positive on failure
4695  ************************************************************************/
4696 static int
4697 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4698 {
4699         struct adapter *adapter = ifp->if_softc;
4700         struct ifreq   *ifr = (struct ifreq *) data;
4701 #if defined(INET) || defined(INET6)
4702         struct ifaddr  *ifa = (struct ifaddr *)data;
4703 #endif
4704         int            error = 0;
4705         bool           avoid_reset = FALSE;
4706
4707         switch (command) {
4708         case SIOCSIFADDR:
4709 #ifdef INET
4710                 if (ifa->ifa_addr->sa_family == AF_INET)
4711                         avoid_reset = TRUE;
4712 #endif
4713 #ifdef INET6
4714                 if (ifa->ifa_addr->sa_family == AF_INET6)
4715                         avoid_reset = TRUE;
4716 #endif
4717                 /*
4718                  * Calling init results in link renegotiation,
4719                  * so we avoid doing it when possible.
4720                  */
4721                 if (avoid_reset) {
4722                         ifp->if_flags |= IFF_UP;
4723                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4724                                 ixgbe_init(adapter);
4725 #ifdef INET
4726                         if (!(ifp->if_flags & IFF_NOARP))
4727                                 arp_ifinit(ifp, ifa);
4728 #endif
4729                 } else
4730                         error = ether_ioctl(ifp, command, data);
4731                 break;
4732         case SIOCSIFMTU:
4733                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4734                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4735                         error = EINVAL;
4736                 } else {
4737                         IXGBE_CORE_LOCK(adapter);
4738                         ifp->if_mtu = ifr->ifr_mtu;
4739                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4740                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4741                                 ixgbe_init_locked(adapter);
4742                         ixgbe_recalculate_max_frame(adapter);
4743                         IXGBE_CORE_UNLOCK(adapter);
4744                 }
4745                 break;
4746         case SIOCSIFFLAGS:
4747                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4748                 IXGBE_CORE_LOCK(adapter);
4749                 if (ifp->if_flags & IFF_UP) {
4750                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4751                                 if ((ifp->if_flags ^ adapter->if_flags) &
4752                                     (IFF_PROMISC | IFF_ALLMULTI)) {
4753                                         ixgbe_set_promisc(adapter);
4754                                 }
4755                         } else
4756                                 ixgbe_init_locked(adapter);
4757                 } else
4758                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4759                                 ixgbe_stop(adapter);
4760                 adapter->if_flags = ifp->if_flags;
4761                 IXGBE_CORE_UNLOCK(adapter);
4762                 break;
4763         case SIOCADDMULTI:
4764         case SIOCDELMULTI:
4765                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4766                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4767                         IXGBE_CORE_LOCK(adapter);
4768                         ixgbe_disable_intr(adapter);
4769                         ixgbe_set_multi(adapter);
4770                         ixgbe_enable_intr(adapter);
4771                         IXGBE_CORE_UNLOCK(adapter);
4772                 }
4773                 break;
4774         case SIOCSIFMEDIA:
4775         case SIOCGIFMEDIA:
4776                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4777                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4778                 break;
4779         case SIOCSIFCAP:
4780         {
4781                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4782
4783                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4784
4785                 if (!mask)
4786                         break;
4787
4788                 /* HW cannot turn these on/off separately */
4789                 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4790                         ifp->if_capenable ^= IFCAP_RXCSUM;
4791                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4792                 }
4793                 if (mask & IFCAP_TXCSUM)
4794                         ifp->if_capenable ^= IFCAP_TXCSUM;
4795                 if (mask & IFCAP_TXCSUM_IPV6)
4796                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4797                 if (mask & IFCAP_TSO4)
4798                         ifp->if_capenable ^= IFCAP_TSO4;
4799                 if (mask & IFCAP_TSO6)
4800                         ifp->if_capenable ^= IFCAP_TSO6;
4801                 if (mask & IFCAP_LRO)
4802                         ifp->if_capenable ^= IFCAP_LRO;
4803                 if (mask & IFCAP_VLAN_HWTAGGING)
4804                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4805                 if (mask & IFCAP_VLAN_HWFILTER)
4806                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4807                 if (mask & IFCAP_VLAN_HWTSO)
4808                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4809
4810                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4811                         IXGBE_CORE_LOCK(adapter);
4812                         ixgbe_init_locked(adapter);
4813                         IXGBE_CORE_UNLOCK(adapter);
4814                 }
4815                 VLAN_CAPABILITIES(ifp);
4816                 break;
4817         }
4818 #if __FreeBSD_version >= 1100036
4819         case SIOCGI2C:
4820         {
4821                 struct ixgbe_hw *hw = &adapter->hw;
4822                 struct ifi2creq i2c;
4823                 int i;
4824
4825                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4826                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
4827                 if (error != 0)
4828                         break;
4829                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4830                         error = EINVAL;
4831                         break;
4832                 }
4833                 if (i2c.len > sizeof(i2c.data)) {
4834                         error = EINVAL;
4835                         break;
4836                 }
4837
4838                 for (i = 0; i < i2c.len; i++)
4839                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4840                             i2c.dev_addr, &i2c.data[i]);
4841                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
4842                 break;
4843         }
4844 #endif
4845         default:
4846                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4847                 error = ether_ioctl(ifp, command, data);
4848                 break;
4849         }
4850
4851         return (error);
4852 } /* ixgbe_ioctl */
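/*
 * Example only: the standard FreeBSD tools drive this entry point, e.g.
 *
 *      # ifconfig ix0 mtu 9000        (SIOCSIFMTU)
 *      # ifconfig ix0 -lro            (SIOCSIFCAP)
 *
 * assuming ix0 is an interface attached by this driver; unrecognized
 * requests fall through to ether_ioctl().
 */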
4853
4854 /************************************************************************
4855  * ixgbe_check_fan_failure
4856  ************************************************************************/
4857 static void
4858 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4859 {
4860         u32 mask;
4861
4862         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4863             IXGBE_ESDP_SDP1;
4864
4865         if (reg & mask)
4866                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4867 } /* ixgbe_check_fan_failure */
4868
4869 /************************************************************************
4870  * ixgbe_handle_que
4871  ************************************************************************/
4872 static void
4873 ixgbe_handle_que(void *context, int pending)
4874 {
4875         struct ix_queue *que = context;
4876         struct adapter  *adapter = que->adapter;
4877         struct tx_ring  *txr = que->txr;
4878         struct ifnet    *ifp = adapter->ifp;
4879
4880         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4881                 ixgbe_rxeof(que);
4882                 IXGBE_TX_LOCK(txr);
4883                 ixgbe_txeof(txr);
4884                 if (!ixgbe_ring_empty(ifp, txr->br))
4885                         ixgbe_start_locked(ifp, txr);
4886                 IXGBE_TX_UNLOCK(txr);
4887         }
4888
4889         /* Re-enable this interrupt */
4890         if (que->res != NULL)
4891                 ixgbe_enable_queue(adapter, que->msix);
4892         else
4893                 ixgbe_enable_intr(adapter);
4894
4895         return;
4896 } /* ixgbe_handle_que */
4897
4898
4899
4900 /************************************************************************
4901  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4902  ************************************************************************/
4903 static int
4904 ixgbe_allocate_legacy(struct adapter *adapter)
4905 {
4906         device_t        dev = adapter->dev;
4907         struct ix_queue *que = adapter->queues;
4908         struct tx_ring  *txr = adapter->tx_rings;
4909         int             error;
4910
4911         /* We allocate a single interrupt resource */
4912         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4913             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4914         if (adapter->res == NULL) {
4915                 device_printf(dev,
4916                     "Unable to allocate bus resource: interrupt\n");
4917                 return (ENXIO);
4918         }
4919
4920         /*
4921          * Try allocating a fast interrupt and the associated deferred
4922          * processing contexts.
4923          */
4924         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4925                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4926         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4927         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4928             taskqueue_thread_enqueue, &que->tq);
4929         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4930             device_get_nameunit(adapter->dev));
4931
4932         /* Tasklets for Link, SFP and Multispeed Fiber */
4933         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4934         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4935         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4936         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4937         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4938                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4939         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4940             taskqueue_thread_enqueue, &adapter->tq);
4941         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4942             device_get_nameunit(adapter->dev));
4943
4944         if ((error = bus_setup_intr(dev, adapter->res,
4945             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4946             &adapter->tag)) != 0) {
4947                 device_printf(dev,
4948                     "Failed to register fast interrupt handler: %d\n", error);
4949                 taskqueue_free(que->tq);
4950                 taskqueue_free(adapter->tq);
4951                 que->tq = NULL;
4952                 adapter->tq = NULL;
4953
4954                 return (error);
4955         }
4956         /* For simplicity in the handlers */
4957         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
4958
4959         return (0);
4960 } /* ixgbe_allocate_legacy */
4961
4962
4963 /************************************************************************
4964  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4965  ************************************************************************/
4966 static int
4967 ixgbe_allocate_msix(struct adapter *adapter)
4968 {
4969         device_t        dev = adapter->dev;
4970         struct ix_queue *que = adapter->queues;
4971         struct tx_ring  *txr = adapter->tx_rings;
4972         int             error, rid, vector = 0;
4973         int             cpu_id = 0;
4974         unsigned int    rss_buckets = 0;
4975         cpuset_t        cpu_mask;
4976
4977         /*
4978          * If we're doing RSS, the number of queues needs to
4979          * match the number of RSS buckets that are configured.
4980          *
4981          * + If there's more queues than RSS buckets, we'll end
4982          *   up with queues that get no traffic.
4983          *
4984          * + If there's more RSS buckets than queues, we'll end
4985          *   up having multiple RSS buckets map to the same queue,
4986          *   so there'll be some contention.
4987          */
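        /*
         * Illustrative numbers for the two cases above: with 8 RSS
         * buckets but only 4 queues, two buckets share every queue;
         * with 2 buckets and 4 queues, two of the queues never see
         * RSS-hashed traffic.
         */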
4988         rss_buckets = rss_getnumbuckets();
4989         if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4990             (adapter->num_queues != rss_buckets)) {
4991                 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4992                     __func__, adapter->num_queues, rss_buckets);
4993         }
4994
4995         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4996                 rid = vector + 1;
4997                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
4998                     RF_SHAREABLE | RF_ACTIVE);
4999                 if (que->res == NULL) {
5000                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5001                             vector);
5002                         return (ENXIO);
5003                 }
5004                 /* Set the handler function */
5005                 error = bus_setup_intr(dev, que->res,
5006                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5007                     &que->tag);
5008                 if (error) {
5009                         que->res = NULL;
5010                         device_printf(dev, "Failed to register QUE handler");
5011                         return (error);
5012                 }
5013 #if __FreeBSD_version >= 800504
5014                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5015 #endif
5016                 que->msix = vector;
5017                 adapter->active_queues |= ((u64)1 << que->msix);
5018
5019                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5020                         /*
5021                          * The queue ID is used as the RSS layer bucket ID.
5022                          * We look up the queue ID -> RSS CPU ID and select
5023                          * that.
5024                          */
5025                         cpu_id = rss_getcpu(i % rss_buckets);
5026                         CPU_SETOF(cpu_id, &cpu_mask);
5027                 } else {
5028                         /*
5029                          * Bind the MSI-X vector, and thus the
5030                          * rings to the corresponding CPU.
5031                          *
5032                          * This just happens to match the default RSS
5033                          * round-robin bucket -> queue -> CPU allocation.
5034                          */
5035                         if (adapter->num_queues > 1)
5036                                 cpu_id = i;
5037                 }
5038                 if (adapter->num_queues > 1)
5039                         bus_bind_intr(dev, que->res, cpu_id);
5040 #ifdef IXGBE_DEBUG
5041                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5042                         device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5043                             cpu_id);
5044                 else
5045                         device_printf(dev, "Bound queue %d to cpu %d\n", i,
5046                             cpu_id);
5047 #endif /* IXGBE_DEBUG */
5048
5049
5050                 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5051                         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5052                             txr);
5053                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5054                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5055                     taskqueue_thread_enqueue, &que->tq);
5056 #if __FreeBSD_version < 1100000
5057                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5058                     device_get_nameunit(adapter->dev), i);
5059 #else
5060                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5061                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5062                             &cpu_mask, "%s (bucket %d)",
5063                             device_get_nameunit(adapter->dev), cpu_id);
5064                 else
5065                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5066                             NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5067                             i);
5068 #endif
5069         }
5070
5071         /* and Link */
5072         adapter->link_rid = vector + 1;
5073         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5074             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5075         if (!adapter->res) {
5076                 device_printf(dev,
5077                     "Unable to allocate bus resource: Link interrupt [%d]\n",
5078                     adapter->link_rid);
5079                 return (ENXIO);
5080         }
5081         /* Set the link handler function */
5082         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5083             NULL, ixgbe_msix_link, adapter, &adapter->tag);
5084         if (error) {
5085                 adapter->res = NULL;
5086                 device_printf(dev, "Failed to register LINK handler");
5087                 return (error);
5088         }
5089 #if __FreeBSD_version >= 800504
5090         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5091 #endif
5092         adapter->vector = vector;
5093         /* Tasklets for Link, SFP and Multispeed Fiber */
5094         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5095         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5096         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5097         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5098                 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5099         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5100         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5101                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5102         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5103             taskqueue_thread_enqueue, &adapter->tq);
5104         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5105             device_get_nameunit(adapter->dev));
5106
5107         return (0);
5108 } /* ixgbe_allocate_msix */
5109
5110 /************************************************************************
5111  * ixgbe_configure_interrupts
5112  *
5113  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5114  *   This will also depend on user settings.
5115  ************************************************************************/
5116 static int
5117 ixgbe_configure_interrupts(struct adapter *adapter)
5118 {
5119         device_t dev = adapter->dev;
5120         int      rid, want, queues, msgs;
5121
5122         /* Default to 1 queue if MSI-X setup fails */
5123         adapter->num_queues = 1;
5124
5125         /* Override by tunable */
5126         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5127                 goto msi;
5128
5129         /* First try MSI-X */
5130         msgs = pci_msix_count(dev);
5131         if (msgs == 0)
5132                 goto msi;
5133         rid = PCIR_BAR(MSIX_82598_BAR);
5134         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5135             RF_ACTIVE);
5136         if (adapter->msix_mem == NULL) {
5137                 rid += 4;  /* 82599 maps in higher BAR */
5138                 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5139                     &rid, RF_ACTIVE);
5140         }
5141         if (adapter->msix_mem == NULL) {
5142                 /* May not be enabled */
5143                 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5144                 goto msi;
5145         }
5146
5147         /* Figure out a reasonable auto config value */
5148         queues = min(mp_ncpus, msgs - 1);
5149         /* If we're doing RSS, clamp at the number of RSS buckets */
5150         if (adapter->feat_en & IXGBE_FEATURE_RSS)
5151                 queues = min(queues, rss_getnumbuckets());
5152         if (ixgbe_num_queues > queues) {
5153                 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5154                 ixgbe_num_queues = queues;
5155         }
5156
5157         if (ixgbe_num_queues != 0)
5158                 queues = ixgbe_num_queues;
5159         /* Set max queues to 8 when autoconfiguring */
5160         else
5161                 queues = min(queues, 8);
5162
5163         /* reflect correct sysctl value */
5164         ixgbe_num_queues = queues;
5165
5166         /*
5167          * Want one vector (RX/TX pair) per queue
5168          * plus an additional for Link.
5169          */
5170         want = queues + 1;
5171         if (msgs >= want)
5172                 msgs = want;
5173         else {
5174                 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
5175                     msgs, want);
5176                 goto msi;
5177         }
5178         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5179                 device_printf(adapter->dev,
5180                     "Using MSI-X interrupts with %d vectors\n", msgs);
5181                 adapter->num_queues = queues;
5182                 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5183                 return (0);
5184         }
5185         /*
5186          * MSI-X allocation failed or provided us with
5187          * less vectors than needed. Free MSI-X resources
5188          * and we'll try enabling MSI.
5189          */
5190         pci_release_msi(dev);
5191
5192 msi:
5193         /* Without MSI-X, some features are no longer supported */
5194         adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5195         adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
5196         adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5197         adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
5198
5199         if (adapter->msix_mem != NULL) {
5200                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5201                     adapter->msix_mem);
5202                 adapter->msix_mem = NULL;
5203         }
5204         msgs = 1;
5205         if (pci_alloc_msi(dev, &msgs) == 0) {
5206                 adapter->feat_en |= IXGBE_FEATURE_MSI;
5207                 adapter->link_rid = 1;
5208                 device_printf(adapter->dev, "Using an MSI interrupt\n");
5209                 return (0);
5210         }
5211
5212         if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5213                 device_printf(adapter->dev,
5214                     "Device does not support legacy interrupts.\n");
5215                 return 1;
5216         }
5217
5218         adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5219         adapter->link_rid = 0;
5220         device_printf(adapter->dev, "Using a Legacy interrupt\n");
5221
5222         return (0);
5223 } /* ixgbe_configure_interrupts */
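/*
 * Example only (loader tunable names assumed; typically set from
 * loader.conf(5) before the driver attaches): the selection above can be
 * steered with
 *
 *      hw.ix.enable_msix=0     # skip MSI-X, try MSI then legacy
 *      hw.ix.num_queues=4      # request four queue vectors explicitly
 *
 * With MSI-X the driver wants one vector per queue plus one for link, so
 * four queues require five MSI-X messages.
 */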
5224
5225
5226 /************************************************************************
5227  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5228  *
5229  *   Done outside of interrupt context since the driver might sleep
5230  ************************************************************************/
5231 static void
5232 ixgbe_handle_link(void *context, int pending)
5233 {
5234         struct adapter  *adapter = context;
5235         struct ixgbe_hw *hw = &adapter->hw;
5236
5237         ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5238         ixgbe_update_link_status(adapter);
5239
5240         /* Re-enable link interrupts */
5241         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5242 } /* ixgbe_handle_link */
5243
5244 /************************************************************************
5245  * ixgbe_rearm_queues
5246  ************************************************************************/
5247 static void
5248 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5249 {
5250         u32 mask;
5251
5252         switch (adapter->hw.mac.type) {
5253         case ixgbe_mac_82598EB:
5254                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5255                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5256                 break;
5257         case ixgbe_mac_82599EB:
5258         case ixgbe_mac_X540:
5259         case ixgbe_mac_X550:
5260         case ixgbe_mac_X550EM_x:
5261         case ixgbe_mac_X550EM_a:
5262                 mask = (queues & 0xFFFFFFFF);
5263                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5264                 mask = (queues >> 32);
5265                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5266                 break;
5267         default:
5268                 break;
5269         }
5270 } /* ixgbe_rearm_queues */
5271